Coverage Report

Created: 2020-02-15 09:57

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp

Columns below are: source line | execution count | source.
    1|       |//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
    2|       |//
    3|       |// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
    4|       |// See https://llvm.org/LICENSE.txt for license information.
    5|       |// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
    6|       |//
    7|       |//===----------------------------------------------------------------------===//
    8|       |//
    9|       |// This coordinates the per-function state used while generating code.
   10|       |//
   11|       |//===----------------------------------------------------------------------===//
   12|       |
   13|       |#include "CodeGenFunction.h"
   14|       |#include "CGBlocks.h"
   15|       |#include "CGCUDARuntime.h"
   16|       |#include "CGCXXABI.h"
   17|       |#include "CGCleanup.h"
   18|       |#include "CGDebugInfo.h"
   19|       |#include "CGOpenMPRuntime.h"
   20|       |#include "CodeGenModule.h"
   21|       |#include "CodeGenPGO.h"
   22|       |#include "TargetInfo.h"
   23|       |#include "clang/AST/ASTContext.h"
   24|       |#include "clang/AST/ASTLambda.h"
   25|       |#include "clang/AST/Attr.h"
   26|       |#include "clang/AST/Decl.h"
   27|       |#include "clang/AST/DeclCXX.h"
   28|       |#include "clang/AST/StmtCXX.h"
   29|       |#include "clang/AST/StmtObjC.h"
   30|       |#include "clang/Basic/Builtins.h"
   31|       |#include "clang/Basic/CodeGenOptions.h"
   32|       |#include "clang/Basic/TargetInfo.h"
   33|       |#include "clang/CodeGen/CGFunctionInfo.h"
   34|       |#include "clang/Frontend/FrontendDiagnostic.h"
   35|       |#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
   36|       |#include "llvm/IR/DataLayout.h"
   37|       |#include "llvm/IR/Dominators.h"
   38|       |#include "llvm/IR/FPEnv.h"
   39|       |#include "llvm/IR/IntrinsicInst.h"
   40|       |#include "llvm/IR/Intrinsics.h"
   41|       |#include "llvm/IR/MDBuilder.h"
   42|       |#include "llvm/IR/Operator.h"
   43|       |#include "llvm/Transforms/Utils/PromoteMemToReg.h"
   44|       |using namespace clang;
   45|       |using namespace CodeGen;
   46|       |
   47|       |/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
   48|       |/// markers.
   49|       |static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
   50|   243k|                                      const LangOptions &LangOpts) {
   51|   243k|  if (CGOpts.DisableLifetimeMarkers)
   52|      7|    return false;
   53|   243k|
   54|   243k|  // Sanitizers may use markers.
   55|   243k|  if (CGOpts.SanitizeAddressUseAfterScope ||
   56|   243k|      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
   57|   243k|      LangOpts.Sanitize.has(SanitizerKind::Memory))
   58|    972|    return true;
   59|   242k|
   60|   242k|  // For now, only in optimized builds.
   61|   242k|  return CGOpts.OptimizationLevel != 0;
   62|   242k|}
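Note: the lifetime markers decided on above are the llvm.lifetime.start/end
intrinsics that bracket a local variable's alloca; they let the optimizer
reuse stack slots and let AddressSanitizer's use-after-scope mode poison a
slot once its scope ends. A minimal sketch of emitting such markers through
llvm::IRBuilder (an illustration under those assumptions, not Clang's actual
emission path; the function name is hypothetical):

  #include "llvm/IR/IRBuilder.h"

  // Bracket an 8-byte local with lifetime markers. Assumes 'B' is already
  // positioned inside some function's entry block.
  void emitScopedLocal(llvm::IRBuilder<> &B) {
    llvm::AllocaInst *Slot = B.CreateAlloca(B.getInt64Ty(), nullptr, "tmp");
    llvm::ConstantInt *Size = B.getInt64(8); // object size in bytes
    B.CreateLifetimeStart(Slot, Size);       // scope begins: llvm.lifetime.start
    B.CreateStore(B.getInt64(42), Slot);     // ...uses of the local...
    B.CreateLifetimeEnd(Slot, Size);         // scope ends: llvm.lifetime.end
  }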
   63|       |
   64|       |CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
   65|       |    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
   66|       |      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
   67|       |              CGBuilderInserterTy(this)),
   68|       |      SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
   69|       |      PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
   70|   243k|                    CGM.getCodeGenOpts(), CGM.getLangOpts())) {
   71|   243k|  if (!suppressNewContext)
   72|   226k|    CGM.getCXXABI().getMangleContext().startNewFunction();
   73|   243k|
   74|   243k|  llvm::FastMathFlags FMF;
   75|   243k|  if (CGM.getLangOpts().FastMath)
   76|     87|    FMF.setFast();
   77|   243k|  if (CGM.getLangOpts().FiniteMathOnly) {
   78|      3|    FMF.setNoNaNs();
   79|      3|    FMF.setNoInfs();
   80|      3|  }
   81|   243k|  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
   82|     19|    FMF.setNoNaNs();
   83|     19|  }
   84|   243k|  if (CGM.getCodeGenOpts().NoSignedZeros) {
   85|      5|    FMF.setNoSignedZeros();
   86|      5|  }
   87|   243k|  if (CGM.getCodeGenOpts().ReciprocalMath) {
   88|      1|    FMF.setAllowReciprocal();
   89|      1|  }
   90|   243k|  if (CGM.getCodeGenOpts().Reassociate) {
   91|      1|    FMF.setAllowReassoc();
   92|      1|  }
   93|   243k|  Builder.setFastMathFlags(FMF);
   94|   243k|  SetFPModel();
   95|   243k|}
   96|       |
   97|   243k|CodeGenFunction::~CodeGenFunction() {
   98|   243k|  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
   99|   243k|
  100|   243k|  // If there are any unclaimed block infos, go ahead and destroy them
  101|   243k|  // now.  This can happen if IR-gen gets clever and skips evaluating
  102|   243k|  // something.
  103|   243k|  if (FirstBlockInfo)
  104|      0|    destroyBlockInfos(FirstBlockInfo);
  105|   243k|
  106|   243k|  if (getLangOpts().OpenMP && CurFn)
  107|  47.4k|    CGM.getOpenMPRuntime().functionFinished(*this);
  108|   243k|
  109|   243k|  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  110|   243k|  // outlining etc) at some point. Doing it once the function codegen is done
  111|   243k|  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  112|   243k|  // time of the CodeGenModule, because we have to ensure the IR has not yet
  113|   243k|  // been "emitted" to the outside, thus, modifications are still sensible.
  114|   243k|  if (llvm::OpenMPIRBuilder *OMPBuilder = CGM.getOpenMPIRBuilder())
  115|    138|    OMPBuilder->finalize();
  116|   243k|}
  117|       |
  118|       |// Map the LangOption for rounding mode into
  119|       |// the corresponding enum in the IR.
  120|       |static llvm::fp::RoundingMode ToConstrainedRoundingMD(
  121|   243k|  LangOptions::FPRoundingModeKind Kind) {
  122|   243k|
  123|   243k|  switch (Kind) {
  124|   243k|  case LangOptions::FPR_ToNearest:  return llvm::fp::rmToNearest;
  125|      0|  case LangOptions::FPR_Downward:   return llvm::fp::rmDownward;
  126|      0|  case LangOptions::FPR_Upward:     return llvm::fp::rmUpward;
  127|      0|  case LangOptions::FPR_TowardZero: return llvm::fp::rmTowardZero;
  128|     78|  case LangOptions::FPR_Dynamic:    return llvm::fp::rmDynamic;
  129|      0|  }
  130|      0|  llvm_unreachable("Unsupported FP RoundingMode");
  131|      0|}
  132|       |
  133|       |// Map the LangOption for exception behavior into
  134|       |// the corresponding enum in the IR.
  135|       |static llvm::fp::ExceptionBehavior ToConstrainedExceptMD(
  136|   243k|  LangOptions::FPExceptionModeKind Kind) {
  137|   243k|
  138|   243k|  switch (Kind) {
  139|   242k|  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  140|     54|  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  141|  1.29k|  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  142|      0|  }
  143|      0|  llvm_unreachable("Unsupported FP Exception Behavior");
  144|      0|}
  145|       |
  146|   243k|void CodeGenFunction::SetFPModel() {
  147|   243k|  auto fpRoundingMode = ToConstrainedRoundingMD(
  148|   243k|                          getLangOpts().getFPRoundingMode());
  149|   243k|  auto fpExceptionBehavior = ToConstrainedExceptMD(
  150|   243k|                               getLangOpts().getFPExceptionMode());
  151|   243k|
  152|   243k|  if (fpExceptionBehavior == llvm::fp::ebIgnore &&
  153|   243k|      fpRoundingMode == llvm::fp::rmToNearest)
  154|   242k|    // Constrained intrinsics are not used.
  155|   242k|    ;
  156|  1.37k|  else {
  157|  1.37k|    Builder.setIsFPConstrained(true);
  158|  1.37k|    Builder.setDefaultConstrainedRounding(fpRoundingMode);
  159|  1.37k|    Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
  160|  1.37k|  }
  161|   243k|}
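Note: once SetFPModel() has put the builder into constrained mode, ordinary
builder calls no longer produce bare FP instructions; they produce
llvm.experimental.constrained.* intrinsics carrying the default rounding and
exception-behavior operands set above. A minimal sketch of that effect
(illustrative only; the function name is hypothetical):

  #include "llvm/IR/IRBuilder.h"

  // With a constrained builder, CreateFAdd emits
  //   llvm.experimental.constrained.fadd(x, y, "round.dynamic", "fpexcept.strict")
  // rather than a plain 'fadd'.
  llvm::Value *addStrict(llvm::IRBuilder<> &B, llvm::Value *X, llvm::Value *Y) {
    B.setIsFPConstrained(true);
    B.setDefaultConstrainedRounding(llvm::fp::rmDynamic);
    B.setDefaultConstrainedExcept(llvm::fp::ebStrict);
    return B.CreateFAdd(X, Y, "sum");
  }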
  162|       |
  163|       |CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
  164|       |                                                    LValueBaseInfo *BaseInfo,
  165|   166k|                                                    TBAAAccessInfo *TBAAInfo) {
  166|   166k|  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
  167|   166k|                                 /* forPointeeType= */ true);
  168|   166k|}
  169|       |
  170|       |CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
  171|       |                                                   LValueBaseInfo *BaseInfo,
  172|       |                                                   TBAAAccessInfo *TBAAInfo,
  173|   345k|                                                   bool forPointeeType) {
  174|   345k|  if (TBAAInfo)
  175|   316k|    *TBAAInfo = CGM.getTBAAAccessInfo(T);
  176|   345k|
  177|   345k|  // Honor alignment typedef attributes even on incomplete types.
  178|   345k|  // We also honor them straight for C++ class types, even as pointees;
  179|   345k|  // there's an expressivity gap here.
  180|   345k|  if (auto TT = T->getAs<TypedefType>()) {
  181|  34.6k|    if (auto Align = TT->getDecl()->getMaxAlignment()) {
  182|    343|      if (BaseInfo)
  183|    343|        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
  184|    343|      return getContext().toCharUnitsFromBits(Align);
  185|    343|    }
  186|   344k|  }
  187|   344k|
  188|   344k|  if (BaseInfo)
  189|   326k|    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);
  190|   344k|
  191|   344k|  CharUnits Alignment;
  192|   344k|  if (T->isIncompleteType()) {
  193|    955|    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  194|   343k|  } else {
  195|   343k|    // For C++ class pointees, we don't know whether we're pointing at a
  196|   343k|    // base or a complete object, so we generally need to use the
  197|   343k|    // non-virtual alignment.
  198|   343k|    const CXXRecordDecl *RD;
  199|   343k|    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
  200|   204k|      Alignment = CGM.getClassPointerAlignment(RD);
  201|   204k|    } else {
  202|   139k|      Alignment = getContext().getTypeAlignInChars(T);
  203|   139k|      if (T.getQualifiers().hasUnaligned())
  204|      5|        Alignment = CharUnits::One();
  205|   139k|    }
  206|   343k|
  207|   343k|    // Cap to the global maximum type alignment unless the alignment
  208|   343k|    // was somehow explicit on the type.
  209|   343k|    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
  210|   159k|      if (Alignment.getQuantity() > MaxAlign &&
  211|   159k|          !getContext().isAlignmentRequired(T))
  212|      3|        Alignment = CharUnits::fromQuantity(MaxAlign);
  213|   159k|    }
  214|   343k|  }
  215|   344k|  return Alignment;
  216|   344k|}
  217|       |
  218|  30.0k|LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  219|  30.0k|  LValueBaseInfo BaseInfo;
  220|  30.0k|  TBAAAccessInfo TBAAInfo;
  221|  30.0k|  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  222|  30.0k|  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
  223|  30.0k|                          TBAAInfo);
  224|  30.0k|}
  225|       |
  226|       |/// Given a value of type T* that may not be to a complete object,
  227|       |/// construct an l-value with the natural pointee alignment of T.
  228|       |LValue
  229|  58.5k|CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  230|  58.5k|  LValueBaseInfo BaseInfo;
  231|  58.5k|  TBAAAccessInfo TBAAInfo;
  232|  58.5k|  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
  233|  58.5k|                                            /* forPointeeType= */ true);
  234|  58.5k|  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
  235|  58.5k|}
  236|       |
  237|       |
  238|   731k|llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  239|   731k|  return CGM.getTypes().ConvertTypeForMem(T);
  240|   731k|}
  241|       |
  242|  2.14M|llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  243|  2.14M|  return CGM.getTypes().ConvertType(T);
  244|  2.14M|}
  245|       |
  246|  4.45M|TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  247|  4.45M|  type = type.getCanonicalType();
  248|  4.45M|  while (true) {
  249|  4.45M|    switch (type->getTypeClass()) {
  250|      0|#define TYPE(name, parent)
  251|      0|#define ABSTRACT_TYPE(name, parent)
  252|      0|#define NON_CANONICAL_TYPE(name, parent) case Type::name:
  253|      0|#define DEPENDENT_TYPE(name, parent) case Type::name:
  254|      0|#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
  255|      0|#include "clang/AST/TypeNodes.inc"
  256|      0|      llvm_unreachable("non-canonical or dependent type in IR-generation");
  257|      0|
  258|      0|    case Type::Auto:
  259|      0|    case Type::DeducedTemplateSpecialization:
  260|      0|      llvm_unreachable("undeduced type in IR-generation");
  261|      0|
  262|      0|    // Various scalar types.
  263|  4.21M|    case Type::Builtin:
  264|  4.21M|    case Type::Pointer:
  265|  4.21M|    case Type::BlockPointer:
  266|  4.21M|    case Type::LValueReference:
  267|  4.21M|    case Type::RValueReference:
  268|  4.21M|    case Type::MemberPointer:
  269|  4.21M|    case Type::Vector:
  270|  4.21M|    case Type::ExtVector:
  271|  4.21M|    case Type::FunctionProto:
  272|  4.21M|    case Type::FunctionNoProto:
  273|  4.21M|    case Type::Enum:
  274|  4.21M|    case Type::ObjCObjectPointer:
  275|  4.21M|    case Type::Pipe:
  276|  4.21M|      return TEK_Scalar;
  277|  4.21M|
  278|  4.21M|    // Complexes.
  279|  4.21M|    case Type::Complex:
  280|  6.95k|      return TEK_Complex;
  281|  4.21M|
  282|  4.21M|    // Arrays, records, and Objective-C objects.
  283|   231k|    case Type::ConstantArray:
  284|   231k|    case Type::IncompleteArray:
  285|   231k|    case Type::VariableArray:
  286|   231k|    case Type::Record:
  287|   231k|    case Type::ObjCObject:
  288|   231k|    case Type::ObjCInterface:
  289|   231k|      return TEK_Aggregate;
  290|   231k|
  291|   231k|    // We operate on atomic values according to their underlying type.
  292|   231k|    case Type::Atomic:
  293|    271|      type = cast<AtomicType>(type)->getValueType();
  294|    271|      continue;
  295|      0|    }
  296|      0|    llvm_unreachable("unknown type kind!");
  297|      0|  }
  298|  4.45M|}
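Note: as a concrete reading of the switch above, these declarations land in
the three evaluation kinds (illustrative; _Complex and _Atomic are C
constructs that Clang also accepts in C++ as extensions):

  int i;                     // TEK_Scalar    (Type::Builtin)
  int *p;                    // TEK_Scalar    (Type::Pointer)
  _Complex double z;         // TEK_Complex   (Type::Complex)
  struct S { int a[4]; } s;  // TEK_Aggregate (Type::Record)
  int arr[8];                // TEK_Aggregate (Type::ConstantArray)
  _Atomic int ai;            // loops back through its value type: TEK_Scalar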
  299|       |
  300|   242k|llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  301|   242k|  // For cleanliness, we try to avoid emitting the return block for
  302|   242k|  // simple cases.
  303|   242k|  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  304|   242k|
  305|   242k|  if (CurBB) {
  306|   127k|    assert(!CurBB->getTerminator() && "Unexpected terminated block.");
  307|   127k|
  308|   127k|    // We have a valid insert point, reuse it if it is empty or there are no
  309|   127k|    // explicit jumps to the return block.
  310|   127k|    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
  311|   127k|      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
  312|   127k|      delete ReturnBlock.getBlock();
  313|   127k|      ReturnBlock = JumpDest();
  314|   127k|    } else
  315|     22|      EmitBlock(ReturnBlock.getBlock());
  316|   127k|    return llvm::DebugLoc();
  317|   127k|  }
  318|   115k|
  319|   115k|  // Otherwise, if the return block is the target of a single direct
  320|   115k|  // branch then we can just put the code in that block instead. This
  321|   115k|  // cleans up functions which started with a unified return block.
  322|   115k|  if (ReturnBlock.getBlock()->hasOneUse()) {
  323|   112k|    llvm::BranchInst *BI =
  324|   112k|      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
  325|   112k|    if (BI && BI->isUnconditional() &&
  326|   112k|        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
  327|   112k|      // Record/return the DebugLoc of the simple 'return' expression to be used
  328|   112k|      // later by the actual 'ret' instruction.
  329|   112k|      llvm::DebugLoc Loc = BI->getDebugLoc();
  330|   112k|      Builder.SetInsertPoint(BI->getParent());
  331|   112k|      BI->eraseFromParent();
  332|   112k|      delete ReturnBlock.getBlock();
  333|   112k|      ReturnBlock = JumpDest();
  334|   112k|      return Loc;
  335|   112k|    }
  336|  2.42k|  }
  337|  2.42k|
  338|  2.42k|  // FIXME: We are at an unreachable point, there is no reason to emit the block
  339|  2.42k|  // unless it has uses. However, we still need a place to put the debug
  340|  2.42k|  // region.end for now.
  341|  2.42k|
  342|  2.42k|  EmitBlock(ReturnBlock.getBlock());
  343|  2.42k|  return llvm::DebugLoc();
  344|  2.42k|}
  345|       |
  346|   969k|static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  347|   969k|  if (!BB) return;
  348|  7.74k|  if (!BB->use_empty())
  349|  7.73k|    return CGF.CurFn->getBasicBlockList().push_back(BB);
  350|      8|  delete BB;
  351|      8|}
  352|       |
  353|   242k|void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  354|   242k|  assert(BreakContinueStack.empty() &&
  355|   242k|         "mismatched push/pop in break/continue stack!");
  356|   242k|
  357|   242k|  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
  358|   242k|    && NumSimpleReturnExprs == NumReturnExprs
  359|   242k|    && ReturnBlock.getBlock()->use_empty();
  360|   242k|  // Usually the return expression is evaluated before the cleanup
  361|   242k|  // code.  If the function contains only a simple return statement,
  362|   242k|  // such as a constant, the location before the cleanup code becomes
  363|   242k|  // the last useful breakpoint in the function, because the simple
  364|   242k|  // return expression will be evaluated after the cleanup code. To be
  365|   242k|  // safe, set the debug location for cleanup code to the location of
  366|   242k|  // the return statement.  Otherwise the cleanup code should be at the
  367|   242k|  // end of the function's lexical scope.
  368|   242k|  //
  369|   242k|  // If there are multiple branches to the return block, the branch
  370|   242k|  // instructions will get the location of the return statements and
  371|   242k|  // all will be fine.
  372|   242k|  if (CGDebugInfo *DI = getDebugInfo()) {
  373|  88.9k|    if (OnlySimpleReturnStmts)
  374|    580|      DI->EmitLocation(Builder, LastStopPoint);
  375|  88.3k|    else
  376|  88.3k|      DI->EmitLocation(Builder, EndLoc);
  377|  88.9k|  }
  378|   242k|
  379|   242k|  // Pop any cleanups that might have been associated with the
  380|   242k|  // parameters.  Do this in whatever block we're currently in; it's
  381|   242k|  // important to do this before we enter the return block or return
  382|   242k|  // edges will be *really* confused.
  383|   242k|  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  384|   242k|  bool HasOnlyLifetimeMarkers =
  385|   242k|      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  386|   242k|  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  387|   242k|  if (HasCleanups) {
  388|  7.48k|    // Make sure the line table doesn't jump back into the body for
  389|  7.48k|    // the ret after it's been at EndLoc.
  390|  7.48k|    Optional<ApplyDebugLocation> AL;
  391|  7.48k|    if (CGDebugInfo *DI = getDebugInfo()) {
  392|  1.71k|      if (OnlySimpleReturnStmts)
  393|    580|        DI->EmitLocation(Builder, EndLoc);
  394|  1.13k|      else
  395|  1.13k|        // We may not have a valid end location. Try to apply it anyway, and
  396|  1.13k|        // fall back to an artificial location if needed.
  397|  1.13k|        AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
  398|  1.71k|    }
  399|  7.48k|
  400|  7.48k|    PopCleanupBlocks(PrologueCleanupDepth);
  401|  7.48k|  }
  402|   242k|
  403|   242k|  // Emit function epilog (to return).
  404|   242k|  llvm::DebugLoc Loc = EmitReturnBlock();
  405|   242k|
  406|   242k|  if (ShouldInstrumentFunction()) {
  407|     11|    if (CGM.getCodeGenOpts().InstrumentFunctions)
  408|      6|      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
  409|     11|    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
  410|      4|      CurFn->addFnAttr("instrument-function-exit-inlined",
  411|      4|                       "__cyg_profile_func_exit");
  412|     11|  }
  413|   242k|
  414|   242k|  // Emit debug descriptor for function end.
  415|   242k|  if (CGDebugInfo *DI = getDebugInfo())
  416|  88.9k|    DI->EmitFunctionEnd(Builder, CurFn);
  417|   242k|
  418|   242k|  // Reset the debug location to that of the simple 'return' expression, if any
  419|   242k|  // rather than that of the end of the function's scope '}'.
  420|   242k|  ApplyDebugLocation AL(*this, Loc);
  421|   242k|  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  422|   242k|  EmitEndEHSpec(CurCodeDecl);
  423|   242k|
  424|   242k|  assert(EHStack.empty() &&
  425|   242k|         "did not remove all scopes from cleanup stack!");
  426|   242k|
  427|   242k|  // If someone did an indirect goto, emit the indirect goto block at the end of
  428|   242k|  // the function.
  429|   242k|  if (IndirectBranch) {
  430|     35|    EmitBlock(IndirectBranch->getParent());
  431|     35|    Builder.ClearInsertionPoint();
  432|     35|  }
  433|   242k|
  434|   242k|  // If some of our locals escaped, insert a call to llvm.localescape in the
  435|   242k|  // entry block.
  436|   242k|  if (!EscapedLocals.empty()) {
  437|     32|    // Invert the map from local to index into a simple vector. There should be
  438|     32|    // no holes.
  439|     32|    SmallVector<llvm::Value *, 4> EscapeArgs;
  440|     32|    EscapeArgs.resize(EscapedLocals.size());
  441|     32|    for (auto &Pair : EscapedLocals)
  442|     37|      EscapeArgs[Pair.second] = Pair.first;
  443|     32|    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
  444|     32|        &CGM.getModule(), llvm::Intrinsic::localescape);
  445|     32|    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  446|     32|  }
  447|   242k|
  448|   242k|  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  449|   242k|  llvm::Instruction *Ptr = AllocaInsertPt;
  450|   242k|  AllocaInsertPt = nullptr;
  451|   242k|  Ptr->eraseFromParent();
  452|   242k|
  453|   242k|  // If someone took the address of a label but never did an indirect goto, we
  454|   242k|  // made a zero entry PHI node, which is illegal, zap it now.
  455|   242k|  if (IndirectBranch) {
  456|     35|    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
  457|     35|    if (PN->getNumIncomingValues() == 0) {
  458|     15|      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
  459|     15|      PN->eraseFromParent();
  460|     15|    }
  461|     35|  }
  462|   242k|
  463|   242k|  EmitIfUsed(*this, EHResumeBlock);
  464|   242k|  EmitIfUsed(*this, TerminateLandingPad);
  465|   242k|  EmitIfUsed(*this, TerminateHandler);
  466|   242k|  EmitIfUsed(*this, UnreachableBlock);
  467|   242k|
  468|   242k|  for (const auto &FuncletAndParent : TerminateFunclets)
  469|     19|    EmitIfUsed(*this, FuncletAndParent.second);
  470|   242k|
  471|   242k|  if (CGM.getCodeGenOpts().EmitDeclMetadata)
  472|  17.5k|    EmitDeclMetadata();
  473|   242k|
  474|   242k|  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
  475|   242k|           I = DeferredReplacements.begin(),
  476|   242k|           E = DeferredReplacements.end();
  477|   242k|       I != E; ++I) {
  478|     97|    I->first->replaceAllUsesWith(I->second);
  479|     97|    I->first->eraseFromParent();
  480|     97|  }
  481|   242k|
  482|   242k|  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
  483|   242k|  // PHIs if the current function is a coroutine. We don't do it for all
  484|   242k|  // functions as it may result in slight increase in numbers of instructions
  485|   242k|  // if compiled with no optimizations. We do it for coroutine as the lifetime
  486|   242k|  // of CleanupDestSlot alloca make correct coroutine frame building very
  487|   242k|  // difficult.
  488|   242k|  if (NormalCleanupDest.isValid() && isCoroutine()) {
  489|     42|    llvm::DominatorTree DT(*CurFn);
  490|     42|    llvm::PromoteMemToReg(
  491|     42|        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
  492|     42|    NormalCleanupDest = Address::invalid();
  493|     42|  }
  494|   242k|
  495|   242k|  // Scan function arguments for vector width.
  496|   242k|  for (llvm::Argument &A : CurFn->args())
  497|   388k|    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
  498|  73.2k|      LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
  499|  73.2k|                                   VT->getPrimitiveSizeInBits().getFixedSize());
  500|   242k|
  501|   242k|  // Update vector width based on return type.
  502|   242k|  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
  503|  34.7k|    LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
  504|  34.7k|                                  VT->getPrimitiveSizeInBits().getFixedSize());
  505|   242k|
  506|   242k|  // Add the required-vector-width attribute. This contains the max width from:
  507|   242k|  // 1. min-vector-width attribute used in the source program.
  508|   242k|  // 2. Any builtins used that have a vector width specified.
  509|   242k|  // 3. Values passed in and out of inline assembly.
  510|   242k|  // 4. Width of vector arguments and return types for this function.
  511|   242k|  // 5. Width of vector arguments and return types for functions called by this
  512|   242k|  //    function.
  513|   242k|  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
  514|   242k|
  515|   242k|  // If we generated an unreachable return block, delete it now.
  516|   242k|  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
  517|    929|    Builder.ClearInsertionPoint();
  518|    929|    ReturnBlock.getBlock()->eraseFromParent();
  519|    929|  }
  520|   242k|  if (ReturnValue.isValid()) {
  521|   116k|    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
  522|   116k|    if (RetAlloca && RetAlloca->use_empty()) {
  523|   101k|      RetAlloca->eraseFromParent();
  524|   101k|      ReturnValue = Address::invalid();
  525|   101k|    }
  526|   116k|  }
  527|   242k|}
  528|       |
  529|       |/// ShouldInstrumentFunction - Return true if the current function should be
  530|       |/// instrumented with __cyg_profile_func_* calls
  531|   484k|bool CodeGenFunction::ShouldInstrumentFunction() {
  532|   484k|  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
  533|   484k|      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
  534|   484k|      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
  535|   484k|    return false;
  536|     32|  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
  537|     10|    return false;
  538|     22|  return true;
  539|     22|}
  540|       |
  541|       |/// ShouldXRayInstrument - Return true if the current function should be
  542|       |/// instrumented with XRay nop sleds.
  543|   472k|bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  544|   472k|  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
  545|   472k|}
  546|       |
  547|       |/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
  548|       |/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
  549|      2|bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  550|      2|  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
  551|      2|         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
  552|      2|          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
  553|      1|              XRayInstrKind::Custom);
  554|      2|}
  555|       |
  556|      2|bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  557|      2|  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
  558|      2|         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
  559|      2|          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
  560|      1|              XRayInstrKind::Typed);
  561|      2|}
  562|       |
  563|       |llvm::Constant *
  564|       |CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
  565|    113|                                            llvm::Constant *Addr) {
  566|    113|  // Addresses stored in prologue data can't require run-time fixups and must
  567|    113|  // be PC-relative. Run-time fixups are undesirable because they necessitate
  568|    113|  // writable text segments, which are unsafe. And absolute addresses are
  569|    113|  // undesirable because they break PIE mode.
  570|    113|
  571|    113|  // Add a layer of indirection through a private global. Taking its address
  572|    113|  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  573|    113|  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
  574|    113|                                      /*isConstant=*/true,
  575|    113|                                      llvm::GlobalValue::PrivateLinkage, Addr);
  576|    113|
  577|    113|  // Create a PC-relative address.
  578|    113|  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  579|    113|  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  580|    113|  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  581|    113|  return (IntPtrTy == Int32Ty)
  582|    113|             ? PCRelAsInt
  583|    113|             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
  584|    113|}
  585|       |
  586|       |llvm::Value *
  587|       |CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
  588|      8|                                          llvm::Value *EncodedAddr) {
  589|      8|  // Reconstruct the address of the global.
  590|      8|  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  591|      8|  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  592|      8|  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  593|      8|  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
  594|      8|
  595|      8|  // Load the original pointer through the global.
  596|      8|  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
  597|      8|                            "decoded_addr");
  598|      8|}
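Note: stripped of the ConstantExpr and IRBuilder plumbing, the encode/decode
pair above is plain PC-relative offset arithmetic. A self-contained
restatement (names hypothetical; 'gv' stands for the private global that
holds the RTTI pointer):

  #include <cstdint>

  int32_t encodeAddr(uint64_t gv, uint64_t func) {
    return static_cast<int32_t>(gv - func);      // offset from the function, truncated to 32 bits
  }
  uint64_t decodeAddr(int32_t encoded, uint64_t func) {
    return func + static_cast<int64_t>(encoded); // sign-extend, then re-add the function address
  }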
  599|       |
  600|       |void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
  601|       |                                               llvm::Function *Fn)
  602|  1.49k|{
  603|  1.49k|  if (!FD->hasAttr<OpenCLKernelAttr>())
  604|  1.09k|    return;
  605|    403|
  606|    403|  llvm::LLVMContext &Context = getLLVMContext();
  607|    403|
  608|    403|  CGM.GenOpenCLArgMetadata(Fn, FD, this);
  609|    403|
  610|    403|  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
  611|      2|    QualType HintQTy = A->getTypeHint();
  612|      2|    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
  613|      2|    bool IsSignedInteger =
  614|      2|        HintQTy->isSignedIntegerType() ||
  615|      2|        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
  616|      2|    llvm::Metadata *AttrMDArgs[] = {
  617|      2|        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
  618|      2|            CGM.getTypes().ConvertType(A->getTypeHint()))),
  619|      2|        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
  620|      2|            llvm::IntegerType::get(Context, 32),
  621|      2|            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
  622|      2|    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  623|      2|  }
  624|    403|
  625|    403|  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
  626|      1|    llvm::Metadata *AttrMDArgs[] = {
  627|      1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
  628|      1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
  629|      1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
  630|      1|    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
  631|      1|  }
  632|    403|
  633|    403|  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
  634|      7|    llvm::Metadata *AttrMDArgs[] = {
  635|      7|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
  636|      7|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
  637|      7|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
  638|      7|    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
  639|      7|  }
  640|    403|
  641|    403|  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
  642|      1|          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
  643|      1|    llvm::Metadata *AttrMDArgs[] = {
  644|      1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
  645|      1|    Fn->setMetadata("intel_reqd_sub_group_size",
  646|      1|                    llvm::MDNode::get(Context, AttrMDArgs));
  647|      1|  }
  648|    403|}
  649|       |
  650|       |/// Determine whether the function F ends with a return stmt.
  651|   126k|static bool endsWithReturn(const Decl* F) {
  652|   126k|  const Stmt *Body = nullptr;
  653|   126k|  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
  654|  98.7k|    Body = FD->getBody();
  655|  27.3k|  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
  656|  1.21k|    Body = OMD->getBody();
  657|   126k|
  658|   126k|  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
  659|  98.4k|    auto LastStmt = CS->body_rbegin();
  660|  98.4k|    if (LastStmt != CS->body_rend())
  661|  56.8k|      return isa<ReturnStmt>(*LastStmt);
  662|  69.3k|  }
  663|  69.3k|  return false;
  664|  69.3k|}
  665|       |
  666|    326|void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  667|    326|  if (SanOpts.has(SanitizerKind::Thread)) {
  668|      4|    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
  669|      4|    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  670|      4|  }
  671|    326|}
  672|       |
  673|       |/// Check if the return value of this function requires sanitization.
  674|   359k|bool CodeGenFunction::requiresReturnValueCheck() const {
  675|   359k|  return requiresReturnValueNullabilityCheck() ||
  676|   359k|         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
  677|   359k|          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
  678|   359k|}
  679|       |
  680|     35|static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  681|     35|  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  682|     35|  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
  683|     35|      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
  684|     35|      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
  685|     33|    return false;
  686|      2|
  687|      2|  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
  688|      0|    return false;
  689|      2|
  690|      2|  if (MD->getNumParams() == 2) {
  691|      1|    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
  692|      1|    if (!PT || !PT->isVoidPointerType() ||
  693|      1|        !PT->getPointeeType().isConstQualified())
  694|      0|      return false;
  695|      2|  }
  696|      2|
  697|      2|  return true;
  698|      2|}
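Note: the matcher above accepts the usual STL allocator shape, i.e. roughly
the following declarations (illustrative only; any class name and namespace
qualify, which is why the caller's comment says not to match on std::):

  #include <cstddef>

  template <class T>
  struct PoolAllocator {
    T *allocate(std::size_t n);                   // one parameter of type size_t
    T *allocate(std::size_t n, const void *hint); // or a second 'const void *' parameter
  };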
699
700
/// Return the UBSan prologue signature for \p FD if one is available.
701
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
702
203
                                            const FunctionDecl *FD) {
703
203
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
704
96
    if (!MD->isStatic())
705
90
      return nullptr;
706
113
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
707
113
}
708
709
void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
710
                                    llvm::Function *Fn,
711
                                    const CGFunctionInfo &FnInfo,
712
                                    const FunctionArgList &Args,
713
                                    SourceLocation Loc,
714
242k
                                    SourceLocation StartLoc) {
715
242k
  assert(!CurFn &&
716
242k
         "Do not use a CodeGenFunction object for more than one function");
717
242k
718
242k
  const Decl *D = GD.getDecl();
719
242k
720
242k
  DidCallStackSave = false;
721
242k
  CurCodeDecl = D;
722
242k
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
723
212k
    if (FD->usesSEHTry())
724
100
      CurSEHParent = FD;
725
242k
  CurFuncDecl = (D ? 
D->getNonClosureContext()236k
:
nullptr5.89k
);
726
242k
  FnRetTy = RetTy;
727
242k
  CurFn = Fn;
728
242k
  CurFnInfo = &FnInfo;
729
242k
  assert(CurFn->isDeclaration() && "Function already has body?");
730
242k
731
242k
  // If this function has been blacklisted for any of the enabled sanitizers,
732
242k
  // disable the sanitizer for the function.
733
242k
  do {
734
242k
#define SANITIZER(NAME, ID)                                                    \
735
695k
  if (
SanOpts.empty()456k
) \
736
695k
    
break238k
; \
737
695k
  
if (218k
SanOpts.has(SanitizerKind::ID)218k
) \
738
218k
    
if (7.21k
CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)7.21k
) \
739
7.21k
      
SanOpts.set(SanitizerKind::ID, false)49
;
740
242k
741
242k
#include "clang/Basic/Sanitizers.def"
742
218k
#undef SANITIZER
743
218k
  } while (
04.11k
);
744
242k
745
242k
  if (D) {
746
236k
    // Apply the no_sanitize* attributes to SanOpts.
747
236k
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
748
934
      SanitizerMask mask = Attr->getMask();
749
934
      SanOpts.Mask &= ~mask;
750
934
      if (mask & SanitizerKind::Address)
751
52
        SanOpts.set(SanitizerKind::KernelAddress, false);
752
934
      if (mask & SanitizerKind::KernelAddress)
753
5
        SanOpts.set(SanitizerKind::Address, false);
754
934
      if (mask & SanitizerKind::HWAddress)
755
5
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
756
934
      if (mask & SanitizerKind::KernelHWAddress)
757
5
        SanOpts.set(SanitizerKind::HWAddress, false);
758
934
    }
759
236k
  }
760
242k
761
242k
  // Apply sanitizer attributes to the function.
762
242k
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
763
1.03k
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
764
242k
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
765
30
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
766
242k
  if (SanOpts.has(SanitizerKind::MemTag))
767
1
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
768
242k
  if (SanOpts.has(SanitizerKind::Thread))
769
62
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
770
242k
  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
771
205
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
772
242k
  if (SanOpts.has(SanitizerKind::SafeStack))
773
14
    Fn->addFnAttr(llvm::Attribute::SafeStack);
774
242k
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
775
1
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
776
242k
777
242k
  // Apply fuzzing attribute to the function.
778
242k
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
779
6
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
780
242k
781
242k
  // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
782
242k
  // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
783
242k
  if (SanOpts.has(SanitizerKind::Thread)) {
784
62
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
785
3
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
786
3
      if (OMD->getMethodFamily() == OMF_dealloc ||
787
3
          
OMD->getMethodFamily() == OMF_initialize2
||
788
3
          
(1
OMD->getSelector().isUnarySelector()1
&&
II->isStr(".cxx_destruct")1
)) {
789
3
        markAsIgnoreThreadCheckingAtRuntime(Fn);
790
3
      }
791
3
    }
792
62
  }
793
242k
794
242k
  // Ignore unrelated casts in STL allocate() since the allocator must cast
795
242k
  // from void* to T* before object initialization completes. Don't match on the
796
242k
  // namespace because not all allocators are in std::
797
242k
  if (D && 
SanOpts.has(SanitizerKind::CFIUnrelatedCast)236k
) {
798
35
    if (matchesStlAllocatorFn(D, getContext()))
799
2
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
800
35
  }
801
242k
802
242k
  // Ignore null checks in coroutine functions since the coroutines passes
803
242k
  // are not aware of how to move the extra UBSan instructions across the split
804
242k
  // coroutine boundaries.
805
242k
  if (D && 
SanOpts.has(SanitizerKind::Null)236k
)
806
242
    if (const auto *FD = dyn_cast<FunctionDecl>(D))
807
242
      if (FD->getBody() &&
808
242
          FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
809
1
        SanOpts.Mask &= ~SanitizerKind::Null;
810
242k
811
242k
  if (D) {
812
236k
    // Apply xray attributes to the function (as a string, for now)
813
236k
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
814
80
      if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
815
80
              XRayInstrKind::FunctionEntry) ||
816
80
          CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
817
76
              XRayInstrKind::FunctionExit)) {
818
76
        if (XRayAttr->alwaysXRayInstrument() && 
ShouldXRayInstrumentFunction()42
)
819
35
          Fn->addFnAttr("function-instrument", "xray-always");
820
76
        if (XRayAttr->neverXRayInstrument())
821
34
          Fn->addFnAttr("function-instrument", "xray-never");
822
76
        if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
823
1
          if (ShouldXRayInstrumentFunction())
824
1
            Fn->addFnAttr("xray-log-args",
825
1
                          llvm::utostr(LogArgs->getArgumentCount()));
826
76
      }
827
236k
    } else {
828
236k
      if (ShouldXRayInstrumentFunction() && 
!CGM.imbueXRayAttrs(Fn, Loc)20
)
829
8
        Fn->addFnAttr(
830
8
            "xray-instruction-threshold",
831
8
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
832
236k
    }
833
236k
834
236k
    if (ShouldXRayInstrumentFunction()) {
835
86
      if (CGM.getCodeGenOpts().XRayIgnoreLoops)
836
1
        Fn->addFnAttr("xray-ignore-loops");
837
86
838
86
      if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
839
86
              XRayInstrKind::FunctionExit))
840
6
        Fn->addFnAttr("xray-skip-exit");
841
86
842
86
      if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
843
86
              XRayInstrKind::FunctionEntry))
844
6
        Fn->addFnAttr("xray-skip-entry");
845
86
    }
846
236k
847
236k
    unsigned Count, Offset;
848
236k
    if (const auto *Attr = D->getAttr<PatchableFunctionEntryAttr>()) {
849
14
      Count = Attr->getCount();
850
14
      Offset = Attr->getOffset();
851
236k
    } else {
852
236k
      Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
853
236k
      Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
854
236k
    }
855
236k
    if (Count && 
Offset <= Count11
) {
856
11
      Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
857
11
      if (Offset)
858
4
        Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
859
11
    }
860
236k
  }
861
242k
862
242k
  // Add no-jump-tables value.
863
242k
  Fn->addFnAttr("no-jump-tables",
864
242k
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
865
242k
866
242k
  // Add no-inline-line-tables value.
867
242k
  if (CGM.getCodeGenOpts().NoInlineLineTables)
868
4
    Fn->addFnAttr("no-inline-line-tables");
869
242k
870
242k
  // Add profile-sample-accurate value.
871
242k
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
872
2
    Fn->addFnAttr("profile-sample-accurate");
873
242k
874
242k
  if (D && 
D->hasAttr<CFICanonicalJumpTableAttr>()236k
)
875
2
    Fn->addFnAttr("cfi-canonical-jump-table");
876
242k
877
242k
  if (getLangOpts().OpenCL) {
878
1.59k
    // Add metadata for a kernel function.
879
1.59k
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
880
1.49k
      EmitOpenCLKernelMetadata(FD, Fn);
881
1.59k
  }
882
242k
883
242k
  // If we are checking function types, emit a function type signature as
884
242k
  // prologue data.
885
242k
  if (getLangOpts().CPlusPlus && 
SanOpts.has(SanitizerKind::Function)172k
) {
886
210
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
887
203
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
888
113
        // Remove any (C++17) exception specifications, to allow calling e.g. a
889
113
        // noexcept function through a non-noexcept pointer.
890
113
        auto ProtoTy =
891
113
          getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
892
113
                                                        EST_None);
893
113
        llvm::Constant *FTRTTIConst =
894
113
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
895
113
        llvm::Constant *FTRTTIConstEncoded =
896
113
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
897
113
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
898
113
                                                 FTRTTIConstEncoded};
899
113
        llvm::Constant *PrologueStructConst =
900
113
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
901
113
        Fn->setPrologueData(PrologueStructConst);
902
113
      }
903
203
    }
904
210
  }
905
242k
906
242k
  // If we're checking nullability, we need to know whether we can check the
907
242k
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
908
242k
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
909
60
    auto Nullability = FnRetTy->getNullability(getContext());
910
60
    if (Nullability && 
*Nullability == NullabilityKind::NonNull17
) {
911
15
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
912
15
            
CurCodeDecl1
&&
CurCodeDecl->getAttr<ReturnsNonNullAttr>()1
))
913
14
        RetValNullabilityPrecondition =
914
14
            llvm::ConstantInt::getTrue(getLLVMContext());
915
15
    }
916
60
  }
917
242k
918
242k
  // If we're in C++ mode and the function name is "main", it is guaranteed
919
242k
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
920
242k
  // used within a program").
921
242k
  if (getLangOpts().CPlusPlus)
922
172k
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
923
145k
      if (FD->isMain())
924
3.35k
        Fn->addFnAttr(llvm::Attribute::NoRecurse);
925
242k
926
242k
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
927
212k
    if (FD->usesFPIntrin())
928
55
      Fn->addFnAttr(llvm::Attribute::StrictFP);
929
242k
930
242k
  // If a custom alignment is used, force realigning to this alignment on
931
242k
  // any main function which certainly will need it.
932
242k
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
933
212k
    if ((FD->isMain() || 
FD->isMSVCRTEntryPoint()207k
) &&
934
212k
        
CGM.getCodeGenOpts().StackAlignment4.80k
)
935
1
      Fn->addFnAttr("stackrealign");
936
242k
937
242k
  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
938
242k
939
242k
  // Create a marker to make it easy to insert allocas into the entryblock
940
242k
  // later.  Don't create this with the builder, because we don't want it
941
242k
  // folded.
942
242k
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
943
242k
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
944
242k
945
242k
  ReturnBlock = getJumpDestInCurrentScope("return");
946
242k
947
242k
  Builder.SetInsertPoint(EntryBB);
948
242k
949
242k
  // If we're checking the return value, allocate space for a pointer to a
950
242k
  // precise source location of the checked return statement.
951
242k
  if (requiresReturnValueCheck()) {
952
19
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
953
19
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
954
19
  }
955
242k
956
242k
  // Emit subprogram debug descriptor.
957
242k
  if (CGDebugInfo *DI = getDebugInfo()) {
958
88.9k
    // Reconstruct the type from the argument list so that implicit parameters,
959
88.9k
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
960
88.9k
    // convention.
961
88.9k
    CallingConv CC = CallingConv::CC_C;
962
88.9k
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
963
86.3k
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
964
86.3k
        CC = SrcFnTy->getCallConv();
965
88.9k
    SmallVector<QualType, 16> ArgTypes;
966
88.9k
    for (const VarDecl *VD : Args)
967
150k
      ArgTypes.push_back(VD->getType());
968
88.9k
    QualType FnType = getContext().getFunctionType(
969
88.9k
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
970
88.9k
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
971
88.9k
                          Builder);
972
88.9k
  }
973
242k
974
242k
  if (ShouldInstrumentFunction()) {
975
11
    if (CGM.getCodeGenOpts().InstrumentFunctions)
976
6
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
977
11
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
978
4
      CurFn->addFnAttr("instrument-function-entry-inlined",
979
4
                       "__cyg_profile_func_enter");
980
11
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
981
1
      CurFn->addFnAttr("instrument-function-entry-inlined",
982
1
                       "__cyg_profile_func_enter_bare");
983
11
  }
984
242k
985
242k
  // Since emitting the mcount call here impacts optimizations such as function
986
242k
  // inlining, we just add an attribute to insert a mcount call in backend.
987
242k
  // The attribute "counting-function" is set to mcount function name which is
988
242k
  // architecture dependent.
989
242k
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
990
119
    // Calls to fentry/mcount should not be generated if function has
991
119
    // the no_instrument_function attribute.
992
119
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
993
96
      if (CGM.getCodeGenOpts().CallFEntry)
994
4
        Fn->addFnAttr("fentry-call", "true");
995
92
      else {
996
92
        Fn->addFnAttr("instrument-function-entry-inlined",
997
92
                      getTarget().getMCountName());
998
92
      }
999
96
      if (CGM.getCodeGenOpts().MNopMCount) {
1000
2
        if (!CGM.getCodeGenOpts().CallFEntry)
1001
1
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1002
1
            << "-mnop-mcount" << "-mfentry";
1003
2
        Fn->addFnAttr("mnop-mcount");
1004
2
      }
1005
96
1006
96
      if (CGM.getCodeGenOpts().RecordMCount) {
1007
2
        if (!CGM.getCodeGenOpts().CallFEntry)
1008
1
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1009
1
            << "-mrecord-mcount" << "-mfentry";
1010
2
        Fn->addFnAttr("mrecord-mcount");
1011
2
      }
1012
96
    }
1013
119
  }
1014
242k
1015
242k
  if (CGM.getCodeGenOpts().PackedStack) {
1016
2
    if (getContext().getTargetInfo().getTriple().getArch() !=
1017
2
        llvm::Triple::systemz)
1018
1
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1019
1
        << "-mpacked-stack";
1020
2
    Fn->addFnAttr("packed-stack");
1021
2
  }
1022
242k
1023
242k
  if (RetTy->isVoidType()) {
1024
126k
    // Void type; nothing to return.
1025
126k
    ReturnValue = Address::invalid();
1026
126k
1027
126k
    // Count the implicit return.
1028
126k
    if (!endsWithReturn(D))
1029
124k
      ++NumReturnExprs;
1030
126k
  } else 
if (116k
CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect116k
) {
1031
2.19k
    // Indirect return; emit returned value directly into sret slot.
1032
2.19k
    // This reduces code size, and affects correctness in C++.
1033
2.19k
    auto AI = CurFn->arg_begin();
1034
2.19k
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1035
49
      ++AI;
1036
2.19k
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
1037
2.19k
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1038
862
      ReturnValuePointer =
1039
862
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
1040
862
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
1041
862
                              ReturnValue.getPointer(), Int8PtrTy),
1042
862
                          ReturnValuePointer);
1043
862
    }
1044
114k
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1045
114k
             
!hasScalarEvaluationKind(CurFnInfo->getReturnType())5
) {
1046
5
    // Load the sret pointer from the argument struct and return into that.
1047
5
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1048
5
    llvm::Function::arg_iterator EI = CurFn->arg_end();
1049
5
    --EI;
1050
5
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
1051
5
    ReturnValuePointer = Address(Addr, getPointerAlign());
1052
5
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
1053
5
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
1054
114k
  } else {
1055
114k
    ReturnValue = CreateIRTemp(RetTy, "retval");
1056
114k
1057
114k
    // Tell the epilog emitter to autorelease the result.  We do this
1058
114k
    // now so that various specialized functions can suppress it
1059
114k
    // during their IR-generation.
1060
114k
    if (getLangOpts().ObjCAutoRefCount &&
1061
114k
        
!CurFnInfo->isReturnsRetained()361
&&
1062
114k
        
RetTy->isObjCRetainableType()327
)
1063
133
      AutoreleaseResult = true;
1064
114k
  }
1065
242k
1066
242k
  EmitStartEHSpec(CurCodeDecl);
1067
242k
1068
242k
  PrologueCleanupDepth = EHStack.stable_begin();
1069
242k
1070
242k
  // Emit OpenMP specific initialization of the device functions.
1071
242k
  if (getLangOpts().OpenMP && 
CurCodeDecl47.4k
)
1072
42.9k
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1073
242k
1074
242k
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1075
242k
1076
242k
  if (D && 
isa<CXXMethodDecl>(D)236k
&&
cast<CXXMethodDecl>(D)->isInstance()95.2k
) {
1077
88.0k
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1078
88.0k
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1079
88.0k
    if (MD->getParent()->isLambda() &&
1080
88.0k
        
MD->getOverloadedOperator() == OO_Call1.60k
) {
1081
1.44k
      // We're in a lambda; figure out the captures.
1082
1.44k
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
1083
1.44k
                                        LambdaThisCaptureField);
1084
1.44k
      if (LambdaThisCaptureField) {
1085
68
        // If the lambda captures the object referred to by '*this' - either by
1086
68
        // value or by reference, make sure CXXThisValue points to the correct
1087
68
        // object.
1088
68
1089
68
        // Get the lvalue for the field (which is a copy of the enclosing object
1090
68
        // or contains the address of the enclosing object).
1091
68
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1092
68
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
1093
1
          // If the enclosing object was captured by value, just use its address.
1094
1
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1095
67
        } else {
1096
67
          // Load the lvalue pointed to by the field, since '*this' was captured
1097
67
          // by reference.
1098
67
          CXXThisValue =
1099
67
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1100
67
        }
1101
68
      }
1102
2.45k
      for (auto *FD : MD->getParent()->fields()) {
1103
2.45k
        if (FD->hasCapturedVLAType()) {
1104
24
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1105
24
                                           SourceLocation()).getScalarVal();
1106
24
          auto VAT = FD->getCapturedVLAType();
1107
24
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1108
24
        }
1109
2.45k
      }
1110
86.6k
    } else {
1111
86.6k
      // Not in a lambda; just use 'this' from the method.
1112
86.6k
      // FIXME: Should we generate a new load for each use of 'this'?  The
1113
86.6k
      // fast register allocator would be happier...
1114
86.6k
      CXXThisValue = CXXABIThisValue;
1115
86.6k
    }
1116
88.0k
1117
88.0k
    // Check the 'this' pointer once per function, if it's available.
1118
88.0k
    if (CXXABIThisValue) {
1119
88.0k
      SanitizerSet SkippedChecks;
1120
88.0k
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
1121
88.0k
      QualType ThisTy = MD->getThisType();
1122
88.0k
1123
88.0k
      // If this is the call operator of a lambda with no capture-default, it
1124
88.0k
      // may have a static invoker function, which may call this operator with
1125
88.0k
      // a null 'this' pointer.
1126
88.0k
      if (isLambdaCallOperator(MD) &&
1127
88.0k
          
MD->getParent()->getLambdaCaptureDefault() == LCD_None1.44k
)
1128
297
        SkippedChecks.set(SanitizerKind::Null, true);
1129
88.0k
1130
88.0k
      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? 
TCK_ConstructorCall35.0k
1131
88.0k
                                                : 
TCK_MemberCall53.0k
,
1132
88.0k
                    Loc, CXXABIThisValue, ThisTy,
1133
88.0k
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1134
88.0k
                    SkippedChecks);
1135
88.0k
    }
1136
88.0k
  }
1137
242k
1138
242k
  // If any of the arguments have a variably modified type, make sure to
1139
242k
  // emit the type size.
1140
242k
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1141
634k
       i != e; 
++i392k
) {
1142
392k
    const VarDecl *VD = *i;
1143
392k
1144
392k
    // Dig out the type as written from ParmVarDecls; it's unclear whether
1145
392k
    // the standard (C99 6.9.1p10) requires this, but we're following the
1146
392k
    // precedent set by gcc.
1147
392k
    QualType Ty;
1148
392k
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1149
238k
      Ty = PVD->getOriginalType();
1150
153k
    else
1151
153k
      Ty = VD->getType();
1152
392k
1153
392k
    if (Ty->isVariablyModifiedType())
1154
121
      EmitVariablyModifiedType(Ty);
1155
392k
  }
1156
242k
  // Emit a location at the end of the prologue.
1157
242k
  if (CGDebugInfo *DI = getDebugInfo())
1158
88.9k
    DI->EmitLocation(Builder, StartLoc);
1159
242k
1160
242k
  // TODO: Do we need to handle this in two places like we do with
1161
242k
  // target-features/target-cpu?
1162
242k
  if (CurFuncDecl)
1163
231k
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1164
7.27k
      LargestVectorWidth = VecWidth->getVectorWidth();
1165
242k
}
1166
1167
162k
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1168
162k
  incrementProfileCounter(Body);
1169
162k
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1170
162k
    EmitCompoundStmtWithoutScope(*S);
1171
46
  else
1172
46
    EmitStmt(Body);
1173
162k
}
1174
1175
/// When instrumenting to collect profile data, the counts for some blocks
1176
/// such as switch cases need to not include the fall-through counts, so
1177
/// emit a branch around the instrumentation code. When not instrumenting,
1178
/// this just calls EmitBlock().
1179
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1180
1.60k
                                               const Stmt *S) {
1181
1.60k
  llvm::BasicBlock *SkipCountBB = nullptr;
1182
1.60k
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1183
22
    // When instrumenting for profiling, the fallthrough to certain
1184
22
    // statements needs to skip over the instrumentation code so that we
1185
22
    // get an accurate count.
1186
22
    SkipCountBB = createBasicBlock("skipcount");
1187
22
    EmitBranch(SkipCountBB);
1188
22
  }
1189
1.60k
  EmitBlock(BB);
1190
1.60k
  uint64_t CurrentCount = getCurrentProfileCount();
1191
1.60k
  incrementProfileCounter(S);
1192
1.60k
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1193
1.60k
  if (SkipCountBB)
1194
22
    EmitBlock(SkipCountBB);
1195
1.60k
}
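
For illustration, a minimal sketch (hypothetical names, not from the LLVM sources) of the kind of construct this path instruments: with -fprofile-instr-generate, each case label gets its own counter, and the branch to the "skipcount" block keeps a fall-through from the previous case out of that counter.

    // Hypothetical example; compile with: clang -fprofile-instr-generate
    int classify(int x) {
      switch (x) {
      case 1:
        ++x;        // falls through into case 2
      case 2:       // case 2's counter must not include the fall-through,
        return x;   // so a branch skips over the instrumentation code
      default:
        return 0;
      }
    }
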
1196
1197
/// Tries to mark the given function nounwind based on the
1198
/// non-existence of any throwing calls within it.  We believe this is
1199
/// lightweight enough to do at -O0.
1200
153k
static void TryMarkNoThrow(llvm::Function *F) {
1201
153k
  // LLVM treats 'nounwind' on a function as part of the type, so we
1202
153k
  // can't do this on functions that can be overwritten.
1203
153k
  if (F->isInterposable()) return;
1204
153k
1205
153k
  for (llvm::BasicBlock &BB : *F)
1206
232k
    for (llvm::Instruction &I : BB)
1207
2.04M
      if (I.mayThrow())
1208
65.3k
        return;
1209
153k
1210
153k
  F->setDoesNotThrow();
1211
87.7k
}
1212
1213
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1214
211k
                                               FunctionArgList &Args) {
1215
211k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1216
211k
  QualType ResTy = FD->getReturnType();
1217
211k
1218
211k
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1219
211k
  if (MD && MD->isInstance()) {
1220
88.1k
    if (CGM.getCXXABI().HasThisReturn(GD))
1221
1.57k
      ResTy = MD->getThisType();
1222
86.5k
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1223
204
      ResTy = CGM.getContext().VoidPtrTy;
1224
88.1k
    CGM.getCXXABI().buildThisParam(*this, Args);
1225
88.1k
  }
1226
211k
1227
211k
  // The base version of an inheriting constructor whose constructed base is a
1228
211k
  // virtual base is not passed any arguments (because it doesn't actually call
1229
211k
  // the inherited constructor).
1230
211k
  bool PassedParams = true;
1231
211k
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1232
35.0k
    if (auto Inherited = CD->getInheritedConstructor())
1233
225
      PassedParams =
1234
225
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1235
211k
1236
211k
  if (PassedParams) {
1237
237k
    for (auto *Param : FD->parameters()) {
1238
237k
      Args.push_back(Param);
1239
237k
      if (!Param->hasAttr<PassObjectSizeAttr>())
1240
237k
        continue;
1241
88
1242
88
      auto *Implicit = ImplicitParamDecl::Create(
1243
88
          getContext(), Param->getDeclContext(), Param->getLocation(),
1244
88
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1245
88
      SizeArguments[Param] = Implicit;
1246
88
      Args.push_back(Implicit);
1247
88
    }
1248
211k
  }
1249
211k
1250
211k
  if (MD && 
(95.3k
isa<CXXConstructorDecl>(MD)95.3k
||
isa<CXXDestructorDecl>(MD)60.2k
))
1251
48.7k
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1252
211k
1253
211k
  return ResTy;
1254
211k
}
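
The PassObjectSizeAttr loop above adds one implicit size_t argument per annotated parameter. A minimal sketch of source that triggers it (hypothetical function names; pass_object_size is a Clang extension):

    // The callee's IR signature gains an implicit size_t per annotated
    // parameter, filled in at each call site.
    int checked_fill(char *buf __attribute__((pass_object_size(0))), char c);

    int use(void) {
      char local[16];
      return checked_fill(local, 'x'); // implicit size argument is 16 here
    }
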
1255
1256
static bool
1257
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1258
21
                                             const ASTContext &Context) {
1259
21
  QualType T = FD->getReturnType();
1260
21
  // Avoid the optimization for functions that return a record type with a
1261
21
  // trivial destructor or another trivially copyable type.
1262
21
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1263
12
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1264
12
      return !ClassDecl->hasTrivialDestructor();
1265
9
  }
1266
9
  return !T.isTriviallyCopyableType(Context);
1267
9
}
1268
1269
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1270
211k
                                   const CGFunctionInfo &FnInfo) {
1271
211k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1272
211k
  CurGD = GD;
1273
211k
1274
211k
  FunctionArgList Args;
1275
211k
  QualType ResTy = BuildFunctionArgList(GD, Args);
1276
211k
1277
211k
  // Check if we should generate debug info for this function.
1278
211k
  if (FD->hasAttr<NoDebugAttr>())
1279
11.7k
    DebugInfo = nullptr; // disable debug info indefinitely for this function
1280
211k
1281
211k
  // The function might not have a body if we're generating thunks for a
1282
211k
  // function declaration.
1283
211k
  SourceRange BodyRange;
1284
211k
  if (Stmt *Body = FD->getBody())
1285
211k
    BodyRange = Body->getSourceRange();
1286
125
  else
1287
125
    BodyRange = FD->getLocation();
1288
211k
  CurEHLocation = BodyRange.getEnd();
1289
211k
1290
211k
  // Use the location of the start of the function to determine where
1291
211k
  // the function definition is located. By default use the location
1292
211k
  // of the declaration as the location for the subprogram. A function
1293
211k
  // may lack a declaration in the source code if it is created by code
1294
211k
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1295
211k
  SourceLocation Loc = FD->getLocation();
1296
211k
1297
211k
  // If this is a function specialization then use the pattern body
1298
211k
  // as the location for the function.
1299
211k
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1300
84.7k
    if (SpecDecl->hasBody(SpecDecl))
1301
84.6k
      Loc = SpecDecl->getLocation();
1302
211k
1303
211k
  Stmt *Body = FD->getBody();
1304
211k
1305
211k
  // Initialize helper which will detect jumps which can cause invalid lifetime
1306
211k
  // markers.
1307
211k
  if (Body && 
ShouldEmitLifetimeMarkers211k
)
1308
13.6k
    Bypasses.Init(Body);
1309
211k
1310
211k
  // Emit the standard function prologue.
1311
211k
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1312
211k
1313
211k
  // Generate the body of the function.
1314
211k
  PGO.assignRegionCounters(GD, CurFn);
1315
211k
  if (isa<CXXDestructorDecl>(FD))
1316
13.6k
    EmitDestructorBody(Args);
1317
197k
  else if (isa<CXXConstructorDecl>(FD))
1318
35.0k
    EmitConstructorBody(Args);
1319
162k
  else if (getLangOpts().CUDA &&
1320
162k
           !getLangOpts().CUDAIsDevice &&
1321
162k
           FD->hasAttr<CUDAGlobalAttr>())
1322
31
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1323
162k
  else if (isa<CXXMethodDecl>(FD) &&
1324
162k
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1325
49
    // The lambda static invoker function is special, because it forwards or
1326
49
    // clones the body of the function call operator (but is actually static).
1327
49
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1328
162k
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1329
162k
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1330
702
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1331
686
    // Implicit copy-assignment gets the same special treatment as implicit
1332
686
    // copy-constructors.
1333
686
    emitImplicitAssignmentOperatorBody(Args);
1334
162k
  } else if (Body) {
1335
162k
    EmitFunctionBody(Body);
1336
162k
  } else
1337
162k
    llvm_unreachable("no definition for emitted function");
1338
211k
1339
211k
  // C++11 [stmt.return]p2:
1340
211k
  //   Flowing off the end of a function [...] results in undefined behavior in
1341
211k
  //   a value-returning function.
1342
211k
  // C11 6.9.1p12:
1343
211k
  //   If the '}' that terminates a function is reached, and the value of the
1344
211k
  //   function call is used by the caller, the behavior is undefined.
1345
211k
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1346
211k
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1347
167
    bool ShouldEmitUnreachable =
1348
167
        CGM.getCodeGenOpts().StrictReturn ||
1349
167
        
shouldUseUndefinedBehaviorReturnOptimization(FD, getContext())21
;
1350
167
    if (SanOpts.has(SanitizerKind::Return)) {
1351
1
      SanitizerScope SanScope(this);
1352
1
      llvm::Value *IsFalse = Builder.getFalse();
1353
1
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1354
1
                SanitizerHandler::MissingReturn,
1355
1
                EmitCheckSourceLocation(FD->getLocation()), None);
1356
166
    } else if (ShouldEmitUnreachable) {
1357
148
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1358
117
        EmitTrapCall(llvm::Intrinsic::trap);
1359
148
    }
1360
167
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1361
149
      Builder.CreateUnreachable();
1362
149
      Builder.ClearInsertionPoint();
1363
149
    }
1364
167
  }
1365
211k
1366
211k
  // Emit the standard function epilogue.
1367
211k
  FinishFunction(BodyRange.getEnd());
1368
211k
1369
211k
  // If we haven't marked the function nothrow through other means, do
1370
211k
  // a quick pass now to see if we can.
1371
211k
  if (!CurFn->doesNotThrow())
1372
153k
    TryMarkNoThrow(CurFn);
1373
211k
}
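
A minimal sketch of the fall-off-the-end case handled just before FinishFunction (hypothetical function name): at -O0 with strict returns, codegen plants llvm.trap plus unreachable at the point where control flows off the end, and with -fsanitize=return it calls the MissingReturn handler instead.

    // Undefined behavior in C++ if x <= 0: control flows off the end of a
    // value-returning function, so the block ends in 'unreachable'.
    int pick(int x) {
      if (x > 0)
        return 1;
    }
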
1374
1375
/// ContainsLabel - Return true if the statement contains a label in it.  If
1376
/// this statement is not executed normally, it not containing a label means
1377
/// that we can just remove the code.
1378
38.7k
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1379
38.7k
  // Null statement, not a label!
1380
38.7k
  if (!S) return false;
1381
38.6k
1382
38.6k
  // If this is a label, we have to emit the code, consider something like:
1383
38.6k
  // if (0) {  ...  foo:  bar(); }  goto foo;
1384
38.6k
  //
1385
38.6k
  // TODO: If anyone cared, we could track __label__'s, since we know that you
1386
38.6k
  // can't jump to one from outside their declared region.
1387
38.6k
  if (isa<LabelStmt>(S))
1388
5
    return true;
1389
38.6k
1390
38.6k
  // If this is a case/default statement, and we haven't seen a switch, we have
1391
38.6k
  // to emit the code.
1392
38.6k
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1393
0
    return true;
1394
38.6k
1395
38.6k
  // If this is a switch statement, we want to ignore cases below it.
1396
38.6k
  if (isa<SwitchStmt>(S))
1397
10
    IgnoreCaseStmts = true;
1398
38.6k
1399
38.6k
  // Scan subexpressions for verboten labels.
1400
38.6k
  for (const Stmt *SubStmt : S->children())
1401
28.5k
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1402
11
      return true;
1403
38.6k
1404
38.6k
  return false;
1405
38.6k
}
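
A compilable version of the example from the comment above (hypothetical names): the statically dead block cannot simply be dropped, because the goto jumps into it.

    void bar(void);
    void f(void) {
      if (0) {      // statically false, but the body must still be emitted
      foo:
        bar();
      }
      goto foo;     // jumps into the otherwise-unreachable block
    }
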
1406
1407
/// containsBreak - Return true if the statement contains a break out of it.
1408
/// If the statement (recursively) contains a switch or loop with a break
1409
/// inside of it, this is fine.
1410
952
bool CodeGenFunction::containsBreak(const Stmt *S) {
1411
952
  // Null statement, no break!
1412
952
  if (!S) return false;
1413
952
1414
952
  // If this is a switch or loop that defines its own break scope, then we can
1415
952
  // include it and anything inside of it.
1416
952
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1417
952
      isa<ForStmt>(S))
1418
3
    return false;
1419
949
1420
949
  if (isa<BreakStmt>(S))
1421
1
    return true;
1422
948
1423
948
  // Scan subexpressions for verboten breaks.
1424
948
  for (const Stmt *SubStmt : S->children())
1425
870
    if (containsBreak(SubStmt))
1426
1
      return true;
1427
948
1428
948
  return false;
1429
948
}
1430
1431
2.61k
bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1432
2.61k
  if (!S) return false;
1433
2.61k
1434
2.61k
  // Some statement kinds add a scope and thus never add a decl to the current
1435
2.61k
  // scope. Note, this list is longer than the list of statements that might
1436
2.61k
  // have an unscoped decl nested within them, but this way is conservatively
1437
2.61k
  // correct even if more statement kinds are added.
1438
2.61k
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1439
2.61k
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1440
2.61k
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1441
2.61k
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1442
15
    return false;
1443
2.60k
1444
2.60k
  if (isa<DeclStmt>(S))
1445
7
    return true;
1446
2.59k
1447
2.59k
  for (const Stmt *SubStmt : S->children())
1448
2.34k
    if (mightAddDeclToScope(SubStmt))
1449
2
      return true;
1450
2.59k
1451
2.59k
  return false;
1452
2.59k
}
1453
1454
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1455
/// to a constant, or if it does but contains a label, return false.  If it
1456
/// constant folds return true and set the boolean result in Result.
1457
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1458
                                                   bool &ResultBool,
1459
94.4k
                                                   bool AllowLabels) {
1460
94.4k
  llvm::APSInt ResultInt;
1461
94.4k
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1462
85.0k
    return false;
1463
9.42k
1464
9.42k
  ResultBool = ResultInt.getBoolValue();
1465
9.42k
  return true;
1466
9.42k
}
1467
1468
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1469
/// to a constant, or if it does but contains a label, return false.  If it
1470
/// constant folds return true and set the folded value.
1471
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1472
                                                   llvm::APSInt &ResultInt,
1473
94.8k
                                                   bool AllowLabels) {
1474
94.8k
  // FIXME: Rename and handle conversion of other evaluatable things
1475
94.8k
  // to bool.
1476
94.8k
  Expr::EvalResult Result;
1477
94.8k
  if (!Cond->EvaluateAsInt(Result, getContext()))
1478
85.3k
    return false;  // Not foldable, not integer or not fully evaluatable.
1479
9.51k
1480
9.51k
  llvm::APSInt Int = Result.Val.getInt();
1481
9.51k
  if (!AllowLabels && 
CodeGenFunction::ContainsLabel(Cond)9.51k
)
1482
0
    return false;  // Contains a label.
1483
9.51k
1484
9.51k
  ResultInt = Int;
1485
9.51k
  return true;
1486
9.51k
}
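
A minimal sketch of a condition this helper folds (hypothetical name): the branch disappears entirely and only the taken arm is emitted.

    // 'sizeof(long) >= 4' evaluates as an integer constant, so no
    // conditional branch is generated; only the 'return 1' arm is emitted.
    int g(void) {
      if (sizeof(long) >= 4)
        return 1;
      return 0;
    }
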
1487
1488
1489
1490
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1491
/// statement) to the specified blocks.  Based on the condition, this might try
1492
/// to simplify the codegen of the conditional based on the branch.
1493
///
1494
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1495
                                           llvm::BasicBlock *TrueBlock,
1496
                                           llvm::BasicBlock *FalseBlock,
1497
95.8k
                                           uint64_t TrueCount) {
1498
95.8k
  Cond = Cond->IgnoreParens();
1499
95.8k
1500
95.8k
  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1501
46.3k
1502
46.3k
    // Handle X && Y in a condition.
1503
46.3k
    if (CondBOp->getOpcode() == BO_LAnd) {
1504
2.87k
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
1505
2.87k
      // folded if the case was simple enough.
1506
2.87k
      bool ConstantBool = false;
1507
2.87k
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1508
2.87k
          ConstantBool) {
1509
58
        // br(1 && X) -> br(X).
1510
58
        incrementProfileCounter(CondBOp);
1511
58
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1512
58
                                    TrueCount);
1513
58
      }
1514
2.81k
1515
2.81k
      // If we have "X && 1", simplify the code to use an uncond branch.
1516
2.81k
      // "X && 0" would have been constant folded to 0.
1517
2.81k
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1518
2.81k
          ConstantBool) {
1519
14
        // br(X && 1) -> br(X).
1520
14
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1521
14
                                    TrueCount);
1522
14
      }
1523
2.80k
1524
2.80k
      // Emit the LHS as a conditional.  If the LHS conditional is false, we
1525
2.80k
      // want to jump to the FalseBlock.
1526
2.80k
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1527
2.80k
      // The counter tells us how often we evaluate RHS, and all of TrueCount
1528
2.80k
      // can be propagated to that branch.
1529
2.80k
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1530
2.80k
1531
2.80k
      ConditionalEvaluation eval(*this);
1532
2.80k
      {
1533
2.80k
        ApplyDebugLocation DL(*this, Cond);
1534
2.80k
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
1535
2.80k
        EmitBlock(LHSTrue);
1536
2.80k
      }
1537
2.80k
1538
2.80k
      incrementProfileCounter(CondBOp);
1539
2.80k
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1540
2.80k
1541
2.80k
      // Any temporaries created here are conditional.
1542
2.80k
      eval.begin(*this);
1543
2.80k
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
1544
2.80k
      eval.end(*this);
1545
2.80k
1546
2.80k
      return;
1547
2.80k
    }
1548
43.4k
1549
43.4k
    if (CondBOp->getOpcode() == BO_LOr) {
1550
3.42k
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
1551
3.42k
      // folded if the case was simple enough.
1552
3.42k
      bool ConstantBool = false;
1553
3.42k
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1554
3.42k
          !ConstantBool) {
1555
5
        // br(0 || X) -> br(X).
1556
5
        incrementProfileCounter(CondBOp);
1557
5
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1558
5
                                    TrueCount);
1559
5
      }
1560
3.42k
1561
3.42k
      // If we have "X || 0", simplify the code to use an uncond branch.
1562
3.42k
      // "X || 1" would have been constant folded to 1.
1563
3.42k
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1564
3.42k
          !ConstantBool) {
1565
0
        // br(X || 0) -> br(X).
1566
0
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1567
0
                                    TrueCount);
1568
0
      }
1569
3.42k
1570
3.42k
      // Emit the LHS as a conditional.  If the LHS conditional is true, we
1571
3.42k
      // want to jump to the TrueBlock.
1572
3.42k
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1573
3.42k
      // We have the count for entry to the RHS and for the whole expression
1574
3.42k
      // being true, so we can divy up True count between the short circuit and
1575
3.42k
      // the RHS.
1576
3.42k
      uint64_t LHSCount =
1577
3.42k
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1578
3.42k
      uint64_t RHSCount = TrueCount - LHSCount;
1579
3.42k
1580
3.42k
      ConditionalEvaluation eval(*this);
1581
3.42k
      {
1582
3.42k
        ApplyDebugLocation DL(*this, Cond);
1583
3.42k
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
1584
3.42k
        EmitBlock(LHSFalse);
1585
3.42k
      }
1586
3.42k
1587
3.42k
      incrementProfileCounter(CondBOp);
1588
3.42k
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1589
3.42k
1590
3.42k
      // Any temporaries created here are conditional.
1591
3.42k
      eval.begin(*this);
1592
3.42k
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
1593
3.42k
1594
3.42k
      eval.end(*this);
1595
3.42k
1596
3.42k
      return;
1597
3.42k
    }
1598
43.4k
  }
1599
89.5k
1600
89.5k
  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1601
1.12k
    // br(!x, t, f) -> br(x, f, t)
1602
1.12k
    if (CondUOp->getOpcode() == UO_LNot) {
1603
1.12k
      // Negate the count.
1604
1.12k
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1605
1.12k
      // Negate the condition and swap the destination blocks.
1606
1.12k
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1607
1.12k
                                  FalseCount);
1608
1.12k
    }
1609
88.4k
  }
1610
88.4k
1611
88.4k
  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1612
0
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1613
0
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1614
0
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1615
0
1616
0
    ConditionalEvaluation cond(*this);
1617
0
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1618
0
                         getProfileCount(CondOp));
1619
0
1620
0
    // When computing PGO branch weights, we only know the overall count for
1621
0
    // the true block. This code is essentially doing tail duplication of the
1622
0
    // naive code-gen, introducing new edges for which counts are not
1623
0
    // available. Divide the counts proportionally between the LHS and RHS of
1624
0
    // the conditional operator.
1625
0
    uint64_t LHSScaledTrueCount = 0;
1626
0
    if (TrueCount) {
1627
0
      double LHSRatio =
1628
0
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
1629
0
      LHSScaledTrueCount = TrueCount * LHSRatio;
1630
0
    }
1631
0
1632
0
    cond.begin(*this);
1633
0
    EmitBlock(LHSBlock);
1634
0
    incrementProfileCounter(CondOp);
1635
0
    {
1636
0
      ApplyDebugLocation DL(*this, Cond);
1637
0
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1638
0
                           LHSScaledTrueCount);
1639
0
    }
1640
0
    cond.end(*this);
1641
0
1642
0
    cond.begin(*this);
1643
0
    EmitBlock(RHSBlock);
1644
0
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1645
0
                         TrueCount - LHSScaledTrueCount);
1646
0
    cond.end(*this);
1647
0
1648
0
    return;
1649
0
  }
1650
88.4k
1651
88.4k
  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1652
0
    // Conditional operator handling can give us a throw expression as a
1653
0
    // condition for a case like:
1654
0
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1655
0
    // Fold this to:
1656
0
    //   br(c, throw x, br(y, t, f))
1657
0
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1658
0
    return;
1659
0
  }
1660
88.4k
1661
88.4k
  // If the branch has a condition wrapped by __builtin_unpredictable,
1662
88.4k
  // create metadata that specifies that the branch is unpredictable.
1663
88.4k
  // Don't bother if not optimizing because that metadata would not be used.
1664
88.4k
  llvm::MDNode *Unpredictable = nullptr;
1665
88.4k
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1666
88.4k
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1667
50
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1668
50
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1669
3
      llvm::MDBuilder MDHelper(getLLVMContext());
1670
3
      Unpredictable = MDHelper.createUnpredictable();
1671
3
    }
1672
50
  }
1673
88.4k
1674
88.4k
  // Create branch weights based on the number of times we get here and the
1675
88.4k
  // number of times the condition should be true.
1676
88.4k
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1677
88.4k
  llvm::MDNode *Weights =
1678
88.4k
      createProfileWeights(TrueCount, CurrentCount - TrueCount);
1679
88.4k
1680
88.4k
  // Emit the code with the fully general case.
1681
88.4k
  llvm::Value *CondV;
1682
88.4k
  {
1683
88.4k
    ApplyDebugLocation DL(*this, Cond);
1684
88.4k
    CondV = EvaluateExprAsBool(Cond);
1685
88.4k
  }
1686
88.4k
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1687
88.4k
}
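
A sketch of the block structure the BO_LAnd case above produces (hypothetical names; the block label matches the createBasicBlock call in the code):

    // if (a && b) lowers to two conditional branches:
    //   entry:         br %a, label %land.lhs.true, label %if.else
    //   land.lhs.true: br %b, label %if.then,       label %if.else
    // and '!x' is handled by simply swapping the true/false destinations.
    void t(void), f(void);
    void branch(int a, int b) {
      if (a && b)
        t();
      else
        f();
    }
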
1688
1689
/// ErrorUnsupported - Print out an error that codegen doesn't support the
1690
/// specified stmt yet.
1691
1
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1692
1
  CGM.ErrorUnsupported(S, Type);
1693
1
}
1694
1695
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
1696
/// variable-length array whose elements have a non-zero bit-pattern.
1697
///
1698
/// \param baseType the inner-most element type of the array
1699
/// \param src - a char* pointing to the bit-pattern for a single
1700
/// base element of the array
1701
/// \param sizeInChars - the total size of the VLA, in chars
1702
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1703
                               Address dest, Address src,
1704
0
                               llvm::Value *sizeInChars) {
1705
0
  CGBuilderTy &Builder = CGF.Builder;
1706
0
1707
0
  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1708
0
  llvm::Value *baseSizeInChars
1709
0
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1710
0
1711
0
  Address begin =
1712
0
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1713
0
  llvm::Value *end =
1714
0
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1715
0
1716
0
  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1717
0
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1718
0
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1719
0
1720
0
  // Make a loop over the VLA.  C99 guarantees that the VLA element
1721
0
  // count must be nonzero.
1722
0
  CGF.EmitBlock(loopBB);
1723
0
1724
0
  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1725
0
  cur->addIncoming(begin.getPointer(), originBB);
1726
0
1727
0
  CharUnits curAlign =
1728
0
    dest.getAlignment().alignmentOfArrayElement(baseSize);
1729
0
1730
0
  // memcpy the individual element bit-pattern.
1731
0
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1732
0
                       /*volatile*/ false);
1733
0
1734
0
  // Go to the next element.
1735
0
  llvm::Value *next =
1736
0
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1737
0
1738
0
  // Leave if that's the end of the VLA.
1739
0
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1740
0
  Builder.CreateCondBr(done, contBB, loopBB);
1741
0
  cur->addIncoming(next, loopBB);
1742
0
1743
0
  CGF.EmitBlock(contBB);
1744
0
}
1745
1746
void
1747
7.47k
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1748
7.47k
  // Ignore empty classes in C++.
1749
7.47k
  if (getLangOpts().CPlusPlus) {
1750
7.33k
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
1751
7.03k
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1752
6.33k
        return;
1753
1.13k
    }
1754
7.33k
  }
1755
1.13k
1756
1.13k
  // Cast the dest ptr to the appropriate i8 pointer type.
1757
1.13k
  if (DestPtr.getElementType() != Int8Ty)
1758
1.13k
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1759
1.13k
1760
1.13k
  // Get size and alignment info for this aggregate.
1761
1.13k
  CharUnits size = getContext().getTypeSizeInChars(Ty);
1762
1.13k
1763
1.13k
  llvm::Value *SizeVal;
1764
1.13k
  const VariableArrayType *vla;
1765
1.13k
1766
1.13k
  // Don't bother emitting a zero-byte memset.
1767
1.13k
  if (size.isZero()) {
1768
11
    // But note that getTypeInfo returns 0 for a VLA.
1769
11
    if (const VariableArrayType *vlaType =
1770
8
          dyn_cast_or_null<VariableArrayType>(
1771
8
                                          getContext().getAsArrayType(Ty))) {
1772
8
      auto VlaSize = getVLASize(vlaType);
1773
8
      SizeVal = VlaSize.NumElts;
1774
8
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1775
8
      if (!eltSize.isOne())
1776
8
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1777
8
      vla = vlaType;
1778
8
    } else {
1779
3
      return;
1780
3
    }
1781
1.12k
  } else {
1782
1.12k
    SizeVal = CGM.getSize(size);
1783
1.12k
    vla = nullptr;
1784
1.12k
  }
1785
1.13k
1786
1.13k
  // If the type contains a pointer to data member we can't memset it to zero.
1787
1.13k
  // Instead, create a null constant and copy it to the destination.
1788
1.13k
  // TODO: there are other patterns besides zero that we can usefully memset,
1789
1.13k
  // like -1, which happens to be the pattern used by member-pointers.
1790
1.13k
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
1791
9
    // For a VLA, emit a single element, then splat that over the VLA.
1792
9
    if (vla) Ty = getContext().getBaseElementType(vla);
1793
9
1794
9
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1795
9
1796
9
    llvm::GlobalVariable *NullVariable =
1797
9
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1798
9
                               /*isConstant=*/true,
1799
9
                               llvm::GlobalVariable::PrivateLinkage,
1800
9
                               NullConstant, Twine());
1801
9
    CharUnits NullAlign = DestPtr.getAlignment();
1802
9
    NullVariable->setAlignment(NullAlign.getAsAlign());
1803
9
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1804
9
                   NullAlign);
1805
9
1806
9
    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1807
9
1808
9
    // Get and call the appropriate llvm.memcpy overload.
1809
9
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1810
9
    return;
1811
9
  }
1812
1.12k
1813
1.12k
  // Otherwise, just memset the whole thing to zero.  This is legal
1814
1.12k
  // because in LLVM, all default initializers (other than the ones we just
1815
1.12k
  // handled above) are guaranteed to have a bit pattern of all zeros.
1816
1.12k
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1817
1.12k
}
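
A sketch of the non-zero-initializable case (hypothetical type names): under the Itanium C++ ABI a null pointer to data member is represented as -1, not 0, so the path above copies a prebuilt null constant instead of emitting a memset of zero bytes.

    struct S { int a, b; };
    struct Holder {
      int S::*members[4];      // null value is all-ones, not all-zero-bits
    };
    void init(void) {
      Holder local = Holder(); // value-init takes the memcpy path above
      (void)local;
    }
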
1818
1819
57
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1820
57
  // Make sure that there is a block for the indirect goto.
1821
57
  if (!IndirectBranch)
1822
34
    GetIndirectGotoBlock();
1823
57
1824
57
  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1825
57
1826
57
  // Make sure the indirect branch includes all of the address-taken blocks.
1827
57
  IndirectBranch->addDestination(BB);
1828
57
  return llvm::BlockAddress::get(CurFn, BB);
1829
57
}
1830
1831
56
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1832
56
  // If we already made the indirect branch for indirect goto, return its block.
1833
56
  if (IndirectBranch) return IndirectBranch->getParent();
1834
35
1835
35
  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1836
35
1837
35
  // Create the PHI node that indirect gotos will add entries to.
1838
35
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1839
35
                                              "indirect.goto.dest");
1840
35
1841
35
  // Create the indirect branch instruction.
1842
35
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1843
35
  return IndirectBranch->getParent();
1844
35
}
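
These two helpers implement the GNU address-of-label extension; a minimal sketch (hypothetical names):

    // '&&label' produces a blockaddress; 'goto *p' routes through the shared
    // "indirectgoto" block whose PHI collects every possible destination.
    int dispatch(int i) {
      static void *targets[] = { &&zero, &&one };
      goto *targets[i & 1];
    zero:
      return 0;
    one:
      return 1;
    }
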
1845
1846
/// Computes the length of an array in elements, as well as the base
1847
/// element type and a properly-typed first element pointer.
1848
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1849
                                              QualType &baseType,
1850
4.41k
                                              Address &addr) {
1851
4.41k
  const ArrayType *arrayType = origArrayType;
1852
4.41k
1853
4.41k
  // If it's a VLA, we have to load the stored size.  Note that
1854
4.41k
  // this is the size of the VLA in bytes, not its size in elements.
1855
4.41k
  llvm::Value *numVLAElements = nullptr;
1856
4.41k
  if (isa<VariableArrayType>(arrayType)) {
1857
344
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1858
344
1859
344
    // Walk into all VLAs.  This doesn't require changes to addr,
1860
344
    // which has type T* where T is the first non-VLA element type.
1861
381
    do {
1862
381
      QualType elementType = arrayType->getElementType();
1863
381
      arrayType = getContext().getAsArrayType(elementType);
1864
381
1865
381
      // If we only have VLA components, 'addr' requires no adjustment.
1866
381
      if (!arrayType) {
1867
340
        baseType = elementType;
1868
340
        return numVLAElements;
1869
340
      }
1870
41
    } while (isa<VariableArrayType>(arrayType));
1871
344
1872
344
    // We get out here only if we find a constant array type
1873
344
    // inside the VLA.
1874
344
  }
1875
4.41k
1876
4.41k
  // We have some number of constant-length arrays, so addr should
1877
4.41k
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
1878
4.41k
  // down to the first element of addr.
1879
4.41k
  SmallVector<llvm::Value*, 8> gepIndices;
1880
4.07k
1881
4.07k
  // GEP down to the array type.
1882
4.07k
  llvm::ConstantInt *zero = Builder.getInt32(0);
1883
4.07k
  gepIndices.push_back(zero);
1884
4.07k
1885
4.07k
  uint64_t countFromCLAs = 1;
1886
4.07k
  QualType eltType;
1887
4.07k
1888
4.07k
  llvm::ArrayType *llvmArrayType =
1889
4.07k
    dyn_cast<llvm::ArrayType>(addr.getElementType());
1890
7.95k
  while (llvmArrayType) {
1891
3.87k
    assert(isa<ConstantArrayType>(arrayType));
1892
3.87k
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1893
3.87k
             == llvmArrayType->getNumElements());
1894
3.87k
1895
3.87k
    gepIndices.push_back(zero);
1896
3.87k
    countFromCLAs *= llvmArrayType->getNumElements();
1897
3.87k
    eltType = arrayType->getElementType();
1898
3.87k
1899
3.87k
    llvmArrayType =
1900
3.87k
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1901
3.87k
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
1902
3.87k
    assert((!llvmArrayType || arrayType) &&
1903
3.87k
           "LLVM and Clang types are out-of-synch");
1904
3.87k
  }
1905
4.07k
1906
4.07k
  if (arrayType) {
1907
293
    // From this point onwards, the Clang array type has been emitted
1908
293
    // as some other type (probably a packed struct). Compute the array
1909
293
    // size, and just emit the 'begin' expression as a bitcast.
1910
626
    while (arrayType) {
1911
333
      countFromCLAs *=
1912
333
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1913
333
      eltType = arrayType->getElementType();
1914
333
      arrayType = getContext().getAsArrayType(eltType);
1915
333
    }
1916
293
1917
293
    llvm::Type *baseType = ConvertType(eltType);
1918
293
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1919
3.78k
  } else {
1920
3.78k
    // Create the actual GEP.
1921
3.78k
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1922
3.78k
                                             gepIndices, "array.begin"),
1923
3.78k
                   addr.getAlignment());
1924
3.78k
  }
1925
4.07k
1926
4.07k
  baseType = eltType;
1927
4.07k
1928
4.07k
  llvm::Value *numElements
1929
4.07k
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1930
4.07k
1931
4.07k
  // If we had any VLA dimensions, factor them in.
1932
4.07k
  if (numVLAElements)
1933
4
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1934
4.07k
1935
4.07k
  return numElements;
1936
4.41k
}
1937
1938
1.66k
CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1939
1.66k
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1940
1.66k
  assert(vla && "type was not a variable array type!");
1941
1.66k
  return getVLASize(vla);
1942
1.66k
}
1943
1944
CodeGenFunction::VlaSizePair
1945
4.08k
CodeGenFunction::getVLASize(const VariableArrayType *type) {
1946
4.08k
  // The number of elements so far; always size_t.
1947
4.08k
  llvm::Value *numElements = nullptr;
1948
4.08k
1949
4.08k
  QualType elementType;
1950
5.18k
  do {
1951
5.18k
    elementType = type->getElementType();
1952
5.18k
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1953
5.18k
    assert(vlaSize && "no size for VLA!");
1954
5.18k
    assert(vlaSize->getType() == SizeTy);
1955
5.18k
1956
5.18k
    if (!numElements) {
1957
4.08k
      numElements = vlaSize;
1958
4.08k
    } else {
1959
1.09k
      // It's undefined behavior if this wraps around, so mark it that way.
1960
1.09k
      // FIXME: Teach -fsanitize=undefined to trap this.
1961
1.09k
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
1962
1.09k
    }
1963
5.18k
  } while ((type = getContext().getAsVariableArrayType(elementType)));
1964
4.08k
1965
4.08k
  return { numElements, elementType };
1966
4.08k
}
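
A sketch of a multi-dimensional VLA (hypothetical name, plain C): each dimension's size expression was cached in VLASizeMap when the type was emitted, and the loop above multiplies the cached sizes with NUW arithmetic.

    // For 'int a[n][m]', getVLASize returns NumElts = n * m (as size_t)
    // and elementType = int.
    unsigned long count(int n, int m) {
      int a[n][m];
      return sizeof(a) / sizeof(int); // n * m, computed at run time
    }
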
1967
1968
CodeGenFunction::VlaSizePair
1969
2.45k
CodeGenFunction::getVLAElements1D(QualType type) {
1970
2.45k
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1971
2.45k
  assert(vla && "type was not a variable array type!");
1972
2.45k
  return getVLAElements1D(vla);
1973
2.45k
}
1974
1975
CodeGenFunction::VlaSizePair
1976
2.45k
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1977
2.45k
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1978
2.45k
  assert(VlaSize && "no size for VLA!");
1979
2.45k
  assert(VlaSize->getType() == SizeTy);
1980
2.45k
  return { VlaSize, Vla->getElementType() };
1981
2.45k
}
1982
1983
2.10k
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1984
2.10k
  assert(type->isVariablyModifiedType() &&
1985
2.10k
         "Must pass variably modified type to EmitVLASizes!");
1986
2.10k
1987
2.10k
  EnsureInsertPoint();
1988
2.10k
1989
2.10k
  // We're going to walk down into the type and look for VLA
1990
2.10k
  // expressions.
1991
3.06k
  do {
1992
3.06k
    assert(type->isVariablyModifiedType());
1993
3.06k
1994
3.06k
    const Type *ty = type.getTypePtr();
1995
3.06k
    switch (ty->getTypeClass()) {
1996
0
1997
0
#define TYPE(Class, Base)
1998
0
#define ABSTRACT_TYPE(Class, Base)
1999
0
#define NON_CANONICAL_TYPE(Class, Base)
2000
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2001
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2002
0
#include "clang/AST/TypeNodes.inc"
2003
0
      llvm_unreachable("unexpected dependent type!");
2004
0
2005
0
    // These types are never variably-modified.
2006
0
    case Type::Builtin:
2007
0
    case Type::Complex:
2008
0
    case Type::Vector:
2009
0
    case Type::ExtVector:
2010
0
    case Type::Record:
2011
0
    case Type::Enum:
2012
0
    case Type::Elaborated:
2013
0
    case Type::TemplateSpecialization:
2014
0
    case Type::ObjCTypeParam:
2015
0
    case Type::ObjCObject:
2016
0
    case Type::ObjCInterface:
2017
0
    case Type::ObjCObjectPointer:
2018
0
      llvm_unreachable("type class is never variably-modified!");
2019
0
2020
0
    case Type::Adjusted:
2021
0
      type = cast<AdjustedType>(ty)->getAdjustedType();
2022
0
      break;
2023
0
2024
50
    case Type::Decayed:
2025
50
      type = cast<DecayedType>(ty)->getPointeeType();
2026
50
      break;
2027
0
2028
90
    case Type::Pointer:
2029
90
      type = cast<PointerType>(ty)->getPointeeType();
2030
90
      break;
2031
0
2032
0
    case Type::BlockPointer:
2033
0
      type = cast<BlockPointerType>(ty)->getPointeeType();
2034
0
      break;
2035
0
2036
3
    case Type::LValueReference:
2037
3
    case Type::RValueReference:
2038
3
      type = cast<ReferenceType>(ty)->getPointeeType();
2039
3
      break;
2040
3
2041
3
    case Type::MemberPointer:
2042
0
      type = cast<MemberPointerType>(ty)->getPointeeType();
2043
0
      break;
2044
3
2045
12
    case Type::ConstantArray:
2046
12
    case Type::IncompleteArray:
2047
12
      // Losing element qualification here is fine.
2048
12
      type = cast<ArrayType>(ty)->getElementType();
2049
12
      break;
2050
12
2051
2.82k
    case Type::VariableArray: {
2052
2.82k
      // Losing element qualification here is fine.
2053
2.82k
      const VariableArrayType *vat = cast<VariableArrayType>(ty);
2054
2.82k
2055
2.82k
      // Unknown size indication requires no size computation.
2056
2.82k
      // Otherwise, evaluate and record it.
2057
2.82k
      if (const Expr *size = vat->getSizeExpr()) {
2058
2.82k
        // It's possible that we might have emitted this already,
2059
2.82k
        // e.g. with a typedef and a pointer to it.
2060
2.82k
        llvm::Value *&entry = VLASizeMap[size];
2061
2.82k
        if (!entry) {
2062
2.62k
          llvm::Value *Size = EmitScalarExpr(size);
2063
2.62k
2064
2.62k
          // C11 6.7.6.2p5:
2065
2.62k
          //   If the size is an expression that is not an integer constant
2066
2.62k
          //   expression [...] each time it is evaluated it shall have a value
2067
2.62k
          //   greater than zero.
2068
2.62k
          if (SanOpts.has(SanitizerKind::VLABound) &&
2069
2.62k
              
size->getType()->isSignedIntegerType()2
) {
2070
2
            SanitizerScope SanScope(this);
2071
2
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
2072
2
            llvm::Constant *StaticArgs[] = {
2073
2
                EmitCheckSourceLocation(size->getBeginLoc()),
2074
2
                EmitCheckTypeDescriptor(size->getType())};
2075
2
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
2076
2
                                     SanitizerKind::VLABound),
2077
2
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
2078
2
          }
2079
2.62k
2080
2.62k
          // Always zexting here would be wrong if it weren't
2081
2.62k
          // undefined behavior to have a negative bound.
2082
2.62k
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
2083
2.62k
        }
2084
2.82k
      }
2085
2.82k
      type = vat->getElementType();
2086
2.82k
      break;
2087
12
    }
2088
12
2089
12
    case Type::FunctionProto:
2090
1
    case Type::FunctionNoProto:
2091
1
      type = cast<FunctionType>(ty)->getReturnType();
2092
1
      break;
2093
1
2094
44
    case Type::Paren:
2095
44
    case Type::TypeOf:
2096
44
    case Type::UnaryTransform:
2097
44
    case Type::Attributed:
2098
44
    case Type::SubstTemplateTypeParm:
2099
44
    case Type::PackExpansion:
2100
44
    case Type::MacroQualified:
2101
44
      // Keep walking after single level desugaring.
2102
44
      type = type.getSingleStepDesugaredType(getContext());
2103
44
      break;
2104
44
2105
44
    case Type::Typedef:
2106
28
    case Type::Decltype:
2107
28
    case Type::Auto:
2108
28
    case Type::DeducedTemplateSpecialization:
2109
28
      // Stop walking: nothing to do.
2110
28
      return;
2111
28
2112
28
    case Type::TypeOfExpr:
2113
13
      // Stop walking: emit typeof expression.
2114
13
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2115
13
      return;
2116
28
2117
28
    case Type::Atomic:
2118
1
      type = cast<AtomicType>(ty)->getValueType();
2119
1
      break;
2120
28
2121
28
    case Type::Pipe:
2122
0
      type = cast<PipeType>(ty)->getElementType();
2123
0
      break;
2124
3.02k
    }
2125
3.02k
  } while (type->isVariablyModifiedType());
2126
2.10k
}
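
A sketch of a variably modified parameter type (hypothetical name, plain C): both size expressions are evaluated exactly once, in the prologue, before any use of the parameter.

    // The walk above reaches two VariableArrayType nodes and records both
    // 'n' and '2 * n' (zero-extended to size_t) in VLASizeMap.
    void walk(int n, int arr[n][2 * n]) {
      arr[0][0] = n;
    }
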
2127
2128
1.08k
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2129
1.08k
  if (getContext().getBuiltinVaListType()->isArrayType())
2130
459
    return EmitPointerWithAlignment(E);
2131
623
  return EmitLValue(E).getAddress(*this);
2132
623
}
2133
2134
29
Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2135
29
  return EmitLValue(E).getAddress(*this);
2136
29
}
2137
2138
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2139
7.40k
                                              const APValue &Init) {
2140
7.40k
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2141
7.40k
  if (CGDebugInfo *Dbg = getDebugInfo())
2142
5.01k
    if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2143
5.01k
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
2144
7.40k
}
2145
2146
CodeGenFunction::PeepholeProtection
2147
1.03k
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2148
1.03k
  // At the moment, the only aggressive peephole we do in IR gen
2149
1.03k
  // is trunc(zext) folding, but if we add more, we can easily
2150
1.03k
  // extend this protection.
2151
1.03k
2152
1.03k
  if (!rvalue.isScalar()) return PeepholeProtection();
2153
982
  llvm::Value *value = rvalue.getScalarVal();
2154
982
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2155
19
2156
19
  // Just make an extra bitcast.
2157
19
  assert(HaveInsertPoint());
2158
19
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2159
19
                                                  Builder.GetInsertBlock());
2160
19
2161
19
  PeepholeProtection protection;
2162
19
  protection.Inst = inst;
2163
19
  return protection;
2164
19
}
2165
2166
1.03k
void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2167
1.03k
  if (!protection.Inst) return;
2168
19
2169
19
  // In theory, we could try to duplicate the peepholes now, but whatever.
2170
19
  protection.Inst->eraseFromParent();
2171
19
}
2172
2173
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2174
                                              QualType Ty, SourceLocation Loc,
2175
                                              SourceLocation AssumptionLoc,
2176
                                              llvm::Value *Alignment,
2177
424
                                              llvm::Value *OffsetValue) {
2178
424
  llvm::Value *TheCheck;
2179
424
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2180
424
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
2181
424
  if (SanOpts.has(SanitizerKind::Alignment)) {
2182
33
    emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2183
33
                                 OffsetValue, TheCheck, Assumption);
2184
33
  }
2185
424
}
2186
2187
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2188
                                              const Expr *E,
2189
                                              SourceLocation AssumptionLoc,
2190
                                              llvm::Value *Alignment,
2191
402
                                              llvm::Value *OffsetValue) {
2192
402
  if (auto *CE = dyn_cast<CastExpr>(E))
2193
168
    E = CE->getSubExprAsWritten();
2194
402
  QualType Ty = E->getType();
2195
402
  SourceLocation Loc = E->getExprLoc();
2196
402
2197
402
  emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2198
402
                          OffsetValue);
2199
402
}
2200
2201
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2202
                                                 llvm::Value *AnnotatedVal,
2203
                                                 StringRef AnnotationStr,
2204
22
                                                 SourceLocation Location) {
2205
22
  llvm::Value *Args[4] = {
2206
22
    AnnotatedVal,
2207
22
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2208
22
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2209
22
    CGM.EmitAnnotationLineNo(Location)
2210
22
  };
2211
22
  return Builder.CreateCall(AnnotationFn, Args);
2212
22
}
2213
2214
7
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2215
7
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2216
7
  // FIXME We create a new bitcast for every annotation because that's what
2217
7
  // llvm-gcc was doing.
2218
7
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2219
11
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2220
11
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2221
11
                       I->getAnnotation(), D->getLocation());
2222
7
}
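
A minimal sketch of the source form (hypothetical tag string): each annotate attribute becomes one call to llvm.var.annotation on an i8* bitcast of the variable's address.

    void f(void) {
      int counter __attribute__((annotate("my_tag"))) = 0;
      (void)counter;
    }
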
2223
2224
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2225
2
                                              Address Addr) {
2226
2
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2227
2
  llvm::Value *V = Addr.getPointer();
2228
2
  llvm::Type *VTy = V->getType();
2229
2
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2230
2
                                    CGM.Int8PtrTy);
2231
2
2232
4
  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2233
4
    // FIXME Always emit the cast inst so we can differentiate between
2234
4
    // annotation on the first field of a struct and annotation on the struct
2235
4
    // itself.
2236
4
    if (VTy != CGM.Int8PtrTy)
2237
4
      V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2238
4
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2239
4
    V = Builder.CreateBitCast(V, VTy);
2240
4
  }
2241
2
2242
2
  return Address(V, Addr.getAlignment());
2243
2
}
2244
2245
45.1k
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2246
2247
CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2248
42.5k
    : CGF(CGF) {
2249
42.5k
  assert(!CGF->IsSanitizerScope);
2250
42.5k
  CGF->IsSanitizerScope = true;
2251
42.5k
}
2252
2253
42.5k
CodeGenFunction::SanitizerScope::~SanitizerScope() {
2254
42.5k
  CGF->IsSanitizerScope = false;
2255
42.5k
}
2256
2257
void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2258
                                   const llvm::Twine &Name,
2259
                                   llvm::BasicBlock *BB,
2260
4.16M
                                   llvm::BasicBlock::iterator InsertPt) const {
2261
4.16M
  LoopStack.InsertHelper(I);
2262
4.16M
  if (IsSanitizerScope)
2263
14.9k
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2264
4.16M
}
2265
2266
void CGBuilderInserter::InsertHelper(
2267
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2268
4.17M
    llvm::BasicBlock::iterator InsertPt) const {
2269
4.17M
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2270
4.17M
  if (CGF)
2271
4.16M
    CGF->InsertHelper(I, Name, BB, InsertPt);
2272
4.17M
}
2273
2274
static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2275
                                CodeGenModule &CGM, const FunctionDecl *FD,
2276
29.7k
                                std::string &FirstMissing) {
2277
29.7k
  // If there aren't any required features listed then go ahead and return.
2278
29.7k
  if (ReqFeatures.empty())
2279
0
    return false;
2280
29.7k
2281
29.7k
  // Now build up the set of caller features and verify that all the required
2282
29.7k
  // features are there.
2283
29.7k
  llvm::StringMap<bool> CallerFeatureMap;
2284
29.7k
  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2285
29.7k
2286
29.7k
  // If we have at least one of the features in the feature list return
2287
29.7k
  // true, otherwise return false.
2288
29.7k
  return std::all_of(
2289
215k
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2290
215k
        SmallVector<StringRef, 1> OrFeatures;
2291
215k
        Feature.split(OrFeatures, '|');
2292
222k
        return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2293
222k
          if (!CallerFeatureMap.lookup(Feature)) {
2294
7.14k
            FirstMissing = Feature.str();
2295
7.14k
            return false;
2296
7.14k
          }
2297
215k
          return true;
2298
215k
        });
2299
215k
      });
2300
29.7k
}
2301
2302
// Emits an error if we don't have a valid set of target features for the
2303
// called function.
2304
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2305
32.4k
                                          const FunctionDecl *TargetDecl) {
2306
32.4k
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2307
32.4k
}
2308
2309
// Emits an error if we don't have a valid set of target features for the
2310
// called function.
2311
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2312
44.3k
                                          const FunctionDecl *TargetDecl) {
2313
44.3k
  // Early exit if this is an indirect call.
2314
44.3k
  if (!TargetDecl)
2315
0
    return;
2316
44.3k
2317
44.3k
  // Get the current enclosing function if it exists. If it doesn't
2318
44.3k
  // we can't check the target features anyhow.
2319
44.3k
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2320
44.3k
  if (!FD)
2321
0
    return;
2322
44.3k
2323
44.3k
  // Grab the required features for the call. For a builtin this is listed in
2324
44.3k
  // the td file with the default cpu, for an always_inline function this is any
2325
44.3k
  // listed cpu and any listed features.
2326
44.3k
  unsigned BuiltinID = TargetDecl->getBuiltinID();
2327
44.3k
  std::string MissingFeature;
2328
44.3k
  if (BuiltinID) {
2329
32.4k
    SmallVector<StringRef, 1> ReqFeatures;
2330
32.4k
    const char *FeatureList =
2331
32.4k
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2332
32.4k
    // Return if the builtin doesn't have any required features.
2333
32.4k
    if (!FeatureList || StringRef(FeatureList) == "")
2334
14.6k
      return;
2335
17.8k
    StringRef(FeatureList).split(ReqFeatures, ',');
2336
17.8k
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2337
542
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2338
542
          << TargetDecl->getDeclName()
2339
542
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2340
17.8k
2341
17.8k
  } else 
if (11.8k
!TargetDecl->isMultiVersion()11.8k
&&
2342
11.8k
             TargetDecl->hasAttr<TargetAttr>()) {
2343
11.8k
    // Get the required features for the callee.
2344
11.8k
2345
11.8k
    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2346
11.8k
    ParsedTargetAttr ParsedAttr =
2347
11.8k
        CGM.getContext().filterFunctionTargetAttrs(TD);
2348
11.8k
2349
11.8k
    SmallVector<StringRef, 1> ReqFeatures;
2350
11.8k
    llvm::StringMap<bool> CalleeFeatureMap;
2351
11.8k
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap,
2352
11.8k
                                           GlobalDecl(TargetDecl));
2353
11.8k
2354
13.1k
    for (const auto &F : ParsedAttr.Features) {
2355
13.1k
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2356
13.1k
        ReqFeatures.push_back(StringRef(F).substr(1));
2357
13.1k
    }
2358
11.8k
2359
183k
    for (const auto &F : CalleeFeatureMap) {
2360
183k
      // Only positive features are "required".
2361
183k
      if (F.getValue())
2362
183k
        ReqFeatures.push_back(F.getKey());
2363
183k
    }
2364
11.8k
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2365
14
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2366
14
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2367
11.8k
  }
2368
44.3k
}
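
A sketch of both outcomes (x86-only, hypothetical names): calling a feature-gated intrinsic from a plain function trips err_builtin_needs_feature, while a caller carrying the matching target attribute passes the check.

    #include <immintrin.h>

    __attribute__((target("avx2")))
    __m256i ok(__m256i a, __m256i b) {
      return _mm256_add_epi32(a, b);  // caller provides avx2: check passes
    }
    // Without the target attribute (and without -mavx2), the same call
    // would be rejected with err_builtin_needs_feature.
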
2369
2370
87
void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2371
87
  if (!CGM.getCodeGenOpts().SanitizeStats)
2372
77
    return;
2373
10
2374
10
  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2375
10
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2376
10
  CGM.getSanStats().create(IRB, SSK);
2377
10
}
2378
2379
llvm::Value *
2380
212
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2381
212
  llvm::Value *Condition = nullptr;
2382
212
2383
212
  if (!RO.Conditions.Architecture.empty())
2384
68
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2385
212
2386
212
  if (!RO.Conditions.Features.empty()) {
2387
91
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2388
91
    Condition =
2389
91
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2390
91
  }
2391
212
  return Condition;
2392
212
}
2393
2394
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2395
                                             llvm::Function *Resolver,
2396
                                             CGBuilderTy &Builder,
2397
                                             llvm::Function *FuncToReturn,
2398
212
                                             bool SupportsIFunc) {
2399
212
  if (SupportsIFunc) {
2400
104
    Builder.CreateRet(FuncToReturn);
2401
104
    return;
2402
104
  }
2403
108
2404
108
  llvm::SmallVector<llvm::Value *, 10> Args;
2405
108
  llvm::for_each(Resolver->args(),
2406
116
                 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2407
108
2408
108
  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2409
108
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2410
108
2411
108
  if (Resolver->getReturnType()->isVoidTy())
2412
29
    Builder.CreateRetVoid();
2413
79
  else
2414
79
    Builder.CreateRet(Result);
2415
108
}
2416
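
A hand-written analogue of the SupportsIFunc path, using the GNU ifunc attribute (ELF-only; all names here are hypothetical). Where ifuncs are unsupported, the resolver instead forwards its arguments to the chosen body with a musttail call, as the non-ifunc branch above emits:

  static int twice_fast(int x) { return x << 1; }
  static int twice_slow(int x) { return x + x; }

  // Runs at load time; initialize CPU detection before querying it,
  // mirroring the EmitX86CpuInit call in the emitted resolver.
  static int (*resolve_twice(void))(int) {
    __builtin_cpu_init();
    return __builtin_cpu_supports("avx2") ? twice_fast : twice_slow;
  }

  // Callers of twice() bind to whichever implementation was returned.
  int twice(int) __attribute__((ifunc("resolve_twice")));
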
2417
void CodeGenFunction::EmitMultiVersionResolver(
2418
65
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2419
65
  assert(getContext().getTargetInfo().getTriple().isX86() &&
2420
65
         "Only implemented for x86 targets");
2421
65
2422
65
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2423
65
2424
65
  // The resolver function's entry basic block.
2425
65
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2426
65
  Builder.SetInsertPoint(CurBlock);
2427
65
  EmitX86CpuInit();
2428
65
2429
212
  for (const MultiVersionResolverOption &RO : Options) {
2430
212
    Builder.SetInsertPoint(CurBlock);
2431
212
    llvm::Value *Condition = FormResolverCondition(RO);
2432
212
2433
212
    // The 'default' or 'generic' case.
2434
212
    if (!Condition) {
2435
55
      assert(&RO == Options.end() - 1 &&
2436
55
             "Default or Generic case must be last");
2437
55
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2438
55
                                       SupportsIFunc);
2439
55
      return;
2440
55
    }
2441
157
2442
157
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2443
157
    CGBuilderTy RetBuilder(*this, RetBlock);
2444
157
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2445
157
                                     SupportsIFunc);
2446
157
    CurBlock = createBasicBlock("resolver_else", Resolver);
2447
157
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2448
157
  }
2449
65
2450
65
  // If no generic/default case was provided, emit a trap and an unreachable.
2451
65
  Builder.SetInsertPoint(CurBlock);
2452
10
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2453
10
  TrapCall->setDoesNotReturn();
2454
10
  TrapCall->setDoesNotThrow();
2455
10
  Builder.CreateUnreachable();
2456
10
  Builder.ClearInsertionPoint();
2457
10
}
2458
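
A sketch of source that causes clang to emit such a resolver: x86 target-based function multiversioning, where each version becomes one MultiVersionResolverOption and the "default" version sorts last, landing in the condition-less branch above (function names are made up):

  __attribute__((target("arch=skylake"))) int work(void) { return 1; }
  __attribute__((target("avx2")))         int work(void) { return 2; }
  __attribute__((target("default")))      int work(void) { return 3; }

  int run(void) {
    return work();  // Dispatches through the emitted resolver/ifunc.
  }
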
2459
// Loc - where the diagnostic will point: the place in the source code where
2460
//  this alignment requirement failed.
2461
// SecondaryLoc - if present (it will be, if sufficiently different from
2462
//  Loc), the diagnostic will additionally attach a "Note:" pointing to this
2463
//  location. It should be the location where the __attribute__((assume_aligned))
2464
//  was written, e.g. on the function declaration.
2465
void CodeGenFunction::emitAlignmentAssumptionCheck(
2466
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2467
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
2468
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
2469
33
    llvm::Instruction *Assumption) {
2470
33
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2471
33
         cast<llvm::CallInst>(Assumption)->getCalledValue() ==
2472
33
             llvm::Intrinsic::getDeclaration(
2473
33
                 Builder.GetInsertBlock()->getParent()->getParent(),
2474
33
                 llvm::Intrinsic::assume) &&
2475
33
         "Assumption should be a call to llvm.assume().");
2476
33
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2477
33
         "Assumption should be the last instruction of the basic block, "
2478
33
         "since the basic block is still being generated.");
2479
33
2480
33
  if (!SanOpts.has(SanitizerKind::Alignment))
2481
0
    return;
2482
33
2483
33
  // Don't check pointers to volatile data. The behavior here is implementation-
2484
33
  // defined.
2485
33
  if (Ty->getPointeeType().isVolatileQualified())
2486
1
    return;
2487
32
2488
32
  // We need to temporarily remove the assumption so we can insert the
2489
32
  // sanitizer check before it; otherwise the check will be dropped by optimizations.
2490
32
  Assumption->removeFromParent();
2491
32
2492
32
  {
2493
32
    SanitizerScope SanScope(this);
2494
32
2495
32
    if (!OffsetValue)
2496
23
      OffsetValue = Builder.getInt1(0); // no offset.
2497
32
2498
32
    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2499
32
                                    EmitCheckSourceLocation(SecondaryLoc),
2500
32
                                    EmitCheckTypeDescriptor(Ty)};
2501
32
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2502
32
                                  EmitCheckValue(Alignment),
2503
32
                                  EmitCheckValue(OffsetValue)};
2504
32
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2505
32
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2506
32
  }
2507
32
2508
32
  // We are now in the (new, empty) "cont" basic block.
2509
32
  // Reintroduce the assumption.
2510
32
  Builder.Insert(Assumption);
2511
32
  // FIXME: Assumption still has its original basic block as its parent.
2512
32
}
2513
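
A sketch of code this instruments when built with -fsanitize=alignment (identifiers are illustrative): Loc is the call site covered by the llvm.assume, SecondaryLoc the spot where the attribute was spelled:

  #include <cstdlib>

  // SecondaryLoc: the assume_aligned attribute on this declaration.
  __attribute__((assume_aligned(64))) void *my_alloc(std::size_t n) {
    return std::malloc(n);  // Bug: malloc makes no 64-byte promise.
  }

  void use(void) {
    // Loc: the sanitizer check emitted before the llvm.assume reports
    // here whenever the returned pointer is under-aligned.
    void *p = my_alloc(10);
    std::free(p);
  }
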
2514
43.1k
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2515
43.1k
  if (CGDebugInfo *DI = getDebugInfo())
2516
16.0k
    return DI->SourceLocToDebugLoc(Location);
2517
27.1k
2518
27.1k
  return llvm::DebugLoc();
2519
27.1k
}