Coverage Report

Created: 2020-09-15 12:33

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
 Count|Source
      |//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
      |//
      |// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
      |// See https://llvm.org/LICENSE.txt for license information.
      |// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      |//
      |//===----------------------------------------------------------------------===//
      |//
      |// This coordinates the per-function state used while generating code.
      |//
      |//===----------------------------------------------------------------------===//
      |
      |#include "CodeGenFunction.h"
      |#include "CGBlocks.h"
      |#include "CGCUDARuntime.h"
      |#include "CGCXXABI.h"
      |#include "CGCleanup.h"
      |#include "CGDebugInfo.h"
      |#include "CGOpenMPRuntime.h"
      |#include "CodeGenModule.h"
      |#include "CodeGenPGO.h"
      |#include "TargetInfo.h"
      |#include "clang/AST/ASTContext.h"
      |#include "clang/AST/ASTLambda.h"
      |#include "clang/AST/Attr.h"
      |#include "clang/AST/Decl.h"
      |#include "clang/AST/DeclCXX.h"
      |#include "clang/AST/StmtCXX.h"
      |#include "clang/AST/StmtObjC.h"
      |#include "clang/Basic/Builtins.h"
      |#include "clang/Basic/CodeGenOptions.h"
      |#include "clang/Basic/TargetInfo.h"
      |#include "clang/CodeGen/CGFunctionInfo.h"
      |#include "clang/Frontend/FrontendDiagnostic.h"
      |#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
      |#include "llvm/IR/DataLayout.h"
      |#include "llvm/IR/Dominators.h"
      |#include "llvm/IR/FPEnv.h"
      |#include "llvm/IR/IntrinsicInst.h"
      |#include "llvm/IR/Intrinsics.h"
      |#include "llvm/IR/MDBuilder.h"
      |#include "llvm/IR/Operator.h"
      |#include "llvm/Transforms/Utils/PromoteMemToReg.h"
      |using namespace clang;
      |using namespace CodeGen;
      |
      |/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
      |/// markers.
      |static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
  288k|                                      const LangOptions &LangOpts) {
  288k|  if (CGOpts.DisableLifetimeMarkers)
     7|    return false;
  288k|
      |  // Sanitizers may use markers.
  288k|  if (CGOpts.SanitizeAddressUseAfterScope ||
  287k|      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
  287k|      LangOpts.Sanitize.has(SanitizerKind::Memory))
 1.25k|    return true;
  287k|
      |  // For now, only in optimized builds.
  287k|  return CGOpts.OptimizationLevel != 0;
  287k|}
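
A condensed standalone restatement of the decision above (a sketch; the
LifetimeOpts struct is a hypothetical stand-in for the CodeGenOptions and
LangOptions fields the real code reads):

  struct LifetimeOpts {
    bool Disabled;            // CGOpts.DisableLifetimeMarkers
    bool AsanUseAfterScope;   // CGOpts.SanitizeAddressUseAfterScope
    bool HWASan, MSan;        // LangOpts.Sanitize HWAddress / Memory
    int OptLevel;             // CGOpts.OptimizationLevel
  };
  static bool wantsLifetimeMarkers(const LifetimeOpts &O) {
    if (O.Disabled)
      return false;           // explicit opt-out wins
    if (O.AsanUseAfterScope || O.HWASan || O.MSan)
      return true;            // these sanitizers consume the markers
    return O.OptLevel != 0;   // otherwise only in optimized builds
  }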
      |
      |CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
      |    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      |      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
      |              CGBuilderInserterTy(this)),
      |      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      |      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      |      ShouldEmitLifetimeMarkers(
  288k|          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  288k|  if (!suppressNewContext)
  263k|    CGM.getCXXABI().getMangleContext().startNewFunction();
  288k|
  288k|  SetFastMathFlags(CurFPFeatures);
  288k|  SetFPModel();
  288k|}
      |
  288k|CodeGenFunction::~CodeGenFunction() {
  288k|  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
  288k|
  288k|  if (getLangOpts().OpenMP && CurFn)
 66.5k|    CGM.getOpenMPRuntime().functionFinished(*this);
  288k|
      |  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
      |  // outlining etc) at some point. Doing it once the function codegen is done
      |  // seems to be a reasonable spot. We do it here, as opposed to the deletion
      |  // time of the CodeGenModule, because we have to ensure the IR has not yet
      |  // been "emitted" to the outside, thus, modifications are still sensible.
  288k|  if (CGM.getLangOpts().OpenMPIRBuilder)
   163|    CGM.getOpenMPRuntime().getOMPBuilder().finalize();
  288k|}
      |
      |// Map the LangOption for exception behavior into
      |// the corresponding enum in the IR.
      |llvm::fp::ExceptionBehavior
  288k|clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
  288k|
  288k|  switch (Kind) {
  286k|  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
    54|  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
 1.57k|  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
     0|  }
     0|  llvm_unreachable("Unsupported FP Exception Behavior");
     0|}
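
A minimal usage sketch (an assumed call site; SetFPModel below consumes this
mapping the same way):

  // -ffp-exception-behavior=strict surfaces as LangOptions::FPE_Strict,
  // which lowers to the strictest constrained-FP exception behavior.
  llvm::fp::ExceptionBehavior EB =
      clang::ToConstrainedExceptMD(LangOptions::FPE_Strict);
  assert(EB == llvm::fp::ebStrict);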
      |
  288k|void CodeGenFunction::SetFPModel() {
  288k|  llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
  288k|  auto fpExceptionBehavior = ToConstrainedExceptMD(
  288k|                               getLangOpts().getFPExceptionMode());
  288k|
  288k|  Builder.setDefaultConstrainedRounding(RM);
  288k|  Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
  288k|  Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore ||
  286k|                             RM != llvm::RoundingMode::NearestTiesToEven);
  288k|}
      |
  288k|void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  288k|  llvm::FastMathFlags FMF;
  288k|  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  288k|  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  288k|  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  288k|  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  288k|  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  288k|  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  288k|  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  288k|  Builder.setFastMathFlags(FMF);
  288k|}
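
The same flag set can be attached through any IRBuilder; a standalone sketch
(hypothetical helper, not code from this file) showing how two of the flags
surface on an emitted instruction:

  #include "llvm/IR/IRBuilder.h"
  static llvm::Value *emitFastAdd(llvm::IRBuilder<> &B, llvm::Value *L,
                                  llvm::Value *R) {
    llvm::FastMathFlags FMF;
    FMF.setNoNaNs();            // cf. FPFeatures.getNoHonorNaNs()
    FMF.setAllowContract(true); // cf. allowFPContractAcrossStatement()
    B.setFastMathFlags(FMF);
    return B.CreateFAdd(L, R);  // emitted as: fadd nnan contract ...
  }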
      |
      |CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
      |                                                  FPOptions FPFeatures)
 9.37k|    : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) {
 9.37k|  CGF.CurFPFeatures = FPFeatures;
 9.37k|
 9.37k|  if (OldFPFeatures == FPFeatures)
 9.17k|    return;
   196|
   196|  FMFGuard.emplace(CGF.Builder);
   196|
   196|  llvm::RoundingMode NewRoundingBehavior =
   196|      static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
   196|  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
   196|  auto NewExceptionBehavior =
   196|      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
   196|          FPFeatures.getFPExceptionMode()));
   196|  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
   196|
   196|  CGF.SetFastMathFlags(FPFeatures);
   196|
   196|  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
   196|          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
   196|          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
   196|          (NewExceptionBehavior == llvm::fp::ebIgnore &&
   196|           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
   196|         "FPConstrained should be enabled on entire function");
   196|
   784|  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
   784|    auto OldValue =
   784|        CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true";
   784|    auto NewValue = OldValue & Value;
   784|    if (OldValue != NewValue)
    29|      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
   784|  };
   196|  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
   196|  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
   196|  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
   196|  mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
    73|                                         FPFeatures.getAllowReciprocal() &&
    60|                                         FPFeatures.getAllowApproxFunc() &&
    60|                                         FPFeatures.getNoSignedZero());
   196|}
      |
 9.37k|CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
 9.37k|  CGF.CurFPFeatures = OldFPFeatures;
 9.37k|}
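
Typical use of the RAII pair above (an assumed call site; expression emitters
wrap individual FP operations this way to honor per-statement pragmas such as
'#pragma clang fp'):

  void emitWithLocalFPState(CodeGenFunction &CGF, FPOptions NewOpts) {
    CodeGenFunction::CGFPOptionsRAII Guard(CGF, NewOpts);
    // ... emit FP operations under NewOpts ...
  } // ~CGFPOptionsRAII restores CurFPFeatures; FMFGuard restores the builder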
      |
 40.8k|LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
 40.8k|  LValueBaseInfo BaseInfo;
 40.8k|  TBAAAccessInfo TBAAInfo;
 40.8k|  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
 40.8k|  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
 40.8k|                          TBAAInfo);
 40.8k|}
      |
      |/// Given a value of type T* that may not point to a complete object,
      |/// construct an l-value with the natural pointee alignment of T.
      |LValue
 59.4k|CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
 59.4k|  LValueBaseInfo BaseInfo;
 59.4k|  TBAAAccessInfo TBAAInfo;
 59.4k|  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
 59.4k|                                                /* forPointeeType= */ true);
 59.4k|  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
 59.4k|}
      |
      |
  878k|llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  878k|  return CGM.getTypes().ConvertTypeForMem(T);
  878k|}
      |
 2.89M|llvm::Type *CodeGenFunction::ConvertType(QualType T) {
 2.89M|  return CGM.getTypes().ConvertType(T);
 2.89M|}
      |
 5.13M|TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
 5.13M|  type = type.getCanonicalType();
 5.13M|  while (true) {
 5.13M|    switch (type->getTypeClass()) {
     0|#define TYPE(name, parent)
     0|#define ABSTRACT_TYPE(name, parent)
     0|#define NON_CANONICAL_TYPE(name, parent) case Type::name:
     0|#define DEPENDENT_TYPE(name, parent) case Type::name:
     0|#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
     0|#include "clang/AST/TypeNodes.inc"
     0|      llvm_unreachable("non-canonical or dependent type in IR-generation");
     0|
     0|    case Type::Auto:
     0|    case Type::DeducedTemplateSpecialization:
     0|      llvm_unreachable("undeduced type in IR-generation");
     0|
      |    // Various scalar types.
 4.88M|    case Type::Builtin:
 4.88M|    case Type::Pointer:
 4.88M|    case Type::BlockPointer:
 4.88M|    case Type::LValueReference:
 4.88M|    case Type::RValueReference:
 4.88M|    case Type::MemberPointer:
 4.88M|    case Type::Vector:
 4.88M|    case Type::ExtVector:
 4.88M|    case Type::ConstantMatrix:
 4.88M|    case Type::FunctionProto:
 4.88M|    case Type::FunctionNoProto:
 4.88M|    case Type::Enum:
 4.88M|    case Type::ObjCObjectPointer:
 4.88M|    case Type::Pipe:
 4.88M|    case Type::ExtInt:
 4.88M|      return TEK_Scalar;
 4.88M|
      |    // Complexes.
 7.52k|    case Type::Complex:
 7.52k|      return TEK_Complex;
 4.88M|
      |    // Arrays, records, and Objective-C objects.
  241k|    case Type::ConstantArray:
  241k|    case Type::IncompleteArray:
  241k|    case Type::VariableArray:
  241k|    case Type::Record:
  241k|    case Type::ObjCObject:
  241k|    case Type::ObjCInterface:
  241k|      return TEK_Aggregate;
  241k|
      |    // We operate on atomic values according to their underlying type.
   286|    case Type::Atomic:
   286|      type = cast<AtomicType>(type)->getValueType();
   286|      continue;
     0|    }
     0|    llvm_unreachable("unknown type kind!");
     0|  }
 5.13M|}
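
Examples of the classification (illustrative):

  // int, float *, an enum type    -> TEK_Scalar    (a single IR value)
  // _Complex double               -> TEK_Complex   (a pair of IR values)
  // struct S, int[8], ObjC object -> TEK_Aggregate (operated on in memory)
  // _Atomic(int)                  -> classified by its value type: TEK_Scalar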
      |
  287k|llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
      |  // For cleanliness, we try to avoid emitting the return block for
      |  // simple cases.
  287k|  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
  287k|
  287k|  if (CurBB) {
  148k|    assert(!CurBB->getTerminator() && "Unexpected terminated block.");
  148k|
      |    // We have a valid insert point, reuse it if it is empty or there are no
      |    // explicit jumps to the return block.
  148k|    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
  148k|      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
  148k|      delete ReturnBlock.getBlock();
  148k|      ReturnBlock = JumpDest();
  148k|    } else
    30|      EmitBlock(ReturnBlock.getBlock());
  148k|    return llvm::DebugLoc();
  148k|  }
  138k|
      |  // Otherwise, if the return block is the target of a single direct
      |  // branch then we can just put the code in that block instead. This
      |  // cleans up functions which started with a unified return block.
  138k|  if (ReturnBlock.getBlock()->hasOneUse()) {
  136k|    llvm::BranchInst *BI =
  136k|      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
  136k|    if (BI && BI->isUnconditional() &&
  136k|        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      |      // Record/return the DebugLoc of the simple 'return' expression to be used
      |      // later by the actual 'ret' instruction.
  136k|      llvm::DebugLoc Loc = BI->getDebugLoc();
  136k|      Builder.SetInsertPoint(BI->getParent());
  136k|      BI->eraseFromParent();
  136k|      delete ReturnBlock.getBlock();
  136k|      ReturnBlock = JumpDest();
  136k|      return Loc;
  136k|    }
 2.51k|  }
 2.51k|
      |  // FIXME: We are at an unreachable point, there is no reason to emit the block
      |  // unless it has uses. However, we still need a place to put the debug
      |  // region.end for now.
 2.51k|
 2.51k|  EmitBlock(ReturnBlock.getBlock());
 2.51k|  return llvm::DebugLoc();
 2.51k|}
      |
 1.14M|static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
 1.14M|  if (!BB) return;
 7.71k|  if (!BB->use_empty())
 7.71k|    return CGF.CurFn->getBasicBlockList().push_back(BB);
     8|  delete BB;
     8|}
      |
  287k|void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  287k|  assert(BreakContinueStack.empty() &&
  287k|         "mismatched push/pop in break/continue stack!");
  287k|
  287k|  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
 13.4k|    && NumSimpleReturnExprs == NumReturnExprs
 12.1k|    && ReturnBlock.getBlock()->use_empty();
      |  // Usually the return expression is evaluated before the cleanup
      |  // code.  If the function contains only a simple return statement,
      |  // such as a constant, the location before the cleanup code becomes
      |  // the last useful breakpoint in the function, because the simple
      |  // return expression will be evaluated after the cleanup code. To be
      |  // safe, set the debug location for cleanup code to the location of
      |  // the return statement.  Otherwise the cleanup code should be at the
      |  // end of the function's lexical scope.
      |  //
      |  // If there are multiple branches to the return block, the branch
      |  // instructions will get the location of the return statements and
      |  // all will be fine.
  287k|  if (CGDebugInfo *DI = getDebugInfo()) {
 90.9k|    if (OnlySimpleReturnStmts)
   586|      DI->EmitLocation(Builder, LastStopPoint);
 90.3k|    else
 90.3k|      DI->EmitLocation(Builder, EndLoc);
 90.9k|  }
  287k|
      |  // Pop any cleanups that might have been associated with the
      |  // parameters.  Do this in whatever block we're currently in; it's
      |  // important to do this before we enter the return block or return
      |  // edges will be *really* confused.
  287k|  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  287k|  bool HasOnlyLifetimeMarkers =
  287k|      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  287k|  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  287k|  if (HasCleanups) {
      |    // Make sure the line table doesn't jump back into the body for
      |    // the ret after it's been at EndLoc.
 8.00k|    Optional<ApplyDebugLocation> AL;
 8.00k|    if (CGDebugInfo *DI = getDebugInfo()) {
 1.73k|      if (OnlySimpleReturnStmts)
   586|        DI->EmitLocation(Builder, EndLoc);
 1.14k|      else
      |        // We may not have a valid end location. Try to apply it anyway, and
      |        // fall back to an artificial location if needed.
 1.14k|        AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
 1.73k|    }
 8.00k|
 8.00k|    PopCleanupBlocks(PrologueCleanupDepth);
 8.00k|  }
  287k|
      |  // Emit function epilog (to return).
  287k|  llvm::DebugLoc Loc = EmitReturnBlock();
  287k|
  287k|  if (ShouldInstrumentFunction()) {
    11|    if (CGM.getCodeGenOpts().InstrumentFunctions)
     6|      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    11|    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
     4|      CurFn->addFnAttr("instrument-function-exit-inlined",
     4|                       "__cyg_profile_func_exit");
    11|  }
  287k|
      |  // Emit debug descriptor for function end.
  287k|  if (CGDebugInfo *DI = getDebugInfo())
 90.9k|    DI->EmitFunctionEnd(Builder, CurFn);
  287k|
      |  // Reset the debug location to that of the simple 'return' expression, if any,
      |  // rather than that of the end of the function's scope '}'.
  287k|  ApplyDebugLocation AL(*this, Loc);
  287k|  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  287k|  EmitEndEHSpec(CurCodeDecl);
  287k|
  287k|  assert(EHStack.empty() &&
  287k|         "did not remove all scopes from cleanup stack!");
  287k|
      |  // If someone did an indirect goto, emit the indirect goto block at the end of
      |  // the function.
  287k|  if (IndirectBranch) {
    35|    EmitBlock(IndirectBranch->getParent());
    35|    Builder.ClearInsertionPoint();
    35|  }
  287k|
      |  // If some of our locals escaped, insert a call to llvm.localescape in the
      |  // entry block.
  287k|  if (!EscapedLocals.empty()) {
      |    // Invert the map from local to index into a simple vector. There should be
      |    // no holes.
    35|    SmallVector<llvm::Value *, 4> EscapeArgs;
    35|    EscapeArgs.resize(EscapedLocals.size());
    35|    for (auto &Pair : EscapedLocals)
    40|      EscapeArgs[Pair.second] = Pair.first;
    35|    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
    35|        &CGM.getModule(), llvm::Intrinsic::localescape);
    35|    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
    35|  }
  287k|
      |  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  287k|  llvm::Instruction *Ptr = AllocaInsertPt;
  287k|  AllocaInsertPt = nullptr;
  287k|  Ptr->eraseFromParent();
  287k|
      |  // If someone took the address of a label but never did an indirect goto, we
      |  // made a zero entry PHI node, which is illegal, zap it now.
  287k|  if (IndirectBranch) {
    35|    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    35|    if (PN->getNumIncomingValues() == 0) {
    15|      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
    15|      PN->eraseFromParent();
    15|    }
    35|  }
  287k|
  287k|  EmitIfUsed(*this, EHResumeBlock);
  287k|  EmitIfUsed(*this, TerminateLandingPad);
  287k|  EmitIfUsed(*this, TerminateHandler);
  287k|  EmitIfUsed(*this, UnreachableBlock);
  287k|
  287k|  for (const auto &FuncletAndParent : TerminateFunclets)
    30|    EmitIfUsed(*this, FuncletAndParent.second);
  287k|
  287k|  if (CGM.getCodeGenOpts().EmitDeclMetadata)
 18.3k|    EmitDeclMetadata();
  287k|
  287k|  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
  287k|           I = DeferredReplacements.begin(),
  287k|           E = DeferredReplacements.end();
  287k|       I != E; ++I) {
    97|    I->first->replaceAllUsesWith(I->second);
    97|    I->first->eraseFromParent();
    97|  }
  287k|
      |  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
      |  // PHIs if the current function is a coroutine. We don't do it for all
      |  // functions as it may result in a slight increase in the number of
      |  // instructions if compiled with no optimizations. We do it for coroutines
      |  // as the lifetime of the CleanupDestSlot alloca makes correct coroutine
      |  // frame building very difficult.
  287k|  if (NormalCleanupDest.isValid() && isCoroutine()) {
    51|    llvm::DominatorTree DT(*CurFn);
    51|    llvm::PromoteMemToReg(
    51|        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    51|    NormalCleanupDest = Address::invalid();
    51|  }
  287k|
      |  // Scan function arguments for vector width.
  287k|  for (llvm::Argument &A : CurFn->args())
  472k|    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
  112k|      LargestVectorWidth =
  112k|          std::max((uint64_t)LargestVectorWidth,
  112k|                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  287k|
      |  // Update vector width based on return type.
  287k|  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
 52.2k|    LargestVectorWidth =
 52.2k|        std::max((uint64_t)LargestVectorWidth,
 52.2k|                 VT->getPrimitiveSizeInBits().getKnownMinSize());
  287k|
      |  // Add the required-vector-width attribute. This contains the max width from:
      |  // 1. min-vector-width attribute used in the source program.
      |  // 2. Any builtins used that have a vector width specified.
      |  // 3. Values passed in and out of inline assembly.
      |  // 4. Width of vector arguments and return types for this function.
      |  // 5. Width of vector arguments and return types for functions called by
      |  //    this function.
  287k|  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
  287k|
      |  // If we generated an unreachable return block, delete it now.
  287k|  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
   952|    Builder.ClearInsertionPoint();
   952|    ReturnBlock.getBlock()->eraseFromParent();
   952|  }
  287k|  if (ReturnValue.isValid()) {
  139k|    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
  139k|    if (RetAlloca && RetAlloca->use_empty()) {
  123k|      RetAlloca->eraseFromParent();
  123k|      ReturnValue = Address::invalid();
  123k|    }
  139k|  }
  287k|}
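
A worked example of the vector-width bookkeeping above (hypothetical
signature): a function taking a <4 x i32> argument (128 bits) and returning
<8 x float> (256 bits) ends up with LargestVectorWidth = max(128, 256), so it
receives "min-legal-vector-width"="256" unless a wider value was already
recorded from builtins or inline assembly.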
      |
      |/// ShouldInstrumentFunction - Return true if the current function should be
      |/// instrumented with __cyg_profile_func_* calls
  574k|bool CodeGenFunction::ShouldInstrumentFunction() {
  574k|  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
  574k|      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
  574k|      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
  574k|    return false;
    32|  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    10|    return false;
    22|  return true;
    22|}
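
The attributes set here name the classic -finstrument-functions hooks, which
user code is expected to provide, e.g.:

  extern "C" void __cyg_profile_func_enter(void *Fn, void *CallSite);
  extern "C" void __cyg_profile_func_exit(void *Fn, void *CallSite);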
      |
      |/// ShouldXRayInstrumentFunction - Return true if the current function should
      |/// be instrumented with XRay nop sleds.
  574k|bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  574k|  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
  574k|}
      |
      |/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
      |/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
     2|bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
     2|  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
     2|         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
     1|          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
     1|              XRayInstrKind::Custom);
     2|}
      |
     2|bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
     2|  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
     2|         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
     1|          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
     1|              XRayInstrKind::Typed);
     2|}
      |
      |llvm::Constant *
      |CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
   113|                                            llvm::Constant *Addr) {
      |  // Addresses stored in prologue data can't require run-time fixups and must
      |  // be PC-relative. Run-time fixups are undesirable because they necessitate
      |  // writable text segments, which are unsafe. And absolute addresses are
      |  // undesirable because they break PIE mode.
   113|
      |  // Add a layer of indirection through a private global. Taking its address
      |  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
   113|  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
   113|                                      /*isConstant=*/true,
   113|                                      llvm::GlobalValue::PrivateLinkage, Addr);
   113|
      |  // Create a PC-relative address.
   113|  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
   113|  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
   113|  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
   113|  return (IntPtrTy == Int32Ty)
    70|             ? PCRelAsInt
    43|             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
   113|}
      |
      |llvm::Value *
      |CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
     8|                                          llvm::Value *EncodedAddr) {
      |  // Reconstruct the address of the global.
     8|  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
     8|  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
     8|  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
     8|  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
     8|
      |  // Load the original pointer through the global.
     8|  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
     8|                            "decoded_addr");
     8|}
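
The encode/decode pair round-trips as plain pointer arithmetic; a standalone
sketch with made-up addresses (assumes, as the truncation above does, that
the global-to-function distance fits in 32 bits):

  #include <cassert>
  #include <cstdint>
  int main() {
    std::uintptr_t Func = 0x400000, Global = 0x601000;       // hypothetical
    auto Encoded = static_cast<std::int32_t>(Global - Func); // PC-relative
    std::uintptr_t Decoded =
        Func + static_cast<std::intptr_t>(Encoded);          // sext + add
    assert(Decoded == Global); // the prologue check then loads through this
    return 0;
  }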
      |
      |void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
      |                                               llvm::Function *Fn)
 1.62k|{
 1.62k|  if (!FD->hasAttr<OpenCLKernelAttr>())
 1.20k|    return;
   414|
   414|  llvm::LLVMContext &Context = getLLVMContext();
   414|
   414|  CGM.GenOpenCLArgMetadata(Fn, FD, this);
   414|
   414|  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
     2|    QualType HintQTy = A->getTypeHint();
     2|    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
     2|    bool IsSignedInteger =
     2|        HintQTy->isSignedIntegerType() ||
     1|        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
     2|    llvm::Metadata *AttrMDArgs[] = {
     2|        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
     2|            CGM.getTypes().ConvertType(A->getTypeHint()))),
     2|        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
     2|            llvm::IntegerType::get(Context, 32),
     1|            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
     2|    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
     2|  }
   414|
   414|  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
     1|    llvm::Metadata *AttrMDArgs[] = {
     1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
     1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
     1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
     1|    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
     1|  }
   414|
   414|  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
     7|    llvm::Metadata *AttrMDArgs[] = {
     7|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
     7|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
     7|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
     7|    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
     7|  }
   414|
   414|  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
     1|          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
     1|    llvm::Metadata *AttrMDArgs[] = {
     1|        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
     1|    Fn->setMetadata("intel_reqd_sub_group_size",
     1|                    llvm::MDNode::get(Context, AttrMDArgs));
     1|  }
   414|}
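
Kernel attributes that trigger this metadata, as they might appear in
OpenCL C source (illustrative):

  __attribute__((vec_type_hint(float4)))
  __attribute__((reqd_work_group_size(8, 8, 1)))
  __kernel void scale(__global float4 *Buf) { /* ... */ }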
      |
      |/// Determine whether the function F ends with a return stmt.
  147k|static bool endsWithReturn(const Decl* F) {
  147k|  const Stmt *Body = nullptr;
  147k|  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
  109k|    Body = FD->getBody();
 38.0k|  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
 1.22k|    Body = OMD->getBody();
  147k|
  147k|  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
  109k|    auto LastStmt = CS->body_rbegin();
  109k|    if (LastStmt != CS->body_rend())
 66.4k|      return isa<ReturnStmt>(*LastStmt);
 81.5k|  }
 81.5k|  return false;
 81.5k|}
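
Illustrative inputs (assuming trivial bodies):

  void f() { g(); return; }   // endsWithReturn -> true
  void h() { g(); }           // endsWithReturn -> false; the implicit
                              // return is counted by StartFunction below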
      |
   328|void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
   328|  if (SanOpts.has(SanitizerKind::Thread)) {
     4|    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
     4|    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
     4|  }
   328|}
      |
      |/// Check if the return value of this function requires sanitization.
  427k|bool CodeGenFunction::requiresReturnValueCheck() const {
  427k|  return requiresReturnValueNullabilityCheck() ||
  427k|         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
   109|          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
  427k|}
      |
    35|static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
    35|  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
    35|  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
     3|      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
     2|      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    33|    return false;
     2|
     2|  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
     0|    return false;
     2|
     2|  if (MD->getNumParams() == 2) {
     1|    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
     1|    if (!PT || !PT->isVoidPointerType() ||
     1|        !PT->getPointeeType().isConstQualified())
     0|      return false;
     2|  }
     2|
     2|  return true;
     2|}
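
The shapes this predicate accepts are the two classic STL allocate
signatures (sketch):

  T *allocate(std::size_t N);                    // one parameter
  T *allocate(std::size_t N, const void *Hint);  // two; hint is const void*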
      |
      |/// Return the UBSan prologue signature for \p FD if one is available.
      |static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
   203|                                            const FunctionDecl *FD) {
   203|  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    96|    if (!MD->isStatic())
    90|      return nullptr;
   113|  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
   113|}
      |
      |void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
      |                                    llvm::Function *Fn,
      |                                    const CGFunctionInfo &FnInfo,
      |                                    const FunctionArgList &Args,
      |                                    SourceLocation Loc,
  287k|                                    SourceLocation StartLoc) {
  287k|  assert(!CurFn &&
  287k|         "Do not use a CodeGenFunction object for more than one function");
  287k|
  287k|  const Decl *D = GD.getDecl();
  287k|
  287k|  DidCallStackSave = false;
  287k|  CurCodeDecl = D;
  287k|  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
  246k|    if (FD->usesSEHTry())
   102|      CurSEHParent = FD;
  279k|  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  287k|  FnRetTy = RetTy;
  287k|  CurFn = Fn;
  287k|  CurFnInfo = &FnInfo;
  287k|  assert(CurFn->isDeclaration() && "Function already has body?");
  287k|
      |  // If this function has been blacklisted for any of the enabled sanitizers,
      |  // disable the sanitizer for the function.
  287k|  do {
  287k|#define SANITIZER(NAME, ID)                                                    \
  796k|  if (SanOpts.empty())                                                         \
  283k|    break;                                                                     \
  230k|  if (SanOpts.has(SanitizerKind::ID))                                          \
 7.40k|    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
    52|      SanOpts.set(SanitizerKind::ID, false);
  287k|
  287k|#include "clang/Basic/Sanitizers.def"
  230k|#undef SANITIZER
 4.17k|  } while (0);
  287k|
  287k|  if (D) {
      |    // Apply the no_sanitize* attributes to SanOpts.
   946|    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
   946|      SanitizerMask mask = Attr->getMask();
   946|      SanOpts.Mask &= ~mask;
   946|      if (mask & SanitizerKind::Address)
    52|        SanOpts.set(SanitizerKind::KernelAddress, false);
   946|      if (mask & SanitizerKind::KernelAddress)
     5|        SanOpts.set(SanitizerKind::Address, false);
   946|      if (mask & SanitizerKind::HWAddress)
     5|        SanOpts.set(SanitizerKind::KernelHWAddress, false);
   946|      if (mask & SanitizerKind::KernelHWAddress)
     5|        SanOpts.set(SanitizerKind::HWAddress, false);
   946|    }
  279k|  }
  287k|
      |  // Apply sanitizer attributes to the function.
  287k|  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
 1.03k|    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  287k|  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
    31|    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  287k|  if (SanOpts.has(SanitizerKind::MemTag))
    37|    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
  287k|  if (SanOpts.has(SanitizerKind::Thread))
    65|    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  287k|  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
   207|    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  287k|  if (SanOpts.has(SanitizerKind::SafeStack))
    14|    Fn->addFnAttr(llvm::Attribute::SafeStack);
  287k|  if (SanOpts.has(SanitizerKind::ShadowCallStack))
     1|    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
  287k|
      |  // Apply fuzzing attribute to the function.
  287k|  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
     2|    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
  287k|
      |  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
      |  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
  287k|  if (SanOpts.has(SanitizerKind::Thread)) {
    65|    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
     3|      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
     3|      if (OMD->getMethodFamily() == OMF_dealloc ||
     2|          OMD->getMethodFamily() == OMF_initialize ||
     3|          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
     3|        markAsIgnoreThreadCheckingAtRuntime(Fn);
     3|      }
     3|    }
    65|  }
  287k|
      |  // Ignore unrelated casts in STL allocate() since the allocator must cast
      |  // from void* to T* before object initialization completes. Don't match on the
      |  // namespace because not all allocators are in std::.
  287k|  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    35|    if (matchesStlAllocatorFn(D, getContext()))
     2|      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
    35|  }
  287k|
      |  // Ignore null checks in coroutine functions since the coroutines passes
      |  // are not aware of how to move the extra UBSan instructions across the split
      |  // coroutine boundaries.
  287k|  if (D && SanOpts.has(SanitizerKind::Null))
   243|    if (const auto *FD = dyn_cast<FunctionDecl>(D))
   243|      if (FD->getBody() &&
   243|          FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
     1|        SanOpts.Mask &= ~SanitizerKind::Null;
  287k|
      |  // Apply xray attributes to the function (as a string, for now)
  287k|  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    80|    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
    80|            XRayInstrKind::FunctionEntry) ||
     5|        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
    76|            XRayInstrKind::FunctionExit)) {
    76|      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
    35|        Fn->addFnAttr("function-instrument", "xray-always");
    76|      if (XRayAttr->neverXRayInstrument())
    34|        Fn->addFnAttr("function-instrument", "xray-never");
    76|      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
     1|        if (ShouldXRayInstrumentFunction())
     1|          Fn->addFnAttr("xray-log-args",
     1|                        llvm::utostr(LogArgs->getArgumentCount()));
    76|    }
  287k|  } else {
  287k|    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
    10|      Fn->addFnAttr(
    10|          "xray-instruction-threshold",
    10|          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  287k|  }
  287k|
  287k|  if (ShouldXRayInstrumentFunction()) {
    88|    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
     1|      Fn->addFnAttr("xray-ignore-loops");
    88|
    88|    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
    88|            XRayInstrKind::FunctionExit))
     6|      Fn->addFnAttr("xray-skip-exit");
    88|
    88|    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
    88|            XRayInstrKind::FunctionEntry))
     6|      Fn->addFnAttr("xray-skip-entry");
    88|  }
  287k|
  287k|  unsigned Count, Offset;
  287k|  if (const auto *Attr =
    14|          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    14|    Count = Attr->getCount();
    14|    Offset = Attr->getOffset();
  287k|  } else {
  287k|    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
  287k|    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  287k|  }
  287k|  if (Count && Offset <= Count) {
    11|    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    11|    if (Offset)
     4|      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
    11|  }
287k
824
  // Add no-jump-tables value.
825
287k
  Fn->addFnAttr("no-jump-tables",
826
287k
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
827
287k
828
  // Add no-inline-line-tables value.
829
287k
  if (CGM.getCodeGenOpts().NoInlineLineTables)
830
4
    Fn->addFnAttr("no-inline-line-tables");
831
287k
832
  // Add profile-sample-accurate value.
833
287k
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
834
2
    Fn->addFnAttr("profile-sample-accurate");
835
287k
836
287k
  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
837
29
    Fn->addFnAttr("use-sample-profile");
838
287k
839
287k
  if (D && 
D->hasAttr<CFICanonicalJumpTableAttr>()279k
)
840
2
    Fn->addFnAttr("cfi-canonical-jump-table");
841
287k
842
287k
  if (getLangOpts().OpenCL) {
843
    // Add metadata for a kernel function.
844
1.70k
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
845
1.62k
      EmitOpenCLKernelMetadata(FD, Fn);
846
1.70k
  }
847
287k
848
  // If we are checking function types, emit a function type signature as
849
  // prologue data.
850
287k
  if (getLangOpts().CPlusPlus && 
SanOpts.has(SanitizerKind::Function)195k
) {
851
210
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
852
203
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
853
        // Remove any (C++17) exception specifications, to allow calling e.g. a
854
        // noexcept function through a non-noexcept pointer.
855
113
        auto ProtoTy =
856
113
          getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
857
113
                                                        EST_None);
858
113
        llvm::Constant *FTRTTIConst =
859
113
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
860
113
        llvm::Constant *FTRTTIConstEncoded =
861
113
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
862
113
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
863
113
                                                 FTRTTIConstEncoded};
864
113
        llvm::Constant *PrologueStructConst =
865
113
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
866
113
        Fn->setPrologueData(PrologueStructConst);
867
113
      }
868
203
    }
869
210
  }
870
287k
871
  // If we're checking nullability, we need to know whether we can check the
872
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
873
287k
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
874
60
    auto Nullability = FnRetTy->getNullability(getContext());
875
60
    if (Nullability && 
*Nullability == NullabilityKind::NonNull17
) {
876
15
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
877
1
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
878
14
        RetValNullabilityPrecondition =
879
14
            llvm::ConstantInt::getTrue(getLLVMContext());
880
15
    }
881
60
  }
882
287k
883
  // If we're in C++ mode and the function name is "main", it is guaranteed
884
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
885
  // used within a program").
886
  //
887
  // OpenCL C 2.0 v2.2-11 s6.9.i:
888
  //     Recursion is not supported.
889
  //
890
  // SYCL v1.2.1 s3.10:
891
  //     kernels cannot include RTTI information, exception classes,
892
  //     recursive code, virtual functions or make use of C++ libraries that
893
  //     are not compiled for the device.
894
287k
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
895
246k
    if ((getLangOpts().CPlusPlus && 
FD->isMain()157k
) ||
getLangOpts().OpenCL243k
||
896
241k
        getLangOpts().SYCLIsDevice ||
897
241k
        (getLangOpts().CUDA && 
FD->hasAttr<CUDAGlobalAttr>()591
))
898
5.36k
      Fn->addFnAttr(llvm::Attribute::NoRecurse);
899
246k
  }
900
287k
901
287k
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
902
246k
    Builder.setIsFPConstrained(FD->usesFPIntrin());
903
246k
    if (FD->usesFPIntrin())
904
1.60k
      Fn->addFnAttr(llvm::Attribute::StrictFP);
905
246k
  }
906
287k
907
  // If a custom alignment is used, force realigning to this alignment on
908
  // any main function which certainly will need it.
909
287k
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
910
246k
    if ((FD->isMain() || 
FD->isMSVCRTEntryPoint()241k
) &&
911
5.15k
        CGM.getCodeGenOpts().StackAlignment)
912
1
      Fn->addFnAttr("stackrealign");
913
287k
914
287k
  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
915
287k
916
  // Create a marker to make it easy to insert allocas into the entryblock
917
  // later.  Don't create this with the builder, because we don't want it
918
  // folded.
919
287k
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
920
287k
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
921
287k
922
287k
  ReturnBlock = getJumpDestInCurrentScope("return");
923
287k
924
287k
  Builder.SetInsertPoint(EntryBB);
925
287k
926
  // If we're checking the return value, allocate space for a pointer to a
927
  // precise source location of the checked return statement.
928
287k
  if (requiresReturnValueCheck()) {
929
19
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
930
19
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
931
19
  }
932
287k
933
  // Emit subprogram debug descriptor.
934
287k
  if (CGDebugInfo *DI = getDebugInfo()) {
935
    // Reconstruct the type from the argument list so that implicit parameters,
936
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
937
    // convention.
938
90.9k
    CallingConv CC = CallingConv::CC_C;
939
90.9k
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
940
88.1k
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
941
88.1k
        CC = SrcFnTy->getCallConv();
942
90.9k
    SmallVector<QualType, 16> ArgTypes;
943
90.9k
    for (const VarDecl *VD : Args)
944
153k
      ArgTypes.push_back(VD->getType());
945
90.9k
    QualType FnType = getContext().getFunctionType(
946
90.9k
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
947
90.9k
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
948
90.9k
                          Builder);
949
90.9k
  }
950
287k
951
287k
  if (ShouldInstrumentFunction()) {
952
11
    if (CGM.getCodeGenOpts().InstrumentFunctions)
953
6
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
954
11
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
955
4
      CurFn->addFnAttr("instrument-function-entry-inlined",
956
4
                       "__cyg_profile_func_enter");
957
11
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
958
1
      CurFn->addFnAttr("instrument-function-entry-inlined",
959
1
                       "__cyg_profile_func_enter_bare");
960
11
  }
961
287k
962
  // Since emitting the mcount call here impacts optimizations such as function
963
  // inlining, we just add an attribute to insert a mcount call in backend.
964
  // The attribute "counting-function" is set to mcount function name which is
965
  // architecture dependent.
966
287k
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
967
    // Calls to fentry/mcount should not be generated if function has
968
    // the no_instrument_function attribute.
969
119
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
970
96
      if (CGM.getCodeGenOpts().CallFEntry)
971
4
        Fn->addFnAttr("fentry-call", "true");
972
92
      else {
973
92
        Fn->addFnAttr("instrument-function-entry-inlined",
974
92
                      getTarget().getMCountName());
975
92
      }
976
96
      if (CGM.getCodeGenOpts().MNopMCount) {
977
2
        if (!CGM.getCodeGenOpts().CallFEntry)
978
1
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
979
1
            << "-mnop-mcount" << "-mfentry";
980
2
        Fn->addFnAttr("mnop-mcount");
981
2
      }
982
96
983
96
      if (CGM.getCodeGenOpts().RecordMCount) {
984
2
        if (!CGM.getCodeGenOpts().CallFEntry)
985
1
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
986
1
            << "-mrecord-mcount" << "-mfentry";
987
2
        Fn->addFnAttr("mrecord-mcount");
988
2
      }
989
96
    }
990
119
  }
991
287k
992
287k
  if (CGM.getCodeGenOpts().PackedStack) {
993
2
    if (getContext().getTargetInfo().getTriple().getArch() !=
994
2
        llvm::Triple::systemz)
995
1
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
996
1
        << "-mpacked-stack";
997
2
    Fn->addFnAttr("packed-stack");
998
2
  }
999
287k
1000
287k
  if (RetTy->isVoidType()) {
1001
    // Void type; nothing to return.
1002
147k
    ReturnValue = Address::invalid();
1003
147k
1004
    // Count the implicit return.
1005
147k
    if (!endsWithReturn(D))
1006
145k
      ++NumReturnExprs;
1007
139k
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1008
    // Indirect return; emit returned value directly into sret slot.
1009
    // This reduces code size, and affects correctness in C++.
1010
2.48k
    auto AI = CurFn->arg_begin();
1011
2.48k
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1012
49
      ++AI;
1013
2.48k
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
1014
2.48k
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1015
992
      ReturnValuePointer =
1016
992
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
1017
992
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
1018
992
                              ReturnValue.getPointer(), Int8PtrTy),
1019
992
                          ReturnValuePointer);
1020
992
    }
1021
136k
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1022
5
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1023
    // Load the sret pointer from the argument struct and return into that.
1024
5
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1025
5
    llvm::Function::arg_iterator EI = CurFn->arg_end();
1026
5
    --EI;
1027
5
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
1028
5
    ReturnValuePointer = Address(Addr, getPointerAlign());
1029
5
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
1030
5
    ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
1031
136k
  } else {
1032
136k
    ReturnValue = CreateIRTemp(RetTy, "retval");
1033
136k
1034
    // Tell the epilog emitter to autorelease the result.  We do this
1035
    // now so that various specialized functions can suppress it
1036
    // during their IR-generation.
1037
136k
    if (getLangOpts().ObjCAutoRefCount &&
1038
381
        !CurFnInfo->isReturnsRetained() &&
1039
347
        RetTy->isObjCRetainableType())
1040
140
      AutoreleaseResult = true;
1041
136k
  }
1042
287k
1043
287k
  EmitStartEHSpec(CurCodeDecl);
1044
287k
1045
287k
  PrologueCleanupDepth = EHStack.stable_begin();
1046
287k
1047
  // Emit OpenMP specific initialization of the device functions.
1048
287k
  if (getLangOpts().OpenMP && 
CurCodeDecl66.5k
)
1049
60.4k
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1050
287k
1051
287k
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1052
287k
1053
287k
  if (D && 
isa<CXXMethodDecl>(D)279k
&&
cast<CXXMethodDecl>(D)->isInstance()102k
) {
1054
95.2k
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1055
95.2k
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1056
95.2k
    if (MD->getParent()->isLambda() &&
1057
1.65k
        MD->getOverloadedOperator() == OO_Call) {
1058
      // We're in a lambda; figure out the captures.
1059
1.48k
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
1060
1.48k
                                        LambdaThisCaptureField);
1061
1.48k
      if (LambdaThisCaptureField) {
1062
        // If the lambda captures the object referred to by '*this' - either by
1063
        // value or by reference, make sure CXXThisValue points to the correct
1064
        // object.
1065
82
1066
        // Get the lvalue for the field (which is a copy of the enclosing object
1067
        // or contains the address of the enclosing object).
1068
82
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1069
82
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
1070
          // If the enclosing object was captured by value, just use its address.
1071
1
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1072
81
        } else {
1073
          // Load the lvalue pointed to by the field, since '*this' was captured
1074
          // by reference.
1075
81
          CXXThisValue =
1076
81
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1077
81
        }
1078
82
      }
1079
2.52k
      for (auto *FD : MD->getParent()->fields()) {
1080
2.52k
        if (FD->hasCapturedVLAType()) {
1081
24
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1082
24
                                           SourceLocation()).getScalarVal();
1083
24
          auto VAT = FD->getCapturedVLAType();
1084
24
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1085
24
        }
1086
2.52k
      }
1087
93.7k
    } else {
1088
      // Not in a lambda; just use 'this' from the method.
1089
      // FIXME: Should we generate a new load for each use of 'this'?  The
1090
      // fast register allocator would be happier...
1091
93.7k
      CXXThisValue = CXXABIThisValue;
1092
93.7k
    }
1093
95.2k
1094
    // Check the 'this' pointer once per function, if it's available.
1095
95.2k
    if (CXXABIThisValue) {
1096
95.2k
      SanitizerSet SkippedChecks;
1097
95.2k
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
1098
95.2k
      QualType ThisTy = MD->getThisType();
1099
95.2k
1100
      // If this is the call operator of a lambda with no capture-default, it
1101
      // may have a static invoker function, which may call this operator with
1102
      // a null 'this' pointer.
1103
95.2k
      if (isLambdaCallOperator(MD) &&
1104
1.48k
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1105
308
        SkippedChecks.set(SanitizerKind::Null, true);
1106
95.2k
1107
37.8k
      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
1108
57.3k
                                                : TCK_MemberCall,
1109
95.2k
                    Loc, CXXABIThisValue, ThisTy,
1110
95.2k
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1111
95.2k
                    SkippedChecks);
1112
95.2k
    }
1113
95.2k
  }
1114
287k
1115
  // If any of the arguments have a variably modified type, make sure to
1116
  // emit the type size.
1117
287k
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1118
762k
       i != e; 
++i475k
) {
1119
475k
    const VarDecl *VD = *i;
1120
475k
1121
    // Dig out the type as written from ParmVarDecls; it's unclear whether
1122
    // the standard (C99 6.9.1p10) requires this, but we're following the
1123
    // precedent set by gcc.
1124
475k
    QualType Ty;
1125
475k
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1126
295k
      Ty = PVD->getOriginalType();
1127
180k
    else
1128
180k
      Ty = VD->getType();
1129
475k
1130
475k
    if (Ty->isVariablyModifiedType())
1131
137
      EmitVariablyModifiedType(Ty);
1132
475k
  }
1133
  // Emit a location at the end of the prologue.
1134
287k
  if (CGDebugInfo *DI = getDebugInfo())
1135
90.9k
    DI->EmitLocation(Builder, StartLoc);
1136
287k
1137
  // TODO: Do we need to handle this in two places like we do with
1138
  // target-features/target-cpu?
1139
287k
  if (CurFuncDecl)
1140
273k
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1141
7.34k
      LargestVectorWidth = VecWidth->getVectorWidth();
1142
287k
}
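
Source-level trigger for the patchable-entry logic above (illustrative):

  __attribute__((patchable_function_entry(5, 2))) void Traced();
  // Count = 5, Offset = 2, so the emitted function gets
  // "patchable-function-entry"="3" and "patchable-function-prefix"="2".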
      |
  190k|void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  190k|  incrementProfileCounter(Body);
  190k|  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
  190k|    EmitCompoundStmtWithoutScope(*S);
    55|  else
    55|    EmitStmt(Body);
  190k|}
      |
      |/// When instrumenting to collect profile data, the counts for some blocks
      |/// such as switch cases need to not include the fall-through counts, so
      |/// emit a branch around the instrumentation code. When not instrumenting,
      |/// this just calls EmitBlock().
      |void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
 1.67k|                                               const Stmt *S) {
 1.67k|  llvm::BasicBlock *SkipCountBB = nullptr;
 1.67k|  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
      |    // When instrumenting for profiling, the fallthrough to certain
      |    // statements needs to skip over the instrumentation code so that we
      |    // get an accurate count.
    22|    SkipCountBB = createBasicBlock("skipcount");
    22|    EmitBranch(SkipCountBB);
    22|  }
 1.67k|  EmitBlock(BB);
 1.67k|  uint64_t CurrentCount = getCurrentProfileCount();
 1.67k|  incrementProfileCounter(S);
 1.67k|  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
 1.67k|  if (SkipCountBB)
    22|    EmitBlock(SkipCountBB);
 1.67k|}
      |
      |/// Tries to mark the given function nounwind based on the
      |/// non-existence of any throwing calls within it.  We believe this is
      |/// lightweight enough to do at -O0.
  184k|static void TryMarkNoThrow(llvm::Function *F) {
      |  // LLVM treats 'nounwind' on a function as part of the type, so we
      |  // can't do this on functions that can be overwritten.
  184k|  if (F->isInterposable()) return;
  184k|
  184k|  for (llvm::BasicBlock &BB : *F)
  285k|    for (llvm::Instruction &I : BB)
 2.61M|      if (I.mayThrow())
 66.7k|        return;
  184k|
  117k|  F->setDoesNotThrow();
  117k|}
1189
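For illustration (assumed example, not part of this report), the source-level effect of TryMarkNoThrow:

    extern "C" int add(int a, int b) {
      return a + b;         // nothing here may throw, so the emitted llvm::Function
    }                       // is marked 'nounwind', even at -O0

    int risky(int a) {
      if (a < 0) throw a;   // lowers to a call that may throw (__cxa_throw),
      return a;             // so the scan's early return keeps unwind info
    }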
1190
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1191
245k
                                               FunctionArgList &Args) {
1192
245k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1193
245k
  QualType ResTy = FD->getReturnType();
1194
245k
1195
245k
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1196
245k
  if (MD && MD->isInstance()) {   [region count: 102k]
1197
95.2k
    if (CGM.getCXXABI().HasThisReturn(GD))
1198
1.63k
      ResTy = MD->getThisType();
1199
93.6k
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1200
214
      ResTy = CGM.getContext().VoidPtrTy;
1201
95.2k
    CGM.getCXXABI().buildThisParam(*this, Args);
1202
95.2k
  }
1203
245k
1204
  // The base version of an inheriting constructor whose constructed base is a
1205
  // virtual base is not passed any arguments (because it doesn't actually call
1206
  // the inherited constructor).
1207
245k
  bool PassedParams = true;
1208
245k
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1209
37.9k
    if (auto Inherited = CD->getInheritedConstructor())
1210
225
      PassedParams =
1211
225
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1212
245k
1213
245k
  if (PassedParams) {
1214
293k
    for (auto *Param : FD->parameters()) {
1215
293k
      Args.push_back(Param);
1216
293k
      if (!Param->hasAttr<PassObjectSizeAttr>())
1217
293k
        continue;
1218
88
1219
88
      auto *Implicit = ImplicitParamDecl::Create(
1220
88
          getContext(), Param->getDeclContext(), Param->getLocation(),
1221
88
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1222
88
      SizeArguments[Param] = Implicit;
1223
88
      Args.push_back(Implicit);
1224
88
    }
1225
245k
  }
1226
245k
1227
245k
  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))   [region counts: 102k, 102k, 64.5k]
1228
54.1k
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1229
245k
1230
245k
  return ResTy;
1231
245k
}
1232
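A sketch of the implicit size argument created for pass_object_size parameters; the declaration and caller below are assumed examples:

    // Each pass_object_size parameter is followed by a hidden size_t argument
    // (the ImplicitParamDecl recorded in SizeArguments above).
    extern "C" void fill(char *buf __attribute__((pass_object_size(0))), char c);

    void caller(char (&arr)[32]) {
      fill(arr, 'x');   // the caller materializes the hidden argument, roughly
    }                   // as __builtin_object_size(arr, 0), i.e. 32 here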
1233
static bool
1234
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1235
21
                                             const ASTContext &Context) {
1236
21
  QualType T = FD->getReturnType();
1237
  // Avoid the optimization for functions that return a record type with a
1238
  // trivial destructor or another trivially copyable type.
1239
21
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1240
12
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1241
12
      return !ClassDecl->hasTrivialDestructor();
1242
9
  }
1243
9
  return !T.isTriviallyCopyableType(Context);
1244
9
}
1245
1246
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1247
245k
                                   const CGFunctionInfo &FnInfo) {
1248
245k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1249
245k
  CurGD = GD;
1250
245k
1251
245k
  FunctionArgList Args;
1252
245k
  QualType ResTy = BuildFunctionArgList(GD, Args);
1253
245k
1254
  // Check if we should generate debug info for this function.
1255
245k
  if (FD->hasAttr<NoDebugAttr>())
1256
12.4k
    DebugInfo = nullptr; // disable debug info indefinitely for this function
1257
245k
1258
  // The function might not have a body if we're generating thunks for a
1259
  // function declaration.
1260
245k
  SourceRange BodyRange;
1261
245k
  if (Stmt *Body = FD->getBody())
1262
245k
    BodyRange = Body->getSourceRange();
1263
128
  else
1264
128
    BodyRange = FD->getLocation();
1265
245k
  CurEHLocation = BodyRange.getEnd();
1266
245k
1267
  // Use the location of the start of the function to determine where
1268
  // the function definition is located. By default use the location
1269
  // of the declaration as the location for the subprogram. A function
1270
  // may lack a declaration in the source code if it is created by code
1271
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1272
245k
  SourceLocation Loc = FD->getLocation();
1273
245k
1274
  // If this is a function specialization then use the pattern body
1275
  // as the location for the function.
1276
245k
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1277
87.3k
    if (SpecDecl->hasBody(SpecDecl))
1278
87.3k
      Loc = SpecDecl->getLocation();
1279
245k
1280
245k
  Stmt *Body = FD->getBody();
1281
245k
1282
  // Initialize helper which will detect jumps which can cause invalid lifetime
1283
  // markers.
1284
245k
  if (Body && ShouldEmitLifetimeMarkers)   [region count: 245k]
1285
31.6k
    Bypasses.Init(Body);
1286
245k
1287
  // Emit the standard function prologue.
1288
245k
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1289
245k
1290
  // Generate the body of the function.
1291
245k
  PGO.assignRegionCounters(GD, CurFn);
1292
245k
  if (isa<CXXDestructorDecl>(FD))
1293
16.1k
    EmitDestructorBody(Args);
1294
229k
  else if (isa<CXXConstructorDecl>(FD))
1295
37.8k
    EmitConstructorBody(Args);
1296
191k
  else if (getLangOpts().CUDA &&
1297
436
           !getLangOpts().CUDAIsDevice &&
1298
141
           FD->hasAttr<CUDAGlobalAttr>())
1299
44
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1300
191k
  else if (isa<CXXMethodDecl>(FD) &&
1301
48.3k
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1302
    // The lambda static invoker function is special, because it forwards or
1303
    // clones the body of the function call operator (but is actually static).
1304
49
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1305
191k
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&   [region count: 709]
1306
705
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1307
689
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {   [region count: 202]
1308
    // Implicit copy-assignment gets the same special treatment as implicit
1309
    // copy-constructors.
1310
689
    emitImplicitAssignmentOperatorBody(Args);
1311
190k
  } else if (Body) {
1312
190k
    EmitFunctionBody(Body);
1313
190k
  } else
1314
0
    llvm_unreachable("no definition for emitted function");
1315
245k
1316
  // C++11 [stmt.return]p2:
1317
  //   Flowing off the end of a function [...] results in undefined behavior in
1318
  //   a value-returning function.
1319
  // C11 6.9.1p12:
1320
  //   If the '}' that terminates a function is reached, and the value of the
1321
  //   function call is used by the caller, the behavior is undefined.
1322
245k
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&   [region counts: 157k, 153k]
1323
153k
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {   [region count: 57.5k]
1324
178
    bool ShouldEmitUnreachable =
1325
178
        CGM.getCodeGenOpts().StrictReturn ||
1326
21
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1327
178
    if (SanOpts.has(SanitizerKind::Return)) {
1328
1
      SanitizerScope SanScope(this);
1329
1
      llvm::Value *IsFalse = Builder.getFalse();
1330
1
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1331
1
                SanitizerHandler::MissingReturn,
1332
1
                EmitCheckSourceLocation(FD->getLocation()), None);
1333
177
    } else if (ShouldEmitUnreachable) {
1334
159
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1335
128
        EmitTrapCall(llvm::Intrinsic::trap);
1336
159
    }
1337
178
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {   [region count: 177]
1338
160
      Builder.CreateUnreachable();
1339
160
      Builder.ClearInsertionPoint();
1340
160
    }
1341
178
  }
1342
245k
1343
  // Emit the standard function epilogue.
1344
245k
  FinishFunction(BodyRange.getEnd());
1345
245k
1346
  // If we haven't marked the function nothrow through other means, do
1347
  // a quick pass now to see if we can.
1348
245k
  if (!CurFn->doesNotThrow())
1349
184k
    TryMarkNoThrow(CurFn);
1350
245k
}
1351
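To make the missing-return handling above concrete, an assumed example:

    int pick(bool b) {
      if (b) return 1;
    }   // with b == false, control flows off the end: undefined behavior.
        // Per the code above, -fsanitize=return emits a MissingReturn check;
        // otherwise, with strict returns at -O0, clang emits llvm.trap() and
        // then an 'unreachable' terminator.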
1352
/// ContainsLabel - Return true if the statement contains a label in it.  If
1353
/// this statement is not executed normally, it not containing a label means
1354
/// that we can just remove the code.
1355
48.8k
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1356
  // Null statement, not a label!
1357
48.8k
  if (!S) return false;   [region count: 87]
1358
48.7k
1359
  // If this is a label, we have to emit the code, consider something like:
1360
  // if (0) {  ...  foo:  bar(); }  goto foo;
1361
  //
1362
  // TODO: If anyone cared, we could track __label__'s, since we know that you
1363
  // can't jump to one from outside their declared region.
1364
48.7k
  if (isa<LabelStmt>(S))
1365
6
    return true;
1366
48.7k
1367
  // If this is a case/default statement, and we haven't seen a switch, we have
1368
  // to emit the code.
1369
48.7k
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)   [region count: 68]
1370
0
    return true;
1371
48.7k
1372
  // If this is a switch statement, we want to ignore cases below it.
1373
48.7k
  if (isa<SwitchStmt>(S))
1374
10
    IgnoreCaseStmts = true;
1375
48.7k
1376
  // Scan subexpressions for verboten labels.
1377
48.7k
  for (const Stmt *SubStmt : S->children())
1378
35.6k
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1379
13
      return true;
1380
48.7k
1381
48.7k
  return false;
1382
48.7k
}
1383
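The comment's 'if (0)' example, expanded into a compilable unit (illustrative only):

    #include <cstdio>
    void f() {
      goto inside;
      if (0) {        // this dead-looking body cannot be removed:
      inside:         // the label is a goto target from outside the block
        std::puts("reached via goto");
      }
    }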
1384
/// containsBreak - Return true if the statement contains a break out of it.
1385
/// If the statement (recursively) contains a switch or loop with a break
1386
/// inside of it, this is fine.
1387
952
bool CodeGenFunction::containsBreak(const Stmt *S) {
1388
  // Null statement, not a label!
1389
952
  if (!S) return false;   [region count: 0]
1390
952
1391
  // If this is a switch or loop that defines its own break scope, then we can
1392
  // include it and anything inside of it.
1393
952
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1394
949
      isa<ForStmt>(S))
1395
3
    return false;
1396
949
1397
949
  if (isa<BreakStmt>(S))
1398
1
    return true;
1399
948
1400
  // Scan subexpressions for verboten breaks.
1401
948
  for (const Stmt *SubStmt : S->children())
1402
870
    if (containsBreak(SubStmt))
1403
1
      return true;
1404
948
1405
947
  return false;
1406
948
}
1407
1408
2.61k
bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1409
2.61k
  if (!S) return false;   [region count: 0]
1410
2.61k
1411
  // Some statement kinds add a scope and thus never add a decl to the current
1412
  // scope. Note, this list is longer than the list of statements that might
1413
  // have an unscoped decl nested within them, but this way is conservatively
1414
  // correct even if more statement kinds are added.
1415
2.61k
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||   [region counts: 2.61k, 2.61k]
1416
2.61k
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||   [region counts: 2.61k, 2.61k]
1417
2.60k
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1418
2.60k
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1419
15
    return false;
1420
2.60k
1421
2.60k
  if (isa<DeclStmt>(S))
1422
7
    return true;
1423
2.59k
1424
2.59k
  for (const Stmt *SubStmt : S->children())
1425
2.34k
    if (mightAddDeclToScope(SubStmt))
1426
2
      return true;
1427
2.59k
1428
2.59k
  return false;
1429
2.59k
}
1430
1431
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1432
/// to a constant, or if it does but contains a label, return false.  If it
1433
/// constant folds return true and set the boolean result in Result.
1434
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1435
                                                   bool &ResultBool,
1436
104k
                                                   bool AllowLabels) {
1437
104k
  llvm::APSInt ResultInt;
1438
104k
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1439
91.9k
    return false;
1440
12.3k
1441
12.3k
  ResultBool = ResultInt.getBoolValue();
1442
12.3k
  return true;
1443
12.3k
}
1444
1445
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1446
/// to a constant, or if it does but contains a label, return false.  If it
1447
/// constant folds return true and set the folded value.
1448
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1449
                                                   llvm::APSInt &ResultInt,
1450
104k
                                                   bool AllowLabels) {
1451
  // FIXME: Rename and handle conversion of other evaluatable things
1452
  // to bool.
1453
104k
  Expr::EvalResult Result;
1454
104k
  if (!Cond->EvaluateAsInt(Result, getContext()))
1455
92.2k
    return false;  // Not foldable, not integer or not fully evaluatable.
1456
12.4k
1457
12.4k
  llvm::APSInt Int = Result.Val.getInt();
1458
12.4k
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))   [region count: 12.4k]
1459
0
    return false;  // Contains a label.
1460
12.4k
1461
12.4k
  ResultInt = Int;
1462
12.4k
  return true;
1463
12.4k
}
1464
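A minimal assumed example of a condition that constant-folds through this path:

    void g() {
      if (sizeof(void *) == 8) {   // EvaluateAsInt succeeds and no label is
        /* emitted */              // contained, so no branch is generated:
      } else {                     // only the taken arm is emitted and the
        /* dropped */              // other arm is skipped entirely
      }
    }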
1465
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1466
/// statement) to the specified blocks.  Based on the condition, this might try
1467
/// to simplify the codegen of the conditional based on the branch.
1468
/// \param Weights The weights determined by the likelihood attributes.
1469
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1470
                                           llvm::BasicBlock *TrueBlock,
1471
                                           llvm::BasicBlock *FalseBlock,
1472
                                           uint64_t TrueCount,
1473
105k
                                           llvm::MDNode *Weights) {
1474
105k
  Cond = Cond->IgnoreParens();
1475
105k
1476
105k
  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1477
53.5k
1478
    // Handle X && Y in a condition.
1479
53.5k
    if (CondBOp->getOpcode() == BO_LAnd) {
1480
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
1481
      // folded if the case was simple enough.
1482
3.14k
      bool ConstantBool = false;
1483
3.14k
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1484
59
          ConstantBool) {
1485
        // br(1 && X) -> br(X).
1486
59
        incrementProfileCounter(CondBOp);
1487
59
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1488
59
                                    TrueCount, Weights);
1489
59
      }
1490
3.08k
1491
      // If we have "X && 1", simplify the code to use an uncond branch.
1492
      // "X && 0" would have been constant folded to 0.
1493
3.08k
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1494
20
          ConstantBool) {
1495
        // br(X && 1) -> br(X).
1496
20
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1497
20
                                    TrueCount, Weights);
1498
20
      }
1499
3.06k
1500
      // Emit the LHS as a conditional.  If the LHS conditional is false, we
1501
      // want to jump to the FalseBlock.
1502
3.06k
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1503
      // The counter tells us how often we evaluate RHS, and all of TrueCount
1504
      // can be propagated to that branch.
1505
3.06k
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1506
3.06k
1507
3.06k
      ConditionalEvaluation eval(*this);
1508
3.06k
      {
1509
3.06k
        ApplyDebugLocation DL(*this, Cond);
1510
3.06k
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1511
3.06k
                             Weights);
1512
3.06k
        EmitBlock(LHSTrue);
1513
3.06k
      }
1514
3.06k
1515
3.06k
      incrementProfileCounter(CondBOp);
1516
3.06k
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1517
3.06k
1518
      // Any temporaries created here are conditional.
1519
3.06k
      eval.begin(*this);
1520
3.06k
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount,
1521
3.06k
                           Weights);
1522
3.06k
      eval.end(*this);
1523
3.06k
1524
3.06k
      return;
1525
3.06k
    }
1526
50.3k
1527
50.3k
    if (CondBOp->getOpcode() == BO_LOr) {
1528
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
1529
      // folded if the case was simple enough.
1530
3.66k
      bool ConstantBool = false;
1531
3.66k
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1532
6
          !ConstantBool) {
1533
        // br(0 || X) -> br(X).
1534
6
        incrementProfileCounter(CondBOp);
1535
6
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1536
6
                                    TrueCount, Weights);
1537
6
      }
1538
3.65k
1539
      // If we have "X || 0", simplify the code to use an uncond branch.
1540
      // "X || 1" would have been constant folded to 1.
1541
3.65k
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1542
0
          !ConstantBool) {
1543
        // br(X || 0) -> br(X).
1544
0
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1545
0
                                    TrueCount, Weights);
1546
0
      }
1547
3.65k
1548
      // Emit the LHS as a conditional.  If the LHS conditional is true, we
1549
      // want to jump to the TrueBlock.
1550
3.65k
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1551
      // We have the count for entry to the RHS and for the whole expression
1552
      // being true, so we can divvy up the True count between the short circuit and
1553
      // the RHS.
1554
3.65k
      uint64_t LHSCount =
1555
3.65k
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1556
3.65k
      uint64_t RHSCount = TrueCount - LHSCount;
1557
3.65k
1558
3.65k
      ConditionalEvaluation eval(*this);
1559
3.65k
      {
1560
3.65k
        ApplyDebugLocation DL(*this, Cond);
1561
3.65k
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1562
3.65k
                             Weights);
1563
3.65k
        EmitBlock(LHSFalse);
1564
3.65k
      }
1565
3.65k
1566
3.65k
      incrementProfileCounter(CondBOp);
1567
3.65k
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1568
3.65k
1569
      // Any temporaries created here are conditional.
1570
3.65k
      eval.begin(*this);
1571
3.65k
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount,
1572
3.65k
                           Weights);
1573
3.65k
1574
3.65k
      eval.end(*this);
1575
3.65k
1576
3.65k
      return;
1577
3.65k
    }
1578
50.3k
  }
1579
98.9k
1580
98.9k
  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1581
    // br(!x, t, f) -> br(x, f, t)
1582
1.25k
    if (CondUOp->getOpcode() == UO_LNot) {
1583
      // Negate the count.
1584
1.25k
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1585
      // Negate the condition and swap the destination blocks.
1586
1.25k
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1587
1.25k
                                  FalseCount, Weights);
1588
1.25k
    }
1589
97.6k
  }
1590
97.6k
1591
97.6k
  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1592
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1593
0
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1594
0
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1595
0
1596
0
    ConditionalEvaluation cond(*this);
1597
0
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1598
0
                         getProfileCount(CondOp), Weights);
1599
0
1600
    // When computing PGO branch weights, we only know the overall count for
1601
    // the true block. This code is essentially doing tail duplication of the
1602
    // naive code-gen, introducing new edges for which counts are not
1603
    // available. Divide the counts proportionally between the LHS and RHS of
1604
    // the conditional operator.
1605
0
    uint64_t LHSScaledTrueCount = 0;
1606
0
    if (TrueCount) {
1607
0
      double LHSRatio =
1608
0
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
1609
0
      LHSScaledTrueCount = TrueCount * LHSRatio;
1610
0
    }
1611
0
1612
0
    cond.begin(*this);
1613
0
    EmitBlock(LHSBlock);
1614
0
    incrementProfileCounter(CondOp);
1615
0
    {
1616
0
      ApplyDebugLocation DL(*this, Cond);
1617
0
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1618
0
                           LHSScaledTrueCount, Weights);
1619
0
    }
1620
0
    cond.end(*this);
1621
0
1622
0
    cond.begin(*this);
1623
0
    EmitBlock(RHSBlock);
1624
0
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1625
0
                         TrueCount - LHSScaledTrueCount, Weights);
1626
0
    cond.end(*this);
1627
0
1628
0
    return;
1629
0
  }
1630
97.6k
1631
97.6k
  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1632
    // Conditional operator handling can give us a throw expression as a
1633
    // condition for a case like:
1634
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1635
    // Fold this to:
1636
    //   br(c, throw x, br(y, t, f))
1637
0
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1638
0
    return;
1639
0
  }
1640
97.6k
1641
  // If the branch has a condition wrapped by __builtin_unpredictable,
1642
  // create metadata that specifies that the branch is unpredictable.
1643
  // Don't bother if not optimizing because that metadata would not be used.
1644
97.6k
  llvm::MDNode *Unpredictable = nullptr;
1645
97.6k
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1646
97.6k
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {   [region count: 5.14k]
1647
65
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1648
65
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1649
3
      llvm::MDBuilder MDHelper(getLLVMContext());
1650
3
      Unpredictable = MDHelper.createUnpredictable();
1651
3
    }
1652
65
  }
1653
97.6k
1654
  // Create branch weights based on the number of times we get here and the
1655
  // number of times the condition should be true.
1656
97.6k
  if (!Weights) {
1657
97.6k
    uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1658
97.6k
    Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1659
97.6k
  }
1660
97.6k
1661
  // Emit the code with the fully general case.
1662
97.6k
  llvm::Value *CondV;
1663
97.6k
  {
1664
97.6k
    ApplyDebugLocation DL(*this, Cond);
1665
97.6k
    CondV = EvaluateExprAsBool(Cond);
1666
97.6k
  }
1667
97.6k
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1668
97.6k
}
1669
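A schematic sketch of the short-circuit lowering above, using the block names the code creates; the IR is illustrative, not verbatim output:

    // C++: if (a && b) then(); else other();
    //   br i1 %a, label %land.lhs.true, label %if.else
    // land.lhs.true:                          ; reached only when 'a' is true
    //   br i1 %b, label %if.then, label %if.else

    // C++: if (a || b) then(); else other();
    //   br i1 %a, label %if.then, label %lor.lhs.false
    // lor.lhs.false:                          ; reached only when 'a' is false
    //   br i1 %b, label %if.then, label %if.else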
1670
/// ErrorUnsupported - Print out an error that codegen doesn't support the
1671
/// specified stmt yet.
1672
1
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1673
1
  CGM.ErrorUnsupported(S, Type);
1674
1
}
1675
1676
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
1677
/// variable-length array whose elements have a non-zero bit-pattern.
1678
///
1679
/// \param baseType the inner-most element type of the array
1680
/// \param src - a char* pointing to the bit-pattern for a single
1681
/// base element of the array
1682
/// \param sizeInChars - the total size of the VLA, in chars
1683
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1684
                               Address dest, Address src,
1685
0
                               llvm::Value *sizeInChars) {
1686
0
  CGBuilderTy &Builder = CGF.Builder;
1687
0
1688
0
  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1689
0
  llvm::Value *baseSizeInChars
1690
0
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1691
0
1692
0
  Address begin =
1693
0
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1694
0
  llvm::Value *end =
1695
0
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1696
0
1697
0
  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1698
0
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1699
0
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1700
0
1701
  // Make a loop over the VLA.  C99 guarantees that the VLA element
1702
  // count must be nonzero.
1703
0
  CGF.EmitBlock(loopBB);
1704
0
1705
0
  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1706
0
  cur->addIncoming(begin.getPointer(), originBB);
1707
0
1708
0
  CharUnits curAlign =
1709
0
    dest.getAlignment().alignmentOfArrayElement(baseSize);
1710
0
1711
  // memcpy the individual element bit-pattern.
1712
0
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1713
0
                       /*volatile*/ false);
1714
0
1715
  // Go to the next element.
1716
0
  llvm::Value *next =
1717
0
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1718
0
1719
  // Leave if that's the end of the VLA.
1720
0
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1721
0
  Builder.CreateCondBr(done, contBB, loopBB);
1722
0
  cur->addIncoming(next, loopBB);
1723
0
1724
0
  CGF.EmitBlock(contBB);
1725
0
}
1726
1727
void
1728
7.59k
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1729
  // Ignore empty classes in C++.
1730
7.59k
  if (getLangOpts().CPlusPlus) {
1731
7.44k
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
1732
7.11k
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1733
6.40k
        return;
1734
1.18k
    }
1735
7.44k
  }
1736
1.18k
1737
  // Cast the dest ptr to the appropriate i8 pointer type.
1738
1.18k
  if (DestPtr.getElementType() != Int8Ty)
1739
1.18k
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1740
1.18k
1741
  // Get size and alignment info for this aggregate.
1742
1.18k
  CharUnits size = getContext().getTypeSizeInChars(Ty);
1743
1.18k
1744
1.18k
  llvm::Value *SizeVal;
1745
1.18k
  const VariableArrayType *vla;
1746
1.18k
1747
  // Don't bother emitting a zero-byte memset.
1748
1.18k
  if (size.isZero()) {
1749
    // But note that getTypeInfo returns 0 for a VLA.
1750
11
    if (const VariableArrayType *vlaType =
1751
8
          dyn_cast_or_null<VariableArrayType>(
1752
8
                                          getContext().getAsArrayType(Ty))) {
1753
8
      auto VlaSize = getVLASize(vlaType);
1754
8
      SizeVal = VlaSize.NumElts;
1755
8
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1756
8
      if (!eltSize.isOne())
1757
8
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1758
8
      vla = vlaType;
1759
3
    } else {
1760
3
      return;
1761
3
    }
1762
1.17k
  } else {
1763
1.17k
    SizeVal = CGM.getSize(size);
1764
1.17k
    vla = nullptr;
1765
1.17k
  }
1766
1.18k
1767
  // If the type contains a pointer to data member we can't memset it to zero.
1768
  // Instead, create a null constant and copy it to the destination.
1769
  // TODO: there are other patterns besides zero that we can usefully memset,
1770
  // like -1, which happens to be the pattern used by member-pointers.
1771
1.18k
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
1772
    // For a VLA, emit a single element, then splat that over the VLA.
1773
9
    if (vla) Ty = getContext().getBaseElementType(vla);   [region count: 0]
1774
9
1775
9
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1776
9
1777
9
    llvm::GlobalVariable *NullVariable =
1778
9
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1779
9
                               /*isConstant=*/true,
1780
9
                               llvm::GlobalVariable::PrivateLinkage,
1781
9
                               NullConstant, Twine());
1782
9
    CharUnits NullAlign = DestPtr.getAlignment();
1783
9
    NullVariable->setAlignment(NullAlign.getAsAlign());
1784
9
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1785
9
                   NullAlign);
1786
9
1787
9
    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);   [region count: 0]
1788
9
1789
    // Get and call the appropriate llvm.memcpy overload.
1790
9
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1791
9
    return;
1792
9
  }
1793
1.17k
1794
  // Otherwise, just memset the whole thing to zero.  This is legal
1795
  // because in LLVM, all default initializers (other than the ones we just
1796
  // handled above) are guaranteed to have a bit pattern of all zeros.
1797
1.17k
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1798
1.17k
}
1799
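An assumed example of a type that is not zero-initializable, which forces the memcpy-from-constant path above instead of a memset:

    struct S { int x; };
    struct Holder {
      int S::*member;   // Itanium C++ ABI: the null data member pointer is -1
    };

    void h() {
      Holder z{};       // zeroing cannot be memset(0); clang copies a private
      (void)z;          // global holding the all-null constant instead
    }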
1800
57
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1801
  // Make sure that there is a block for the indirect goto.
1802
57
  if (!IndirectBranch)
1803
34
    GetIndirectGotoBlock();
1804
57
1805
57
  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1806
57
1807
  // Make sure the indirect branch includes all of the address-taken blocks.
1808
57
  IndirectBranch->addDestination(BB);
1809
57
  return llvm::BlockAddress::get(CurFn, BB);
1810
57
}
1811
1812
56
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1813
  // If we already made the indirect branch for indirect goto, return its block.
1814
56
  if (IndirectBranch) return IndirectBranch->getParent();   [region count: 21]
1815
35
1816
35
  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1817
35
1818
  // Create the PHI node that indirect gotos will add entries to.
1819
35
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1820
35
                                              "indirect.goto.dest");
1821
35
1822
  // Create the indirect branch instruction.
1823
35
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1824
35
  return IndirectBranch->getParent();
1825
35
}
1826
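An assumed GNU-extension example that exercises GetAddrOfLabel and the shared 'indirectgoto' block:

    int run(int op) {
      static void *table[] = { &&do_add, &&do_sub };   // llvm::BlockAddress per label
      int acc = 10;
      goto *table[op & 1];   // every indirect goto funnels into one 'indirectgoto'
    do_add:                  // block, whose PHI node selects the destination
      return acc + 1;
    do_sub:
      return acc - 1;
    }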
1827
/// Computes the length of an array in elements, as well as the base
1828
/// element type and a properly-typed first element pointer.
1829
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1830
                                              QualType &baseType,
1831
4.78k
                                              Address &addr) {
1832
4.78k
  const ArrayType *arrayType = origArrayType;
1833
4.78k
1834
  // If it's a VLA, we have to load the stored size.  Note that
1835
  // this is the size of the VLA in bytes, not its size in elements.
1836
4.78k
  llvm::Value *numVLAElements = nullptr;
1837
4.78k
  if (isa<VariableArrayType>(arrayType)) {
1838
615
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1839
615
1840
    // Walk into all VLAs.  This doesn't require changes to addr,
1841
    // which has type T* where T is the first non-VLA element type.
1842
652
    do {
1843
652
      QualType elementType = arrayType->getElementType();
1844
652
      arrayType = getContext().getAsArrayType(elementType);
1845
652
1846
      // If we only have VLA components, 'addr' requires no adjustment.
1847
652
      if (!arrayType) {
1848
611
        baseType = elementType;
1849
611
        return numVLAElements;
1850
611
      }
1851
41
    } while (isa<VariableArrayType>(arrayType));
1852
615
1853
    // We get out here only if we find a constant array type
1854
    // inside the VLA.
1855
615
  }
1856
4.78k
1857
  // We have some number of constant-length arrays, so addr should
1858
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
1859
  // down to the first element of addr.
1860
4.17k
  SmallVector<llvm::Value*, 8> gepIndices;
1861
4.17k
1862
  // GEP down to the array type.
1863
4.17k
  llvm::ConstantInt *zero = Builder.getInt32(0);
1864
4.17k
  gepIndices.push_back(zero);
1865
4.17k
1866
4.17k
  uint64_t countFromCLAs = 1;
1867
4.17k
  QualType eltType;
1868
4.17k
1869
4.17k
  llvm::ArrayType *llvmArrayType =
1870
4.17k
    dyn_cast<llvm::ArrayType>(addr.getElementType());
1871
8.13k
  while (llvmArrayType) {
1872
3.96k
    assert(isa<ConstantArrayType>(arrayType));
1873
3.96k
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1874
3.96k
             == llvmArrayType->getNumElements());
1875
3.96k
1876
3.96k
    gepIndices.push_back(zero);
1877
3.96k
    countFromCLAs *= llvmArrayType->getNumElements();
1878
3.96k
    eltType = arrayType->getElementType();
1879
3.96k
1880
3.96k
    llvmArrayType =
1881
3.96k
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1882
3.96k
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
1883
3.96k
    assert((!llvmArrayType || arrayType) &&
1884
3.96k
           "LLVM and Clang types are out-of-synch");
1885
3.96k
  }
1886
4.17k
1887
4.17k
  if (arrayType) {
1888
    // From this point onwards, the Clang array type has been emitted
1889
    // as some other type (probably a packed struct). Compute the array
1890
    // size, and just emit the 'begin' expression as a bitcast.
1891
668
    while (arrayType) {
1892
355
      countFromCLAs *=
1893
355
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1894
355
      eltType = arrayType->getElementType();
1895
355
      arrayType = getContext().getAsArrayType(eltType);
1896
355
    }
1897
313
1898
313
    llvm::Type *baseType = ConvertType(eltType);
1899
313
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1900
3.86k
  } else {
1901
    // Create the actual GEP.
1902
3.86k
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1903
3.86k
                                             gepIndices, "array.begin"),
1904
3.86k
                   addr.getAlignment());
1905
3.86k
  }
1906
4.17k
1907
4.17k
  baseType = eltType;
1908
4.17k
1909
4.17k
  llvm::Value *numElements
1910
4.17k
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1911
4.17k
1912
  // If we had any VLA dimensions, factor them in.
1913
4.17k
  if (numVLAElements)
1914
4
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1915
4.17k
1916
4.17k
  return numElements;
1917
4.78k
}
1918
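An assumed example of the mixed VLA/constant-array walk in emitArrayLength:

    void init(int n) {
      int a[n][2][3];   // numVLAElements = n, loaded from the VLA size map;
      (void)a;          // the constant dimensions give countFromCLAs = 2 * 3,
    }                   // so the result is the NUW product n * 6, and 'addr'
                        // is GEP'd down to the first 'int'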
1919
2.14k
CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1920
2.14k
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1921
2.14k
  assert(vla && "type was not a variable array type!");
1922
2.14k
  return getVLASize(vla);
1923
2.14k
}
1924
1925
CodeGenFunction::VlaSizePair
1926
5.73k
CodeGenFunction::getVLASize(const VariableArrayType *type) {
1927
  // The number of elements so far; always size_t.
1928
5.73k
  llvm::Value *numElements = nullptr;
1929
5.73k
1930
5.73k
  QualType elementType;
1931
7.27k
  do {
1932
7.27k
    elementType = type->getElementType();
1933
7.27k
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1934
7.27k
    assert(vlaSize && "no size for VLA!");
1935
7.27k
    assert(vlaSize->getType() == SizeTy);
1936
7.27k
1937
7.27k
    if (!numElements) {
1938
5.73k
      numElements = vlaSize;
1939
1.53k
    } else {
1940
      // It's undefined behavior if this wraps around, so mark it that way.
1941
      // FIXME: Teach -fsanitize=undefined to trap this.
1942
1.53k
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
1943
1.53k
    }
1944
7.27k
  } while ((type = getContext().getAsVariableArrayType(elementType)));
1945
5.73k
1946
5.73k
  return { numElements, elementType };
1947
5.73k
}
1948
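For the size computation above, a small assumed example:

    void g(int n, int m) {
      int a[n][m];   // getVLASize returns { n * m, 'int' }: each VLA
      (void)a;       // dimension's cached size is folded in with an NUW
    }                // multiply until a non-VLA element type is reached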
1949
CodeGenFunction::VlaSizePair
1950
3.17k
CodeGenFunction::getVLAElements1D(QualType type) {
1951
3.17k
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1952
3.17k
  assert(vla && "type was not a variable array type!");
1953
3.17k
  return getVLAElements1D(vla);
1954
3.17k
}
1955
1956
CodeGenFunction::VlaSizePair
1957
3.17k
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
1958
3.17k
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
1959
3.17k
  assert(VlaSize && "no size for VLA!");
1960
3.17k
  assert(VlaSize->getType() == SizeTy);
1961
3.17k
  return { VlaSize, Vla->getElementType() };
1962
3.17k
}
1963
1964
2.83k
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
1965
2.83k
  assert(type->isVariablyModifiedType() &&
1966
2.83k
         "Must pass variably modified type to EmitVLASizes!");
1967
2.83k
1968
2.83k
  EnsureInsertPoint();
1969
2.83k
1970
  // We're going to walk down into the type and look for VLA
1971
  // expressions.
1972
4.03k
  do {
1973
4.03k
    assert(type->isVariablyModifiedType());
1974
4.03k
1975
4.03k
    const Type *ty = type.getTypePtr();
1976
4.03k
    switch (ty->getTypeClass()) {
1977
0
1978
0
#define TYPE(Class, Base)
1979
0
#define ABSTRACT_TYPE(Class, Base)
1980
0
#define NON_CANONICAL_TYPE(Class, Base)
1981
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
1982
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
1983
0
#include "clang/AST/TypeNodes.inc"
1984
0
      llvm_unreachable("unexpected dependent type!");
1985
0
1986
    // These types are never variably-modified.
1987
0
    case Type::Builtin:
1988
0
    case Type::Complex:
1989
0
    case Type::Vector:
1990
0
    case Type::ExtVector:
1991
0
    case Type::ConstantMatrix:
1992
0
    case Type::Record:
1993
0
    case Type::Enum:
1994
0
    case Type::Elaborated:
1995
0
    case Type::TemplateSpecialization:
1996
0
    case Type::ObjCTypeParam:
1997
0
    case Type::ObjCObject:
1998
0
    case Type::ObjCInterface:
1999
0
    case Type::ObjCObjectPointer:
2000
0
    case Type::ExtInt:
2001
0
      llvm_unreachable("type class is never variably-modified!");
2002
0
2003
0
    case Type::Adjusted:
2004
0
      type = cast<AdjustedType>(ty)->getAdjustedType();
2005
0
      break;
2006
0
2007
70
    case Type::Decayed:
2008
70
      type = cast<DecayedType>(ty)->getPointeeType();
2009
70
      break;
2010
0
2011
90
    case Type::Pointer:
2012
90
      type = cast<PointerType>(ty)->getPointeeType();
2013
90
      break;
2014
0
2015
0
    case Type::BlockPointer:
2016
0
      type = cast<BlockPointerType>(ty)->getPointeeType();
2017
0
      break;
2018
0
2019
3
    case Type::LValueReference:
2020
3
    case Type::RValueReference:
2021
3
      type = cast<ReferenceType>(ty)->getPointeeType();
2022
3
      break;
2023
3
2024
0
    case Type::MemberPointer:
2025
0
      type = cast<MemberPointerType>(ty)->getPointeeType();
2026
0
      break;
2027
3
2028
12
    case Type::ConstantArray:
2029
12
    case Type::IncompleteArray:
2030
      // Losing element qualification here is fine.
2031
12
      type = cast<ArrayType>(ty)->getElementType();
2032
12
      break;
2033
12
2034
3.77k
    case Type::VariableArray: {
2035
      // Losing element qualification here is fine.
2036
3.77k
      const VariableArrayType *vat = cast<VariableArrayType>(ty);
2037
3.77k
2038
      // Unknown size indication requires no size computation.
2039
      // Otherwise, evaluate and record it.
2040
3.77k
      if (const Expr *size = vat->getSizeExpr()) {
2041
        // It's possible that we might have emitted this already,
2042
        // e.g. with a typedef and a pointer to it.
2043
3.77k
        llvm::Value *&entry = VLASizeMap[size];
2044
3.77k
        if (!entry) {
2045
3.41k
          llvm::Value *Size = EmitScalarExpr(size);
2046
3.41k
2047
          // C11 6.7.6.2p5:
2048
          //   If the size is an expression that is not an integer constant
2049
          //   expression [...] each time it is evaluated it shall have a value
2050
          //   greater than zero.
2051
3.41k
          if (SanOpts.has(SanitizerKind::VLABound) &&
2052
2
              size->getType()->isSignedIntegerType()) {
2053
2
            SanitizerScope SanScope(this);
2054
2
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
2055
2
            llvm::Constant *StaticArgs[] = {
2056
2
                EmitCheckSourceLocation(size->getBeginLoc()),
2057
2
                EmitCheckTypeDescriptor(size->getType())};
2058
2
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
2059
2
                                     SanitizerKind::VLABound),
2060
2
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
2061
2
          }
2062
3.41k
2063
          // Always zexting here would be wrong if it weren't
2064
          // undefined behavior to have a negative bound.
2065
3.41k
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
2066
3.41k
        }
2067
3.77k
      }
2068
3.77k
      type = vat->getElementType();
2069
3.77k
      break;
2070
12
    }
2071
12
2072
1
    case Type::FunctionProto:
2073
1
    case Type::FunctionNoProto:
2074
1
      type = cast<FunctionType>(ty)->getReturnType();
2075
1
      break;
2076
1
2077
44
    case Type::Paren:
2078
44
    case Type::TypeOf:
2079
44
    case Type::UnaryTransform:
2080
44
    case Type::Attributed:
2081
44
    case Type::SubstTemplateTypeParm:
2082
44
    case Type::MacroQualified:
2083
      // Keep walking after single level desugaring.
2084
44
      type = type.getSingleStepDesugaredType(getContext());
2085
44
      break;
2086
44
2087
28
    case Type::Typedef:
2088
28
    case Type::Decltype:
2089
28
    case Type::Auto:
2090
28
    case Type::DeducedTemplateSpecialization:
2091
      // Stop walking: nothing to do.
2092
28
      return;
2093
28
2094
13
    case Type::TypeOfExpr:
2095
      // Stop walking: emit typeof expression.
2096
13
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2097
13
      return;
2098
28
2099
1
    case Type::Atomic:
2100
1
      type = cast<AtomicType>(ty)->getValueType();
2101
1
      break;
2102
28
2103
0
    case Type::Pipe:
2104
0
      type = cast<PipeType>(ty)->getElementType();
2105
0
      break;
2106
3.99k
    }
2107
3.99k
  } while (type->isVariablyModifiedType());
2108
2.83k
}
2109
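An assumed example of a variably modified type with no array object, showing why the walk still evaluates and caches the size expression:

    void g(int n) {
      int (*p)[n];   // no storage for the array itself, but the size 'n' is
      (void)p;       // still emitted once and recorded in VLASizeMap for any
    }                // later sizeof(*p) or pointer arithmetic on p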
2110
1.12k
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2111
1.12k
  if (getContext().getBuiltinVaListType()->isArrayType())
2112
478
    return EmitPointerWithAlignment(E);
2113
651
  return EmitLValue(E).getAddress(*this);
2114
651
}
2115
2116
29
Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2117
29
  return EmitLValue(E).getAddress(*this);
2118
29
}
2119
2120
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2121
7.43k
                                              const APValue &Init) {
2122
7.43k
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2123
7.43k
  if (CGDebugInfo *Dbg = getDebugInfo())
2124
5.02k
    if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2125
5.01k
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
2126
7.43k
}
2127
2128
CodeGenFunction::PeepholeProtection
2129
1.44k
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2130
  // At the moment, the only aggressive peephole we do in IR gen
2131
  // is trunc(zext) folding, but if we add more, we can easily
2132
  // extend this protection.
2133
1.44k
2134
1.44k
  if (!rvalue.isScalar()) return PeepholeProtection();   [region count: 55]
2135
1.39k
  llvm::Value *value = rvalue.getScalarVal();
2136
1.39k
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();   [region count: 1.32k]
2137
67
2138
  // Just make an extra bitcast.
2139
67
  assert(HaveInsertPoint());
2140
67
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2141
67
                                                  Builder.GetInsertBlock());
2142
67
2143
67
  PeepholeProtection protection;
2144
67
  protection.Inst = inst;
2145
67
  return protection;
2146
67
}
2147
2148
1.44k
void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2149
1.44k
  if (!protection.Inst) return;   [region count: 1.38k]
2150
67
2151
  // In theory, we could try to duplicate the peepholes now, but whatever.
2152
67
  protection.Inst->eraseFromParent();
2153
67
}
2154
2155
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2156
                                              QualType Ty, SourceLocation Loc,
2157
                                              SourceLocation AssumptionLoc,
2158
                                              llvm::Value *Alignment,
2159
445
                                              llvm::Value *OffsetValue) {
2160
445
  if (Alignment->getType() != IntPtrTy)
2161
221
    Alignment =
2162
221
        Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2163
445
  if (OffsetValue && OffsetValue->getType() != IntPtrTy)   [region count: 16]
2164
5
    OffsetValue =
2165
5
        Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2166
445
  llvm::Value *TheCheck = nullptr;
2167
445
  if (SanOpts.has(SanitizerKind::Alignment)) {
2168
33
    llvm::Value *PtrIntValue =
2169
33
        Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2170
33
2171
33
    if (OffsetValue) {
2172
9
      bool IsOffsetZero = false;
2173
9
      if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2174
6
        IsOffsetZero = CI->isZero();
2175
9
2176
9
      if (!IsOffsetZero)
2177
9
        PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2178
9
    }
2179
33
2180
33
    llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2181
33
    llvm::Value *Mask =
2182
33
        Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2183
33
    llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2184
33
    TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2185
33
  }
2186
445
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2187
445
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2188
445
2189
445
  if (!SanOpts.has(SanitizerKind::Alignment))
2190
412
    return;
2191
33
  emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2192
33
                               OffsetValue, TheCheck, Assumption);
2193
33
}
2194
2195
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2196
                                              const Expr *E,
2197
                                              SourceLocation AssumptionLoc,
2198
                                              llvm::Value *Alignment,
2199
418
                                              llvm::Value *OffsetValue) {
2200
418
  if (auto *CE = dyn_cast<CastExpr>(E))
2201
168
    E = CE->getSubExprAsWritten();
2202
418
  QualType Ty = E->getType();
2203
418
  SourceLocation Loc = E->getExprLoc();
2204
418
2205
418
  emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2206
418
                          OffsetValue);
2207
418
}
2208
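An assumed example of the alignment-assumption lowering above:

    int sum(const int *p) {
      const int *q = (const int *)__builtin_assume_aligned(p, 64);      // llvm.assume
      const int *r = (const int *)__builtin_assume_aligned(p, 64, 8);   // with offset
      // Under -fsanitize=alignment the same mask computation,
      //   ((uintptr_t)p - offset) & (64 - 1) == 0,
      // is checked, and the check is inserted ahead of the llvm.assume call.
      return q[0] + r[0];
    }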
2209
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2210
                                                 llvm::Value *AnnotatedVal,
2211
                                                 StringRef AnnotationStr,
2212
22
                                                 SourceLocation Location) {
2213
22
  llvm::Value *Args[4] = {
2214
22
    AnnotatedVal,
2215
22
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2216
22
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2217
22
    CGM.EmitAnnotationLineNo(Location)
2218
22
  };
2219
22
  return Builder.CreateCall(AnnotationFn, Args);
2220
22
}
2221
2222
7
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2223
7
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2224
  // FIXME We create a new bitcast for every annotation because that's what
2225
  // llvm-gcc was doing.
2226
7
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2227
11
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2228
11
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2229
11
                       I->getAnnotation(), D->getLocation());
2230
7
}
2231
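An assumed example for the annotation lowering above:

    void use() {
      __attribute__((annotate("audited"))) int v = 0;   // one llvm.var.annotation
      __attribute__((annotate("a")))                    // call per annotate attribute,
      __attribute__((annotate("b"))) int w = 0;         // each passing the string,
      (void)v; (void)w;                                 // translation unit, and line
    }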
2232
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2233
2
                                              Address Addr) {
2234
2
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2235
2
  llvm::Value *V = Addr.getPointer();
2236
2
  llvm::Type *VTy = V->getType();
2237
2
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2238
2
                                    CGM.Int8PtrTy);
2239
2
2240
4
  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2241
    // FIXME Always emit the cast inst so we can differentiate between
2242
    // annotation on the first field of a struct and annotation on the struct
2243
    // itself.
2244
4
    if (VTy != CGM.Int8PtrTy)
2245
4
      V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2246
4
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
2247
4
    V = Builder.CreateBitCast(V, VTy);
2248
4
  }
2249
2
2250
2
  return Address(V, Addr.getAlignment());
2251
2
}
2252
2253
63.9k
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2254
2255
CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2256
52.5k
    : CGF(CGF) {
2257
52.5k
  assert(!CGF->IsSanitizerScope);
2258
52.5k
  CGF->IsSanitizerScope = true;
2259
52.5k
}
2260
2261
52.5k
CodeGenFunction::SanitizerScope::~SanitizerScope() {
2262
52.5k
  CGF->IsSanitizerScope = false;
2263
52.5k
}
2264
2265
void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2266
                                   const llvm::Twine &Name,
2267
                                   llvm::BasicBlock *BB,
2268
4.89M
                                   llvm::BasicBlock::iterator InsertPt) const {
2269
4.89M
  LoopStack.InsertHelper(I);
2270
4.89M
  if (IsSanitizerScope)
2271
15.2k
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2272
4.89M
}
2273
2274
void CGBuilderInserter::InsertHelper(
2275
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2276
4.89M
    llvm::BasicBlock::iterator InsertPt) const {
2277
4.89M
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2278
4.89M
  if (CGF)
2279
4.89M
    CGF->InsertHelper(I, Name, BB, InsertPt);
2280
4.89M
}
2281
2282
static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
2283
                                CodeGenModule &CGM, const FunctionDecl *FD,
2284
30.0k
                                std::string &FirstMissing) {
2285
  // If there aren't any required features listed then go ahead and return.
2286
30.0k
  if (ReqFeatures.empty())
2287
0
    return false;
2288
30.0k
2289
  // Now build up the set of caller features and verify that all the required
2290
  // features are there.
2291
30.0k
  llvm::StringMap<bool> CallerFeatureMap;
2292
30.0k
  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2293
30.0k
2294
  // Return true only if all of the required features are present; a single
2295
  // entry may be an '|'-separated list of alternatives, any one of which suffices.
2296
30.0k
  return std::all_of(
2297
217k
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
2298
217k
        SmallVector<StringRef, 1> OrFeatures;
2299
217k
        Feature.split(OrFeatures, '|');
2300
225k
        return llvm::any_of(OrFeatures, [&](StringRef Feature) {
2301
225k
          if (!CallerFeatureMap.lookup(Feature)) {
2302
7.87k
            FirstMissing = Feature.str();
2303
7.87k
            return false;
2304
7.87k
          }
2305
217k
          return true;
2306
217k
        });
2307
217k
      });
2308
30.0k
}
2309
2310
// Emits an error if we don't have a valid set of target features for the
2311
// called function.
2312
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2313
51.9k
                                          const FunctionDecl *TargetDecl) {
2314
51.9k
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2315
51.9k
}
2316
2317
// Emits an error if we don't have a valid set of target features for the
2318
// called function.
2319
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2320
64.0k
                                          const FunctionDecl *TargetDecl) {
2321
  // Early exit if this is an indirect call.
2322
64.0k
  if (!TargetDecl)
2323
0
    return;
2324
64.0k
2325
  // Get the current enclosing function if it exists. If it doesn't
2326
  // we can't check the target features anyhow.
2327
64.0k
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2328
64.0k
  if (!FD)
2329
0
    return;
2330
64.0k
2331
  // Grab the required features for the call. For a builtin this is listed in
2332
  // the td file with the default cpu, for an always_inline function this is any
2333
  // listed cpu and any listed features.
2334
64.0k
  unsigned BuiltinID = TargetDecl->getBuiltinID();
2335
64.0k
  std::string MissingFeature;
2336
64.0k
  if (BuiltinID) {
2337
51.9k
    SmallVector<StringRef, 1> ReqFeatures;
2338
51.9k
    const char *FeatureList =
2339
51.9k
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2340
    // Return if the builtin doesn't have any required features.
2341
51.9k
    if (!FeatureList || StringRef(FeatureList) == "")   [region count: 23.1k]
2342
33.9k
      return;
2343
18.0k
    StringRef(FeatureList).split(ReqFeatures, ',');
2344
18.0k
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2345
543
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2346
543
          << TargetDecl->getDeclName()
2347
543
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
2348
18.0k
2349
12.0k
  } else if (!TargetDecl->isMultiVersion() &&
2350
12.0k
             TargetDecl->hasAttr<TargetAttr>()) {
2351
    // Get the required features for the callee.
2352
12.0k
2353
12.0k
    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2354
12.0k
    ParsedTargetAttr ParsedAttr =
2355
12.0k
        CGM.getContext().filterFunctionTargetAttrs(TD);
2356
12.0k
2357
12.0k
    SmallVector<StringRef, 1> ReqFeatures;
2358
12.0k
    llvm::StringMap<bool> CalleeFeatureMap;
2359
12.0k
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2360
12.0k
2361
13.3k
    for (const auto &F : ParsedAttr.Features) {
2362
13.3k
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2363
13.3k
        ReqFeatures.push_back(StringRef(F).substr(1));
2364
13.3k
    }
2365
12.0k
2366
184k
    for (const auto &F : CalleeFeatureMap) {
2367
      // Only positive features are "required".
2368
184k
      if (F.getValue())
2369
184k
        ReqFeatures.push_back(F.getKey());
2370
184k
    }
2371
12.0k
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
2372
19
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2373
19
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2374
12.0k
  }
2375
64.0k
}
2376
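An assumed example of the diagnostic path above:

    #include <immintrin.h>
    __m256i sum(__m256i a, __m256i b) {
      return _mm256_add_epi32(a, b);   // compiled without -mavx2, the underlying
    }                                  // builtin's required-feature list is not
                                       // satisfied and err_builtin_needs_feature
                                       // fires, naming the missing feature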
2377
87
void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2378
87
  if (!CGM.getCodeGenOpts().SanitizeStats)
2379
77
    return;
2380
10
2381
10
  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2382
10
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2383
10
  CGM.getSanStats().create(IRB, SSK);
2384
10
}
2385
2386
llvm::Value *
2387
234
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2388
234
  llvm::Value *Condition = nullptr;
2389
234
2390
234
  if (!RO.Conditions.Architecture.empty())
2391
84
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2392
234
2393
234
  if (!RO.Conditions.Features.empty()) {
2394
94
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2395
94
    Condition =
2396
92
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;   [region count: 2]
2397
94
  }
2398
234
  return Condition;
2399
234
}
2400
2401
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2402
                                             llvm::Function *Resolver,
2403
                                             CGBuilderTy &Builder,
2404
                                             llvm::Function *FuncToReturn,
2405
234
                                             bool SupportsIFunc) {
2406
234
  if (SupportsIFunc) {
2407
118
    Builder.CreateRet(FuncToReturn);
2408
118
    return;
2409
118
  }
2410
116
2411
116
  llvm::SmallVector<llvm::Value *, 10> Args;
2412
116
  llvm::for_each(Resolver->args(),
2413
116
                 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2414
116
2415
116
  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2416
116
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2417
116
2418
116
  if (Resolver->getReturnType()->isVoidTy())
2419
29
    Builder.CreateRetVoid();
2420
87
  else
2421
87
    Builder.CreateRet(Result);
2422
116
}
2423
2424
void CodeGenFunction::EmitMultiVersionResolver(
2425
68
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2426
68
  assert(getContext().getTargetInfo().getTriple().isX86() &&
2427
68
         "Only implemented for x86 targets");
2428
68
2429
68
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2430
68
2431
  // Main function's basic block.
2432
68
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2433
68
  Builder.SetInsertPoint(CurBlock);
2434
68
  EmitX86CpuInit();
2435
68
2436
234
  for (const MultiVersionResolverOption &RO : Options) {
2437
234
    Builder.SetInsertPoint(CurBlock);
2438
234
    llvm::Value *Condition = FormResolverCondition(RO);
2439
234
2440
    // The 'default' or 'generic' case.
2441
234
    if (!Condition) {
2442
58
      assert(&RO == Options.end() - 1 &&
2443
58
             "Default or Generic case must be last");
2444
58
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2445
58
                                       SupportsIFunc);
2446
58
      return;
2447
58
    }
2448
176
2449
176
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2450
176
    CGBuilderTy RetBuilder(*this, RetBlock);
2451
176
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2452
176
                                     SupportsIFunc);
2453
176
    CurBlock = createBasicBlock("resolver_else", Resolver);
2454
176
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2455
176
  }
2456
68
2457
  // If no generic/default, emit an unreachable.
2458
10
  Builder.SetInsertPoint(CurBlock);
2459
10
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2460
10
  TrapCall->setDoesNotReturn();
2461
10
  TrapCall->setDoesNotThrow();
2462
10
  Builder.CreateUnreachable();
2463
10
  Builder.ClearInsertionPoint();
2464
10
}
2465
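Note: a sketch of user code that produces such a resolver. With one non-default version, EmitMultiVersionResolver lays out resolver_entry, then one resolver_return / resolver_else pair for the avx2 version, with the default version handled last:

    // x86 function multiversioning; 'fast' is dispatched through the
    // resolver whose blocks are built above:
    __attribute__((target("avx2")))    int fast() { return 2; }
    __attribute__((target("default"))) int fast() { return 1; }

    int call_fast() { return fast(); }  // goes through the resolver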
2466
// Loc - where the diagnostic will point, where in the source code this
2467
//  alignment has failed.
2468
// SecondaryLoc - if present (will be present if sufficiently different from
2469
//  Loc), the diagnostic will additionally point a "Note:" to this location.
2470
//  It should be the location where the __attribute__((assume_aligned))
2471
//  was written, e.g. on the declaration of the callee.
2472
void CodeGenFunction::emitAlignmentAssumptionCheck(
2473
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2474
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
2475
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
2476
33
    llvm::Instruction *Assumption) {
2477
33
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2478
33
         cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2479
33
             llvm::Intrinsic::getDeclaration(
2480
33
                 Builder.GetInsertBlock()->getParent()->getParent(),
2481
33
                 llvm::Intrinsic::assume) &&
2482
33
         "Assumption should be a call to llvm.assume().");
2483
33
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2484
33
         "Assumption should be the last instruction of the basic block, "
2485
33
         "since the basic block is still being generated.");
2486
33
2487
33
  if (!SanOpts.has(SanitizerKind::Alignment))
2488
0
    return;
2489
33
2490
  // Don't check pointers to volatile data. The behavior here is implementation-
2491
  // defined.
2492
33
  if (Ty->getPointeeType().isVolatileQualified())
2493
1
    return;
2494
32
2495
  // We need to temporarily remove the assumption so we can insert the
2496
  // sanitizer check before it; otherwise the check will be dropped by optimizations.
2497
32
  Assumption->removeFromParent();
2498
32
2499
32
  {
2500
32
    SanitizerScope SanScope(this);
2501
32
2502
32
    if (!OffsetValue)
2503
23
      OffsetValue = Builder.getInt1(0); // no offset.
2504
32
2505
32
    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2506
32
                                    EmitCheckSourceLocation(SecondaryLoc),
2507
32
                                    EmitCheckTypeDescriptor(Ty)};
2508
32
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2509
32
                                  EmitCheckValue(Alignment),
2510
32
                                  EmitCheckValue(OffsetValue)};
2511
32
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2512
32
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2513
32
  }
2514
32
2515
  // We are now in the (new, empty) "cont" basic block.
2516
  // Reintroduce the assumption.
2517
32
  Builder.Insert(Assumption);
2518
  // FIXME: Assumption still has its original basic block as its Parent.
2519
32
}
2520
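Note: this check guards alignment assumptions under -fsanitize=alignment: the llvm.assume is unhooked, the UBSan check is emitted before it, and the assume is reinserted in the new continuation block. A sketch of source that exercises it (make_buf is a hypothetical name):

    #include <cstddef>

    // The assumption introduced by the attribute (or by
    // __builtin_assume_aligned) is verified at runtime before being
    // handed to the optimizer as an llvm.assume:
    __attribute__((assume_aligned(32))) void *make_buf(std::size_t n);

    double first(void *raw) {
      double *p = static_cast<double *>(__builtin_assume_aligned(raw, 32));
      return p[0];  // UBSan reports here if raw is not 32-byte aligned
    }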
2521
51.5k
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2522
51.5k
  if (CGDebugInfo *DI = getDebugInfo())
2523
16.5k
    return DI->SourceLocToDebugLoc(Location);
2524
34.9k
2525
34.9k
  return llvm::DebugLoc();
2526
34.9k
}