Coverage Report

Created: 2022-07-16 07:03

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

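// Sketch (illustrative, not from this file): when this returns true, locals
// lowered during codegen get their live range bracketed with lifetime
// intrinsics, roughly:
//
//   %x = alloca i32, align 4
//   call void @llvm.lifetime.start.p0(i64 4, ptr %x)
//   ; ...uses of %x...
//   call void @llvm.lifetime.end.p0(i64 4, ptr %x)
//
// which lets later stack-coloring passes reuse the stack slot.
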
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}

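// Sketch (illustrative): under -ffp-exception-behavior=strict the frontend
// maps FPE_Strict to llvm::fp::ebStrict, and FP math is then emitted through
// constrained intrinsics instead of plain instructions, roughly:
//
//   %r = call double @llvm.experimental.constrained.fadd.f64(
//            double %a, double %b,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")
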
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

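// Sketch (illustrative): with -ffast-math all of the flags above are set, so
// the IRBuilder tags FP instructions with the full fast-math bundle, e.g.:
//
//   %mul = fmul fast float %a, %b
//
// whereas a single option such as -fno-signed-zeros would contribute only the
// 'nsz' flag.
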
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
                                         FPFeatures.getAllowReciprocal() &&
                                         FPFeatures.getAllowApproxFunc() &&
                                         FPFeatures.getNoSignedZero());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}

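// Usage sketch (how call sites in CodeGen typically use this RAII; the emit
// call below is a placeholder, not a real function in this file):
//
//   {
//     CGFPOptionsRAII FPOptsRAII(*this, E); // switch to E's FPOptions
//     EmitSomeFPOperation(E);               // emitted under those options
//   }                                       // dtor restores the saved state
//
// Scoping it this way keeps pragma-local FP settings from leaking into
// surrounding code.
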
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  Address Addr(V, ConvertTypeForMem(T), Alignment);
  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  Address Addr(V, ConvertTypeForMem(T), Align);
  return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

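// Examples of the classification above (restating the switch):
//   int, float*, an enum      -> TEK_Scalar
//   _Complex double           -> TEK_Complex
//   structs, arrays, ObjC objects -> TEK_Aggregate
//   _Atomic(int)              -> loops back on the underlying int -> TEK_Scalar
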
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    Optional<ApplyDebugLocation> AL;
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt, if created, was created lazily when it was required;
  // remove it now since it was just created for our own convenience.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in slight increase in numbers of instructions
  // if compiled with no optimizations. We do it for coroutines as the lifetime
  // of the CleanupDestSlot alloca makes correct coroutine frame building very
  // difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the required-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));

  // Add vscale_range attribute if appropriate.
  Optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts());
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

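// Sketch (illustrative): a function taking a __m256 argument contributes a
// 256-bit vector type to the scan above, so it ends up with the attribute
//   "min-legal-vector-width"="256"
// unless a wider width was already recorded from builtins or inline asm.
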
/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

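// Example (restating the call sites in this file): compiling with
// -finstrument-functions makes this return true, and StartFunction /
// FinishFunction then attach
//   "instrument-function-entry"="__cyg_profile_func_enter"
//   "instrument-function-exit"="__cyg_profile_func_exit"
// so the backend inserts the profiling calls.
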
bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, Int8PtrTy, getPointerAlign()),
                            "decoded_addr");
}

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!getLangOpts().OpenCL)
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

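// Sketch (illustrative): an OpenCL kernel declared as
//   __kernel __attribute__((reqd_work_group_size(8, 8, 1))) void k(...);
// gets function metadata along the lines of
//   !reqd_work_group_size !{i32 8, i32 8, i32 1}
// attached to the kernel, which the target runtime can consult.
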
/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

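// Example: a body of the form { ...; return x; } ends with a ReturnStmt, so
// this returns true; { if (c) return; } does not (the last statement is the
// IfStmt), so the caller counts an implicit return for the function.
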
void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

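// Example (restating the checks above): this matches the usual STL shapes
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);
// on any class named method "allocate", so custom allocators outside
// namespace std are covered as well.
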
/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
688
                                    llvm::Function *Fn,
689
                                    const CGFunctionInfo &FnInfo,
690
                                    const FunctionArgList &Args,
691
                                    SourceLocation Loc,
692
314k
                                    SourceLocation StartLoc) {
693
314k
  assert(!CurFn &&
694
314k
         "Do not use a CodeGenFunction object for more than one function");
695
696
0
  const Decl *D = GD.getDecl();
697
698
314k
  DidCallStackSave = false;
699
314k
  CurCodeDecl = D;
700
314k
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
701
314k
  if (FD && 
FD->usesSEHTry()269k
)
702
109
    CurSEHParent = FD;
703
314k
  CurFuncDecl = (D ? 
D->getNonClosureContext()305k
:
nullptr9.08k
);
704
314k
  FnRetTy = RetTy;
705
314k
  CurFn = Fn;
706
314k
  CurFnInfo = &FnInfo;
707
314k
  assert(CurFn->isDeclaration() && "Function already has body?");
708
709
  // If this function is ignored for any of the enabled sanitizers,
710
  // disable the sanitizer for the function.
711
314k
  do {
712
314k
#define SANITIZER(NAME, ID)                                                    \
713
846k
  if (
SanOpts.empty()536k
) \
714
846k
    
break310k
; \
715
846k
  
if (225k
SanOpts.has(SanitizerKind::ID)225k
) \
716
225k
    
if (7.34k
CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)7.34k
) \
717
7.34k
      
SanOpts.set(SanitizerKind::ID, false)65
;
718
719
314k
#include "clang/Basic/Sanitizers.def"
720
225k
#undef SANITIZER
721
225k
  } while (
false3.95k
);
722
723
314k
  if (D) {
724
305k
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
725
305k
    bool NoSanitizeCoverage = false;
726
727
305k
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
728
      // Apply the no_sanitize* attributes to SanOpts.
729
539
      SanitizerMask mask = Attr->getMask();
730
539
      SanOpts.Mask &= ~mask;
731
539
      if (mask & SanitizerKind::Address)
732
65
        SanOpts.set(SanitizerKind::KernelAddress, false);
733
539
      if (mask & SanitizerKind::KernelAddress)
734
5
        SanOpts.set(SanitizerKind::Address, false);
735
539
      if (mask & SanitizerKind::HWAddress)
736
5
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
737
539
      if (mask & SanitizerKind::KernelHWAddress)
738
5
        SanOpts.set(SanitizerKind::HWAddress, false);
739
740
      // SanitizeCoverage is not handled by SanOpts.
741
539
      if (Attr->hasCoverage())
742
24
        NoSanitizeCoverage = true;
743
539
    }
744
745
305k
    if (SanitizeBounds && 
!SanOpts.hasOneOf(SanitizerKind::Bounds)130
)
746
6
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
747
748
305k
    if (NoSanitizeCoverage && 
CGM.getCodeGenOpts().hasSanitizeCoverage()24
)
749
24
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
750
305k
  }
751
752
314k
  if (ShouldSkipSanitizerInstrumentation()) {
753
11
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
754
314k
  } else {
755
    // Apply sanitizer attributes to the function.
756
314k
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
757
528
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
758
314k
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
759
314k
                         SanitizerKind::KernelHWAddress))
760
37
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
761
314k
    if (SanOpts.has(SanitizerKind::MemtagStack))
762
20
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
763
314k
    if (SanOpts.has(SanitizerKind::Thread))
764
85
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
765
314k
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
766
388
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
767
314k
  }
768
314k
  if (SanOpts.has(SanitizerKind::SafeStack))
769
20
    Fn->addFnAttr(llvm::Attribute::SafeStack);
770
314k
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
771
3
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
772
773
  // Apply fuzzing attribute to the function.
774
314k
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
775
2
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
776
777
  // Ignore TSan memory acesses from within ObjC/ObjC++ dealloc, initialize,
778
  // .cxx_destruct, __destroy_helper_block_ and all of their calees at run time.
779
314k
  if (SanOpts.has(SanitizerKind::Thread)) {
780
86
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
781
3
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
782
3
      if (OMD->getMethodFamily() == OMF_dealloc ||
783
3
          
OMD->getMethodFamily() == OMF_initialize2
||
784
3
          
(1
OMD->getSelector().isUnarySelector()1
&&
II->isStr(".cxx_destruct")1
)) {
785
3
        markAsIgnoreThreadCheckingAtRuntime(Fn);
786
3
      }
787
3
    }
788
86
  }
789
790
  // Ignore unrelated casts in STL allocate() since the allocator must cast
791
  // from void* to T* before object initialization completes. Don't match on the
792
  // namespace because not all allocators are in std::
793
314k
  if (D && 
SanOpts.has(SanitizerKind::CFIUnrelatedCast)305k
) {
794
35
    if (matchesStlAllocatorFn(D, getContext()))
795
2
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
796
35
  }
797
798
  // Ignore null checks in coroutine functions since the coroutines passes
799
  // are not aware of how to move the extra UBSan instructions across the split
800
  // coroutine boundaries.
801
314k
  if (D && 
SanOpts.has(SanitizerKind::Null)305k
)
802
248
    if (FD && FD->getBody() &&
803
248
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
804
1
      SanOpts.Mask &= ~SanitizerKind::Null;
805
806
  // Apply xray attributes to the function (as a string, for now)
807
314k
  bool AlwaysXRayAttr = false;
808
314k
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
809
86
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
810
86
            XRayInstrKind::FunctionEntry) ||
811
86
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
812
82
            XRayInstrKind::FunctionExit)) {
813
82
      if (XRayAttr->alwaysXRayInstrument() && 
ShouldXRayInstrumentFunction()45
) {
814
38
        Fn->addFnAttr("function-instrument", "xray-always");
815
38
        AlwaysXRayAttr = true;
816
38
      }
817
82
      if (XRayAttr->neverXRayInstrument())
818
37
        Fn->addFnAttr("function-instrument", "xray-never");
819
82
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
820
1
        if (ShouldXRayInstrumentFunction())
821
1
          Fn->addFnAttr("xray-log-args",
822
1
                        llvm::utostr(LogArgs->getArgumentCount()));
823
82
    }
824
314k
  } else {
825
314k
    if (ShouldXRayInstrumentFunction() && 
!CGM.imbueXRayAttrs(Fn, Loc)31
)
826
19
      Fn->addFnAttr(
827
19
          "xray-instruction-threshold",
828
19
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
829
314k
  }
830
831
314k
  if (ShouldXRayInstrumentFunction()) {
832
103
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
833
1
      Fn->addFnAttr("xray-ignore-loops");
834
835
103
    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
836
103
            XRayInstrKind::FunctionExit))
837
6
      Fn->addFnAttr("xray-skip-exit");
838
839
103
    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
840
103
            XRayInstrKind::FunctionEntry))
841
6
      Fn->addFnAttr("xray-skip-entry");
842
843
103
    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
844
103
    if (FuncGroups > 1) {
845
15
      auto FuncName = llvm::makeArrayRef<uint8_t>(
846
15
          CurFn->getName().bytes_begin(), CurFn->getName().bytes_end());
847
15
      auto Group = crc32(FuncName) % FuncGroups;
848
15
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
849
15
          
!AlwaysXRayAttr10
)
850
8
        Fn->addFnAttr("function-instrument", "xray-never");
851
15
    }
852
103
  }
853
854
314k
  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone)
855
486
    if (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc))
856
10
      Fn->addFnAttr(llvm::Attribute::NoProfile);
857
858
314k
  unsigned Count, Offset;
859
314k
  if (const auto *Attr =
860
314k
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
861
21
    Count = Attr->getCount();
862
21
    Offset = Attr->getOffset();
863
314k
  } else {
864
314k
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
865
314k
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
866
314k
  }
867
314k
  if (Count && 
Offset <= Count16
) {
868
16
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
869
16
    if (Offset)
870
6
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
871
16
  }
872
  // Instruct that functions for COFF/CodeView targets should start with a
873
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
874
  // backends as they don't need it -- instructions on these architectures are
875
  // always atomically patchable at runtime.
876
314k
  if (CGM.getCodeGenOpts().HotPatch &&
877
314k
      
getContext().getTargetInfo().getTriple().isX86()11
)
878
9
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");
879
880
  // Add no-jump-tables value.
881
314k
  if (CGM.getCodeGenOpts().NoUseJumpTables)
882
1
    Fn->addFnAttr("no-jump-tables", "true");
883
884
  // Add no-inline-line-tables value.
885
314k
  if (CGM.getCodeGenOpts().NoInlineLineTables)
886
4
    Fn->addFnAttr("no-inline-line-tables");
887
888
  // Add profile-sample-accurate value.
889
314k
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
890
2
    Fn->addFnAttr("profile-sample-accurate");
891
892
314k
  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
893
20
    Fn->addFnAttr("use-sample-profile");
894
895
314k
  if (D && 
D->hasAttr<CFICanonicalJumpTableAttr>()305k
)
896
2
    Fn->addFnAttr("cfi-canonical-jump-table");
897
898
314k
  if (D && 
D->hasAttr<NoProfileFunctionAttr>()305k
)
899
4
    Fn->addFnAttr(llvm::Attribute::NoProfile);
900
901
314k
  if (D) {
902
    // Function attributes take precedence over command line flags.
903
305k
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
904
123
      switch (A->getThunkType()) {
905
57
      case FunctionReturnThunksAttr::Kind::Keep:
906
57
        break;
907
66
      case FunctionReturnThunksAttr::Kind::Extern:
908
66
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
909
66
        break;
910
123
      }
911
304k
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
912
9
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
913
305k
  }
914
915
314k
  if (FD && 
(269k
getLangOpts().OpenCL269k
||
916
269k
             
(267k
getLangOpts().HIP267k
&&
getLangOpts().CUDAIsDevice559
))) {
917
    // Add metadata for a kernel function.
918
2.44k
    EmitKernelMetadata(FD, Fn);
919
2.44k
  }
920
921
  // If we are checking function types, emit a function type signature as
922
  // prologue data.
923
314k
  if (FD && 
getLangOpts().CPlusPlus269k
&&
SanOpts.has(SanitizerKind::Function)184k
) {
924
204
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
925
      // Remove any (C++17) exception specifications, to allow calling e.g. a
926
      // noexcept function through a non-noexcept pointer.
927
111
      auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(
928
111
          FD->getType(), EST_None);
929
111
      llvm::Constant *FTRTTIConst =
930
111
          CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
931
111
      llvm::GlobalVariable *FTRTTIProxy =
932
111
          CGM.GetOrCreateRTTIProxyGlobalVariable(FTRTTIConst);
933
111
      llvm::LLVMContext &Ctx = Fn->getContext();
934
111
      llvm::MDBuilder MDB(Ctx);
935
111
      Fn->setMetadata(llvm::LLVMContext::MD_func_sanitize,
936
111
                      MDB.createRTTIPointerPrologue(PrologueSig, FTRTTIProxy));
937
111
      CGM.addCompilerUsedGlobal(FTRTTIProxy);
938
111
    }
939
204
  }
940
941
  // If we're checking nullability, we need to know whether we can check the
942
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
943
314k
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
944
60
    auto Nullability = FnRetTy->getNullability(getContext());
945
60
    if (Nullability && 
*Nullability == NullabilityKind::NonNull17
) {
946
15
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
947
15
            
CurCodeDecl1
&&
CurCodeDecl->getAttr<ReturnsNonNullAttr>()1
))
948
14
        RetValNullabilityPrecondition =
949
14
            llvm::ConstantInt::getTrue(getLLVMContext());
950
15
    }
951
60
  }
952
953
  // If we're in C++ mode and the function name is "main", it is guaranteed
954
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
955
  // used within a program").
956
  //
957
  // OpenCL C 2.0 v2.2-11 s6.9.i:
958
  //     Recursion is not supported.
959
  //
960
  // SYCL v1.2.1 s3.10:
961
  //     kernels cannot include RTTI information, exception classes,
962
  //     recursive code, virtual functions or make use of C++ libraries that
963
  //     are not compiled for the device.
964
314k
  if (FD && 
(269k
(269k
getLangOpts().CPlusPlus269k
&&
FD->isMain()184k
) ||
965
269k
             
getLangOpts().OpenCL265k
||
getLangOpts().SYCLIsDevice263k
||
966
269k
             
(263k
getLangOpts().CUDA263k
&&
FD->hasAttr<CUDAGlobalAttr>()859
)))
967
6.42k
    Fn->addFnAttr(llvm::Attribute::NoRecurse);
968
969
314k
  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
970
314k
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
971
314k
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
972
314k
  Builder.setDefaultConstrainedRounding(RM);
973
314k
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
974
314k
  if ((FD && 
(269k
FD->UsesFPIntrin()269k
||
FD->hasAttr<StrictFPAttr>()268k
)) ||
975
314k
      
(312k
!FD312k
&&
(44.3k
FPExceptionBehavior != llvm::fp::ebIgnore44.3k
||
976
44.3k
               
RM != llvm::RoundingMode::NearestTiesToEven44.3k
))) {
977
1.76k
    Builder.setIsFPConstrained(true);
978
1.76k
    Fn->addFnAttr(llvm::Attribute::StrictFP);
979
1.76k
  }
980
981
  // If a custom alignment is used, force realigning to this alignment on
982
  // any main function which certainly will need it.
983
314k
  if (FD && 
(269k
(269k
FD->isMain()269k
||
FD->isMSVCRTEntryPoint()264k
) &&
984
269k
             
CGM.getCodeGenOpts().StackAlignment5.73k
))
985
1
    Fn->addFnAttr("stackrealign");
986
987
  // "main" doesn't need to zero out call-used registers.
988
314k
  if (FD && 
FD->isMain()269k
)
989
5.70k
    Fn->removeFnAttr("zero-call-used-regs");
990
991
314k
  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
992
993
  // Create a marker to make it easy to insert allocas into the entryblock
994
  // later.  Don't create this with the builder, because we don't want it
995
  // folded.
996
314k
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
997
314k
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
998
999
314k
  ReturnBlock = getJumpDestInCurrentScope("return");
1000
1001
314k
  Builder.SetInsertPoint(EntryBB);
1002
1003
  // If we're checking the return value, allocate space for a pointer to a
1004
  // precise source location of the checked return statement.
1005
314k
  if (requiresReturnValueCheck()) {
1006
19
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
1007
19
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
1008
19
                        ReturnLocation);
1009
19
  }
1010
1011
  // Emit subprogram debug descriptor.
1012
314k
  if (CGDebugInfo *DI = getDebugInfo()) {
1013
    // Reconstruct the type from the argument list so that implicit parameters,
1014
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
1015
    // convention.
1016
98.0k
    DI->emitFunctionStart(GD, Loc, StartLoc,
1017
98.0k
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
1018
98.0k
                          CurFuncIsThunk);
1019
98.0k
  }
1020
1021
314k
  if (ShouldInstrumentFunction()) {
1022
11
    if (CGM.getCodeGenOpts().InstrumentFunctions)
1023
7
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
1024
11
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
1025
2
      CurFn->addFnAttr("instrument-function-entry-inlined",
1026
2
                       "__cyg_profile_func_enter");
1027
11
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
1028
2
      CurFn->addFnAttr("instrument-function-entry-inlined",
1029
2
                       "__cyg_profile_func_enter_bare");
1030
11
  }
1031
1032
  // Since emitting the mcount call here impacts optimizations such as function
1033
  // inlining, we just add an attribute to insert a mcount call in backend.
1034
  // The attribute "counting-function" is set to mcount function name which is
1035
  // architecture dependent.
1036
314k
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1037
    // Calls to fentry/mcount should not be generated if function has
1038
    // the no_instrument_function attribute.
1039
147
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1040
117
      if (CGM.getCodeGenOpts().CallFEntry)
1041
4
        Fn->addFnAttr("fentry-call", "true");
1042
113
      else {
1043
113
        Fn->addFnAttr("instrument-function-entry-inlined",
1044
113
                      getTarget().getMCountName());
1045
113
      }
1046
117
      if (CGM.getCodeGenOpts().MNopMCount) {
1047
2
        if (!CGM.getCodeGenOpts().CallFEntry)
1048
1
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1049
1
            << "-mnop-mcount" << "-mfentry";
1050
2
        Fn->addFnAttr("mnop-mcount");
1051
2
      }
1052
1053
117
      if (CGM.getCodeGenOpts().RecordMCount) {
1054
2
        if (!CGM.getCodeGenOpts().CallFEntry)
1055
1
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1056
1
            << "-mrecord-mcount" << "-mfentry";
1057
2
        Fn->addFnAttr("mrecord-mcount");
1058
2
      }
1059
117
    }
1060
147
  }
1061
1062
314k
  if (CGM.getCodeGenOpts().PackedStack) {
1063
2
    if (getContext().getTargetInfo().getTriple().getArch() !=
1064
2
        llvm::Triple::systemz)
1065
1
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1066
1
        << "-mpacked-stack";
1067
2
    Fn->addFnAttr("packed-stack");
1068
2
  }
1069
1070
314k
  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
1071
314k
      
!CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc)37
)
1072
32
    Fn->addFnAttr("warn-stack-size",
1073
32
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));
1074
1075
314k
  if (RetTy->isVoidType()) {
1076
    // Void type; nothing to return.
1077
162k
    ReturnValue = Address::invalid();
1078
1079
    // Count the implicit return.
1080
162k
    if (!endsWithReturn(D))
1081
159k
      ++NumReturnExprs;
1082
162k
  } else 
if (151k
CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect151k
) {
1083
    // Indirect return; emit returned value directly into sret slot.
1084
    // This reduces code size, and affects correctness in C++.
1085
4.25k
    auto AI = CurFn->arg_begin();
1086
4.25k
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1087
54
      ++AI;
1088
4.25k
    ReturnValue = Address(&*AI, ConvertType(RetTy),
1089
4.25k
                          CurFnInfo->getReturnInfo().getIndirectAlign());
1090
4.25k
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1091
2.56k
      ReturnValuePointer =
1092
2.56k
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
1093
2.56k
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
1094
2.56k
                              ReturnValue.getPointer(), Int8PtrTy),
1095
2.56k
                          ReturnValuePointer);
1096
2.56k
    }
1097
147k
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1098
147k
             
!hasScalarEvaluationKind(CurFnInfo->getReturnType())5
) {
1099
    // Load the sret pointer from the argument struct and return into that.
1100
5
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1101
5
    llvm::Function::arg_iterator EI = CurFn->arg_end();
1102
5
    --EI;
1103
5
    llvm::Value *Addr = Builder.CreateStructGEP(
1104
5
        CurFnInfo->getArgStruct(), &*EI, Idx);
1105
5
    llvm::Type *Ty =
1106
5
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
1107
5
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
1108
5
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
1109
5
    ReturnValue =
1110
5
        Address(Addr, ConvertType(RetTy), CGM.getNaturalTypeAlignment(RetTy));
1111
147k
  } else {
1112
147k
    ReturnValue = CreateIRTemp(RetTy, "retval");
1113
1114
    // Tell the epilog emitter to autorelease the result.  We do this
1115
    // now so that various specialized functions can suppress it
1116
    // during their IR-generation.
1117
147k
    if (getLangOpts().ObjCAutoRefCount &&
1118
147k
        
!CurFnInfo->isReturnsRetained()423
&&
1119
147k
        
RetTy->isObjCRetainableType()388
)
1120
159
      AutoreleaseResult = true;
1121
147k
  }
1122
1123
314k
  EmitStartEHSpec(CurCodeDecl);
1124
1125
314k
  PrologueCleanupDepth = EHStack.stable_begin();
1126
1127
  // Emit OpenMP specific initialization of the device functions.
1128
314k
  if (getLangOpts().OpenMP && 
CurCodeDecl70.4k
)
1129
63.9k
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1130
1131
314k
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1132
1133
314k
  if (isa_and_nonnull<CXXMethodDecl>(D) &&
1134
314k
      
cast<CXXMethodDecl>(D)->isInstance()110k
) {
1135
103k
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1136
103k
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1137
103k
    if (MD->getParent()->isLambda() &&
1138
103k
        
MD->getOverloadedOperator() == OO_Call1.76k
) {
1139
      // We're in a lambda; figure out the captures.
1140
1.64k
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
1141
1.64k
                                        LambdaThisCaptureField);
1142
1.64k
      if (LambdaThisCaptureField) {
1143
        // If the lambda captures the object referred to by '*this' - either by
1144
        // value or by reference, make sure CXXThisValue points to the correct
1145
        // object.
1146
1147
        // Get the lvalue for the field (which is a copy of the enclosing object
1148
        // or contains the address of the enclosing object).
1149
79
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1150
79
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
1151
          // If the enclosing object was captured by value, just use its address.
1152
2
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1153
77
        } else {
1154
          // Load the lvalue pointed to by the field, since '*this' was captured
1155
          // by reference.
1156
77
          CXXThisValue =
1157
77
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1158
77
        }
1159
79
      }
1160
2.68k
      for (auto *FD : MD->getParent()->fields()) {
1161
2.68k
        if (FD->hasCapturedVLAType()) {
1162
24
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1163
24
                                           SourceLocation()).getScalarVal();
1164
24
          auto VAT = FD->getCapturedVLAType();
1165
24
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1166
24
        }
1167
2.68k
      }
1168
102k
    } else {
1169
      // Not in a lambda; just use 'this' from the method.
1170
      // FIXME: Should we generate a new load for each use of 'this'?  The
1171
      // fast register allocator would be happier...
1172
102k
      CXXThisValue = CXXABIThisValue;
1173
102k
    }
1174
1175
    // Check the 'this' pointer once per function, if it's available.
1176
103k
    if (CXXABIThisValue) {
1177
103k
      SanitizerSet SkippedChecks;
1178
103k
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
1179
103k
      QualType ThisTy = MD->getThisType();
1180
1181
      // If this is the call operator of a lambda with no capture-default, it
1182
      // may have a static invoker function, which may call this operator with
1183
      // a null 'this' pointer.
1184
103k
      if (isLambdaCallOperator(MD) &&
1185
103k
          
MD->getParent()->getLambdaCaptureDefault() == LCD_None1.64k
)
1186
363
        SkippedChecks.set(SanitizerKind::Null, true);
1187
1188
103k
      EmitTypeCheck(
1189
103k
          isa<CXXConstructorDecl>(MD) ? 
TCK_ConstructorCall40.4k
:
TCK_MemberCall63.1k
,
1190
103k
          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
1191
103k
    }
1192
103k
  }
1193
1194
  // If any of the arguments have a variably modified type, make sure to
1195
  // emit the type size, but only if the function is not naked. Naked functions
1196
  // have no prolog to run this evaluation.
1197
314k
  if (!FD || 
!FD->hasAttr<NakedAttr>()269k
) {
1198
529k
    for (const VarDecl *VD : Args) {
1199
      // Dig out the type as written from ParmVarDecls; it's unclear whether
1200
      // the standard (C99 6.9.1p10) requires this, but we're following the
1201
      // precedent set by gcc.
1202
529k
      QualType Ty;
1203
529k
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1204
336k
        Ty = PVD->getOriginalType();
1205
193k
      else
1206
193k
        Ty = VD->getType();
1207
1208
529k
      if (Ty->isVariablyModifiedType())
1209
137
        EmitVariablyModifiedType(Ty);
1210
529k
    }
1211
314k
  }
1212
  // Emit a location at the end of the prologue.
1213
314k
  if (CGDebugInfo *DI = getDebugInfo())
1214
98.0k
    DI->EmitLocation(Builder, StartLoc);
1215
  // TODO: Do we need to handle this in two places like we do with
1216
  // target-features/target-cpu?
1217
314k
  if (CurFuncDecl)
1218
298k
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1219
9.51k
      LargestVectorWidth = VecWidth->getVectorWidth();
1220
314k
}
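// Illustrative sketch (editor's example, not part of the original file): the
// lambda handling in the prologue above is exercised by captures of 'this'
// and '*this', e.g.:
//
//   struct S {
//     int v = 1;
//     auto byRef() { return [this] { return v; }; }  // capture field: S *
//     auto byVal() { return [*this] { return v; }; } // capture field: S
//   };
//
// For [this] the capture field has pointer type, so CXXThisValue is loaded
// from it; for [*this] the field holds a copy and its address is used.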
1221
1222
212k
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1223
212k
  incrementProfileCounter(Body);
1224
212k
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1225
211k
    EmitCompoundStmtWithoutScope(*S);
1226
122
  else
1227
122
    EmitStmt(Body);
1228
1229
  // This is checked after emitting the function body so we know if there
1230
  // are any permitted infinite loops.
1231
212k
  if (checkIfFunctionMustProgress())
1232
124k
    CurFn->addFnAttr(llvm::Attribute::MustProgress);
1233
212k
}
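// Illustrative sketch (editor's example, not part of the original file): the
// C++ forward-progress guarantee usually lets checkIfFunctionMustProgress()
// succeed, so a definition like
//
//   int spin(bool f) { while (f) { } return 0; }
//
// may get the mustprogress attribute, while C functions with trivial loop
// conditions are among the permitted infinite loops and may be left unmarked.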
1234
1235
/// When instrumenting to collect profile data, the counts for some blocks
1236
/// such as switch cases need to not include the fall-through counts, so
1237
/// emit a branch around the instrumentation code. When not instrumenting,
1238
/// this just calls EmitBlock().
1239
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1240
2.00k
                                               const Stmt *S) {
1241
2.00k
  llvm::BasicBlock *SkipCountBB = nullptr;
1242
2.00k
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1243
    // When instrumenting for profiling, the fallthrough to certain
1244
    // statements needs to skip over the instrumentation code so that we
1245
    // get an accurate count.
1246
23
    SkipCountBB = createBasicBlock("skipcount");
1247
23
    EmitBranch(SkipCountBB);
1248
23
  }
1249
2.00k
  EmitBlock(BB);
1250
2.00k
  uint64_t CurrentCount = getCurrentProfileCount();
1251
2.00k
  incrementProfileCounter(S);
1252
2.00k
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1253
2.00k
  if (SkipCountBB)
1254
23
    EmitBlock(SkipCountBB);
1255
2.00k
}
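// Illustrative sketch (editor's example, not part of the original file): with
// -fprofile-instr-generate, a fall-through into a case label such as
//
//   switch (x) {
//   case 0: ++x;        // falls through
//   case 1: return x;
//   }
//
// branches to "skipcount" around case 1's counter increment, so that counter
// records only direct entries; the fall-through count is then added back via
// setCurrentProfileCount() above.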
1256
1257
/// Tries to mark the given function nounwind based on the
1258
/// non-existence of any throwing calls within it.  We believe this is
1259
/// lightweight enough to do at -O0.
1260
206k
static void TryMarkNoThrow(llvm::Function *F) {
1261
  // LLVM treats 'nounwind' on a function as part of the type, so we
1262
  // can't do this on functions that can be overwritten.
1263
206k
  if (F->isInterposable()) return;
1264
1265
206k
  for (llvm::BasicBlock &BB : *F)
1266
331k
    for (llvm::Instruction &I : BB)
1267
3.09M
      if (I.mayThrow())
1268
73.4k
        return;
1269
1270
132k
  F->setDoesNotThrow();
1271
132k
}
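// Illustrative sketch (editor's example, not part of the original file): a
// definition such as
//
//   extern "C" int add(int a, int b) { return a + b; }
//
// contains no may-throw instructions, so it gets marked nounwind here; a weak
// (interposable) definition is skipped because a replacement body may throw.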
1272
1273
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1274
390k
                                               FunctionArgList &Args) {
1275
390k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1276
390k
  QualType ResTy = FD->getReturnType();
1277
1278
390k
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1279
390k
  if (MD && MD->isInstance()) {
1280
103k
    if (CGM.getCXXABI().HasThisReturn(GD))
1281
1.66k
      ResTy = MD->getThisType();
1282
102k
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1283
216
      ResTy = CGM.getContext().VoidPtrTy;
1284
103k
    CGM.getCXXABI().buildThisParam(*this, Args);
1285
103k
  }
1286
1287
  // The base version of an inheriting constructor whose constructed base is a
1288
  // virtual base is not passed any arguments (because it doesn't actually call
1289
  // the inherited constructor).
1290
390k
  bool PassedParams = true;
1291
390k
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1292
40.5k
    if (auto Inherited = CD->getInheritedConstructor())
1293
226
      PassedParams =
1294
226
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1295
1296
390k
  if (PassedParams) {
1297
485k
    for (auto *Param : FD->parameters()) {
1298
485k
      Args.push_back(Param);
1299
485k
      if (!Param->hasAttr<PassObjectSizeAttr>())
1300
485k
        continue;
1301
1302
88
      auto *Implicit = ImplicitParamDecl::Create(
1303
88
          getContext(), Param->getDeclContext(), Param->getLocation(),
1304
88
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1305
88
      SizeArguments[Param] = Implicit;
1306
88
      Args.push_back(Implicit);
1307
88
    }
1308
390k
  }
1309
1310
390k
  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1311
56.8k
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1312
1313
390k
  return ResTy;
1314
390k
}
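// Illustrative sketch (editor's example, not part of the original file): the
// implicit size parameter above corresponds to Clang's pass_object_size
// extension, e.g.:
//
//   void fill(char *buf __attribute__((pass_object_size(0))), char c);
//
// Each such parameter gets a hidden size_t companion argument, recorded in
// SizeArguments, which callers populate with __builtin_object_size(buf, 0).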
1315
1316
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1317
269k
                                   const CGFunctionInfo &FnInfo) {
1318
269k
  assert(Fn && "generating code for null Function");
1319
0
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1320
269k
  CurGD = GD;
1321
1322
269k
  FunctionArgList Args;
1323
269k
  QualType ResTy = BuildFunctionArgList(GD, Args);
1324
1325
269k
  if (FD->isInlineBuiltinDeclaration()) {
1326
    // When generating code for a builtin with an inline declaration, use a
1327
    // mangled name to hold the actual body, while keeping an external
1328
    // definition in case the function pointer is referenced somewhere.
1329
15
    std::string FDInlineName = (Fn->getName() + ".inline").str();
1330
15
    llvm::Module *M = Fn->getParent();
1331
15
    llvm::Function *Clone = M->getFunction(FDInlineName);
1332
15
    if (!Clone) {
1333
3
      Clone = llvm::Function::Create(Fn->getFunctionType(),
1334
3
                                     llvm::GlobalValue::InternalLinkage,
1335
3
                                     Fn->getAddressSpace(), FDInlineName, M);
1336
3
      Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1337
3
    }
1338
15
    Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1339
15
    Fn = Clone;
1340
269k
  } else {
1341
    // Detect the unusual situation where an inline version is shadowed by a
1342
    // non-inline version. In that case we should pick the external one
1343
    // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1344
    // to detect that situation before we reach codegen, so do some late
1345
    // replacement.
1346
276k
    for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1347
269k
         PD = PD->getPreviousDecl()) {
1348
6.81k
      if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1349
2
        std::string FDInlineName = (Fn->getName() + ".inline").str();
1350
2
        llvm::Module *M = Fn->getParent();
1351
2
        if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
1352
1
          Clone->replaceAllUsesWith(Fn);
1353
1
          Clone->eraseFromParent();
1354
1
        }
1355
2
        break;
1356
2
      }
1357
6.81k
    }
1358
269k
  }
1359
1360
  // Check if we should generate debug info for this function.
1361
269k
  if (FD->hasAttr<NoDebugAttr>()) {
1362
    // Clear non-distinct debug info that was possibly attached to the function
1363
    // due to an earlier declaration without the nodebug attribute
1364
15.2k
    Fn->setSubprogram(nullptr);
1365
    // Disable debug info indefinitely for this function
1366
15.2k
    DebugInfo = nullptr;
1367
15.2k
  }
1368
1369
  // The function might not have a body if we're generating thunks for a
1370
  // function declaration.
1371
269k
  SourceRange BodyRange;
1372
269k
  if (Stmt *Body = FD->getBody())
1373
269k
    BodyRange = Body->getSourceRange();
1374
130
  else
1375
130
    BodyRange = FD->getLocation();
1376
269k
  CurEHLocation = BodyRange.getEnd();
1377
1378
  // Use the location of the start of the function to determine where
1379
  // the function definition is located. By default use the location
1380
  // of the declaration as the location for the subprogram. A function
1381
  // may lack a declaration in the source code if it is created by code
1382
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1383
269k
  SourceLocation Loc = FD->getLocation();
1384
1385
  // If this is a function specialization then use the pattern body
1386
  // as the location for the function.
1387
269k
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1388
93.0k
    if (SpecDecl->hasBody(SpecDecl))
1389
92.2k
      Loc = SpecDecl->getLocation();
1390
1391
269k
  Stmt *Body = FD->getBody();
1392
1393
269k
  if (Body) {
1394
    // Coroutines always emit lifetime markers.
1395
269k
    if (isa<CoroutineBodyStmt>(Body))
1396
118
      ShouldEmitLifetimeMarkers = true;
1397
1398
    // Initialize helper which will detect jumps which can cause invalid
1399
    // lifetime markers.
1400
269k
    if (ShouldEmitLifetimeMarkers)
1401
40.7k
      Bypasses.Init(Body);
1402
269k
  }
1403
1404
  // Emit the standard function prologue.
1405
269k
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1406
1407
  // Save parameters for coroutine function.
1408
269k
  if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1409
118
    llvm::append_range(FnArgs, FD->parameters());
1410
1411
  // Generate the body of the function.
1412
269k
  PGO.assignRegionCounters(GD, CurFn);
1413
269k
  if (isa<CXXDestructorDecl>(FD))
1414
16.2k
    EmitDestructorBody(Args);
1415
253k
  else if (isa<CXXConstructorDecl>(FD))
1416
40.4k
    EmitConstructorBody(Args);
1417
213k
  else if (getLangOpts().CUDA &&
1418
213k
           !getLangOpts().CUDAIsDevice &&
1419
213k
           FD->hasAttr<CUDAGlobalAttr>())
1420
87
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1421
212k
  else if (isa<CXXMethodDecl>(FD) &&
1422
212k
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1423
    // The lambda static invoker function is special, because it forwards or
1424
    // clones the body of the function call operator (but is actually static).
1425
60
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1426
212k
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1427
212k
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1428
755
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1429
    // Implicit copy-assignment gets the same special treatment as implicit
1430
    // copy-constructors.
1431
737
    emitImplicitAssignmentOperatorBody(Args);
1432
212k
  } else if (Body) {
1433
212k
    EmitFunctionBody(Body);
1434
212k
  } else
1435
0
    llvm_unreachable("no definition for emitted function");
1436
1437
  // C++11 [stmt.return]p2:
1438
  //   Flowing off the end of a function [...] results in undefined behavior in
1439
  //   a value-returning function.
1440
  // C11 6.9.1p12:
1441
  //   If the '}' that terminates a function is reached, and the value of the
1442
  //   function call is used by the caller, the behavior is undefined.
1443
269k
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1444
269k
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1445
185
    bool ShouldEmitUnreachable =
1446
185
        CGM.getCodeGenOpts().StrictReturn ||
1447
185
        !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1448
185
    if (SanOpts.has(SanitizerKind::Return)) {
1449
1
      SanitizerScope SanScope(this);
1450
1
      llvm::Value *IsFalse = Builder.getFalse();
1451
1
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1452
1
                SanitizerHandler::MissingReturn,
1453
1
                EmitCheckSourceLocation(FD->getLocation()), None);
1454
184
    } else if (ShouldEmitUnreachable) {
1455
166
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1456
142
        EmitTrapCall(llvm::Intrinsic::trap);
1457
166
    }
1458
185
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1459
167
      Builder.CreateUnreachable();
1460
167
      Builder.ClearInsertionPoint();
1461
167
    }
1462
185
  }
1463
1464
  // Emit the standard function epilogue.
1465
269k
  FinishFunction(BodyRange.getEnd());
1466
1467
  // If we haven't marked the function nothrow through other means, do
1468
  // a quick pass now to see if we can.
1469
269k
  if (!CurFn->doesNotThrow())
1470
206k
    TryMarkNoThrow(CurFn);
1471
269k
}
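// Illustrative sketch (editor's example, not part of the original file): the
// ".inline" clone logic above targets inline builtin declarations such as
// (C, gnu_inline style):
//
//   extern inline __attribute__((gnu_inline, always_inline))
//   void *memcpy(void *d, const void *s, unsigned long n) { /* body */ }
//
// The body is emitted into an internal, always-inline "memcpy.inline" while
// "memcpy" itself keeps an external definition for address-taken uses.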
1472
1473
/// ContainsLabel - Return true if the statement contains a label in it.  If
1474
/// this statement is not executed normally, then not containing a label means
1475
/// that we can just remove the code.
1476
69.9k
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1477
  // Null statement, not a label!
1478
69.9k
  if (!S) return false;
1479
1480
  // If this is a label, we have to emit the code, consider something like:
1481
  // if (0) {  ...  foo:  bar(); }  goto foo;
1482
  //
1483
  // TODO: If anyone cared, we could track __label__'s, since we know that you
1484
  // can't jump to one from outside their declared region.
1485
69.8k
  if (isa<LabelStmt>(S))
1486
14
    return true;
1487
1488
  // If this is a case/default statement, and we haven't seen a switch, we have
1489
  // to emit the code.
1490
69.7k
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1491
0
    return true;
1492
1493
  // If this is a switch statement, we want to ignore cases below it.
1494
69.7k
  if (isa<SwitchStmt>(S))
1495
10
    IgnoreCaseStmts = true;
1496
1497
  // Scan subexpressions for verboten labels.
1498
69.7k
  for (const Stmt *SubStmt : S->children())
1499
52.3k
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1500
26
      return true;
1501
1502
69.7k
  return false;
1503
69.7k
}
1504
1505
/// containsBreak - Return true if the statement contains a break out of it.
1506
/// If the statement (recursively) contains a switch or loop with a break
1507
/// inside of it, this is fine.
1508
88
bool CodeGenFunction::containsBreak(const Stmt *S) {
1509
  // Null statement, no break!
1510
88
  if (!S) return false;
1511
1512
  // If this is a switch or loop that defines its own break scope, then we can
1513
  // include it and anything inside of it.
1514
88
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1515
88
      isa<ForStmt>(S))
1516
3
    return false;
1517
1518
85
  if (isa<BreakStmt>(S))
1519
1
    return true;
1520
1521
  // Scan subexpressions for verboten breaks.
1522
84
  for (const Stmt *SubStmt : S->children())
1523
54
    if (containsBreak(SubStmt))
1524
1
      return true;
1525
1526
83
  return false;
1527
84
}
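// Illustrative sketch (editor's example, not part of the original file),
// where parse() is a hypothetical stand-in for obtaining the Stmt:
//
//   containsBreak(parse("if (x) break;"))        // true: break escapes
//   containsBreak(parse("while (c) { break; }")) // false: loop owns the break
//
// Only breaks that would jump out of the inspected statement count.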
1528
1529
210
bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1530
210
  if (!S) return false;
1531
1532
  // Some statement kinds add a scope and thus never add a decl to the current
1533
  // scope. Note, this list is longer than the list of statements that might
1534
  // have an unscoped decl nested within them, but this way is conservatively
1535
  // correct even if more statement kinds are added.
1536
210
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1537
210
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1538
210
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1539
210
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1540
15
    return false;
1541
1542
195
  if (isa<DeclStmt>(S))
1543
7
    return true;
1544
1545
188
  for (const Stmt *SubStmt : S->children())
1546
145
    if (mightAddDeclToScope(SubStmt))
1547
2
      return true;
1548
1549
186
  return false;
1550
188
}
1551
1552
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1553
/// to a constant, or if it does but contains a label, return false.  If it
1554
/// constant folds return true and set the boolean result in Result.
1555
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1556
                                                   bool &ResultBool,
1557
166k
                                                   bool AllowLabels) {
1558
166k
  llvm::APSInt ResultInt;
1559
166k
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1560
151k
    return false;
1561
1562
15.3k
  ResultBool = ResultInt.getBoolValue();
1563
15.3k
  return true;
1564
166k
}
1565
1566
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1567
/// to a constant, or if it does but contains a label, return false.  If it
1568
/// constant folds return true and set the folded value.
1569
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1570
                                                   llvm::APSInt &ResultInt,
1571
167k
                                                   bool AllowLabels) {
1572
  // FIXME: Rename and handle conversion of other evaluatable things
1573
  // to bool.
1574
167k
  Expr::EvalResult Result;
1575
167k
  if (!Cond->EvaluateAsInt(Result, getContext()))
1576
151k
    return false;  // Not foldable, not integer or not fully evaluatable.
1577
1578
15.3k
  llvm::APSInt Int = Result.Val.getInt();
1579
15.3k
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1580
0
    return false;  // Contains a label.
1581
1582
15.3k
  ResultInt = Int;
1583
15.3k
  return true;
1584
15.3k
}
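// Illustrative sketch (editor's example, not part of the original file):
// conditions like
//
//   if (sizeof(void *) == 8) { ... } else { ... }
//
// fold here to a known value, letting callers skip emitting the dead arm,
// unless that arm contains a reachable label (see ContainsLabel above).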
1585
1586
/// Determine whether the given condition is an instrumentable condition
1587
/// (i.e. no "&&" or "||").
1588
672
bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1589
  // Bypass simplistic logical-NOT operator before determining whether the
1590
  // condition contains any other logical operator.
1591
672
  if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens()))
1592
9
    if (UnOp->getOpcode() == UO_LNot)
1593
8
      C = UnOp->getSubExpr();
1594
1595
672
  const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens());
1596
672
  return (!BOp || !BOp->isLogicalOp());
1597
672
}
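// Illustrative sketch (editor's example, not part of the original file):
// "x > 0" and "!(x > 0)" count as instrumentable leaf conditions here (a
// leading '!' is looked through first), while "a && b" does not: its operands
// are instrumented individually by EmitBranchToCounterBlock below.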
1598
1599
/// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1600
/// increments a profile counter based on the semantics of the given logical
1601
/// operator opcode.  This is used to instrument branch condition coverage for
1602
/// logical operators.
1603
void CodeGenFunction::EmitBranchToCounterBlock(
1604
    const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1605
    llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1606
7.56k
    Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1607
  // If not instrumenting, just emit a branch.
1608
7.56k
  bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1609
7.56k
  if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1610
7.50k
    return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1611
1612
62
  llvm::BasicBlock *ThenBlock = nullptr;
1613
62
  llvm::BasicBlock *ElseBlock = nullptr;
1614
62
  llvm::BasicBlock *NextBlock = nullptr;
1615
1616
  // Create the block we'll use to increment the appropriate counter.
1617
62
  llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1618
1619
  // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1620
  // means we need to evaluate the condition and increment the counter on TRUE:
1621
  //
1622
  // if (Cond)
1623
  //   goto CounterIncrBlock;
1624
  // else
1625
  //   goto FalseBlock;
1626
  //
1627
  // CounterIncrBlock:
1628
  //   Counter++;
1629
  //   goto TrueBlock;
1630
1631
62
  if (LOp == BO_LAnd) {
1632
35
    ThenBlock = CounterIncrBlock;
1633
35
    ElseBlock = FalseBlock;
1634
35
    NextBlock = TrueBlock;
1635
35
  }
1636
1637
  // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1638
  // we need to evaluate the condition and increment the counter on FALSE:
1639
  //
1640
  // if (Cond)
1641
  //   goto TrueBlock;
1642
  // else
1643
  //   goto CounterIncrBlock;
1644
  //
1645
  // CounterIncrBlock:
1646
  //   Counter++;
1647
  //   goto FalseBlock;
1648
1649
27
  else if (LOp == BO_LOr) {
1650
27
    ThenBlock = TrueBlock;
1651
27
    ElseBlock = CounterIncrBlock;
1652
27
    NextBlock = FalseBlock;
1653
27
  } else {
1654
0
    llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1655
0
  }
1656
1657
  // Emit Branch based on condition.
1658
62
  EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1659
1660
  // Emit the block containing the counter increment(s).
1661
62
  EmitBlock(CounterIncrBlock);
1662
1663
  // Increment corresponding counter; if index not provided, use Cond as index.
1664
62
  incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1665
1666
  // Go to the next block.
1667
62
  EmitBranch(NextBlock);
1668
62
}
1669
1670
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1671
/// statement) to the specified blocks.  Based on the condition, this might try
1672
/// to simplify the codegen of the conditional based on the branch.
1673
/// \param LH The value of the likelihood attribute on the True branch.
1674
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1675
                                           llvm::BasicBlock *TrueBlock,
1676
                                           llvm::BasicBlock *FalseBlock,
1677
                                           uint64_t TrueCount,
1678
167k
                                           Stmt::Likelihood LH) {
1679
167k
  Cond = Cond->IgnoreParens();
1680
1681
167k
  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1682
1683
    // Handle X && Y in a condition.
1684
73.8k
    if (CondBOp->getOpcode() == BO_LAnd) {
1685
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
1686
      // folded if the case was simple enough.
1687
7.12k
      bool ConstantBool = false;
1688
7.12k
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1689
7.12k
          ConstantBool) {
1690
        // br(1 && X) -> br(X).
1691
62
        incrementProfileCounter(CondBOp);
1692
62
        return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1693
62
                                        FalseBlock, TrueCount, LH);
1694
62
      }
1695
1696
      // If we have "X && 1", simplify the code to use an uncond branch.
1697
      // "X && 0" would have been constant folded to 0.
1698
7.06k
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1699
7.06k
          ConstantBool) {
1700
        // br(X && 1) -> br(X).
1701
29
        return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1702
29
                                        FalseBlock, TrueCount, LH, CondBOp);
1703
29
      }
1704
1705
      // Emit the LHS as a conditional.  If the LHS conditional is false, we
1706
      // want to jump to the FalseBlock.
1707
7.03k
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1708
      // The counter tells us how often we evaluate RHS, and all of TrueCount
1709
      // can be propagated to that branch.
1710
7.03k
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1711
1712
7.03k
      ConditionalEvaluation eval(*this);
1713
7.03k
      {
1714
7.03k
        ApplyDebugLocation DL(*this, Cond);
1715
        // Propagate the likelihood attribute like __builtin_expect
1716
        // __builtin_expect(X && Y, 1) -> X and Y are likely
1717
        // __builtin_expect(X && Y, 0) -> only Y is unlikely
1718
7.03k
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1719
7.03k
                             LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1720
7.03k
        EmitBlock(LHSTrue);
1721
7.03k
      }
1722
1723
7.03k
      incrementProfileCounter(CondBOp);
1724
7.03k
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1725
1726
      // Any temporaries created here are conditional.
1727
7.03k
      eval.begin(*this);
1728
7.03k
      EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1729
7.03k
                               FalseBlock, TrueCount, LH);
1730
7.03k
      eval.end(*this);
1731
1732
7.03k
      return;
1733
7.06k
    }
1734
1735
66.7k
    if (CondBOp->getOpcode() == BO_LOr) {
1736
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
1737
      // folded if the case was simple enough.
1738
443
      bool ConstantBool = false;
1739
443
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1740
443
          !ConstantBool) {
1741
        // br(0 || X) -> br(X).
1742
7
        incrementProfileCounter(CondBOp);
1743
7
        return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1744
7
                                        FalseBlock, TrueCount, LH);
1745
7
      }
1746
1747
      // If we have "X || 0", simplify the code to use an uncond branch.
1748
      // "X || 1" would have been constant folded to 1.
1749
436
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1750
436
          !ConstantBool) {
1751
        // br(X || 0) -> br(X).
1752
1
        return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1753
1
                                        FalseBlock, TrueCount, LH, CondBOp);
1754
1
      }
1755
1756
      // Emit the LHS as a conditional.  If the LHS conditional is true, we
1757
      // want to jump to the TrueBlock.
1758
435
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1759
      // We have the count for entry to the RHS and for the whole expression
1760
      // being true, so we can divvy up True count between the short circuit and
1761
      // the RHS.
1762
435
      uint64_t LHSCount =
1763
435
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1764
435
      uint64_t RHSCount = TrueCount - LHSCount;
1765
1766
435
      ConditionalEvaluation eval(*this);
1767
435
      {
1768
        // Propagate the likelihood attribute like __builtin_expect
1769
        // __builtin_expect(X || Y, 1) -> only Y is likely
1770
        // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1771
435
        ApplyDebugLocation DL(*this, Cond);
1772
435
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1773
435
                             LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1774
435
        EmitBlock(LHSFalse);
1775
435
      }
1776
1777
435
      incrementProfileCounter(CondBOp);
1778
435
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1779
1780
      // Any temporaries created here are conditional.
1781
435
      eval.begin(*this);
1782
435
      EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1783
435
                               RHSCount, LH);
1784
1785
435
      eval.end(*this);
1786
1787
435
      return;
1788
436
    }
1789
66.7k
  }
1790
1791
160k
  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1792
    // br(!x, t, f) -> br(x, f, t)
1793
2.56k
    if (CondUOp->getOpcode() == UO_LNot) {
1794
      // Negate the count.
1795
2.56k
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1796
      // The values of the enum are chosen to make this negation possible.
1797
2.56k
      LH = static_cast<Stmt::Likelihood>(-LH);
1798
      // Negate the condition and swap the destination blocks.
1799
2.56k
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1800
2.56k
                                  FalseCount, LH);
1801
2.56k
    }
1802
2.56k
  }
1803
1804
157k
  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1805
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1806
2
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1807
2
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1808
1809
    // The ConditionalOperator itself has no likelihood information for its
1810
    // true and false branches. This matches the behavior of __builtin_expect.
1811
2
    ConditionalEvaluation cond(*this);
1812
2
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1813
2
                         getProfileCount(CondOp), Stmt::LH_None);
1814
1815
    // When computing PGO branch weights, we only know the overall count for
1816
    // the true block. This code is essentially doing tail duplication of the
1817
    // naive code-gen, introducing new edges for which counts are not
1818
    // available. Divide the counts proportionally between the LHS and RHS of
1819
    // the conditional operator.
1820
2
    uint64_t LHSScaledTrueCount = 0;
1821
2
    if (TrueCount) {
1822
0
      double LHSRatio =
1823
0
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
1824
0
      LHSScaledTrueCount = TrueCount * LHSRatio;
1825
0
    }
1826
1827
2
    cond.begin(*this);
1828
2
    EmitBlock(LHSBlock);
1829
2
    incrementProfileCounter(CondOp);
1830
2
    {
1831
2
      ApplyDebugLocation DL(*this, Cond);
1832
2
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1833
2
                           LHSScaledTrueCount, LH);
1834
2
    }
1835
2
    cond.end(*this);
1836
1837
2
    cond.begin(*this);
1838
2
    EmitBlock(RHSBlock);
1839
2
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1840
2
                         TrueCount - LHSScaledTrueCount, LH);
1841
2
    cond.end(*this);
1842
1843
2
    return;
1844
2
  }
1845
1846
157k
  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1847
    // Conditional operator handling can give us a throw expression as a
1848
    // condition for a case like:
1849
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1850
    // Fold this to:
1851
    //   br(c, throw x, br(y, t, f))
1852
0
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1853
0
    return;
1854
0
  }
1855
1856
  // Emit the code with the fully general case.
1857
157k
  llvm::Value *CondV;
1858
157k
  {
1859
157k
    ApplyDebugLocation DL(*this, Cond);
1860
157k
    CondV = EvaluateExprAsBool(Cond);
1861
157k
  }
1862
1863
157k
  llvm::MDNode *Weights = nullptr;
1864
157k
  llvm::MDNode *Unpredictable = nullptr;
1865
1866
  // If the branch has a condition wrapped by __builtin_unpredictable,
1867
  // create metadata that specifies that the branch is unpredictable.
1868
  // Don't bother if not optimizing because that metadata would not be used.
1869
157k
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1870
157k
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1871
132
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1872
132
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1873
3
      llvm::MDBuilder MDHelper(getLLVMContext());
1874
3
      Unpredictable = MDHelper.createUnpredictable();
1875
3
    }
1876
132
  }
1877
1878
  // If there is a Likelihood knowledge for the cond, lower it.
1879
  // Note that if not optimizing this won't emit anything.
1880
157k
  llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1881
157k
  if (CondV != NewCondV)
1882
40
    CondV = NewCondV;
1883
157k
  else {
1884
    // Otherwise, lower profile counts. Note that we do this even at -O0.
1885
157k
    uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1886
157k
    Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1887
157k
  }
1888
1889
157k
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1890
157k
}
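// Illustrative sketch (editor's example, not part of the original file) of
// the short-circuit lowering above for "if (a && b)":
//
//   br(a, land.lhs.true, if.else)
// land.lhs.true:
//   br(b, if.then, if.else)
//
// with TrueCount propagated to the RHS branch and the RHS counter bumped in
// between, mirroring the BO_LAnd path.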
1891
1892
/// ErrorUnsupported - Print out an error that codegen doesn't support the
1893
/// specified stmt yet.
1894
1
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1895
1
  CGM.ErrorUnsupported(S, Type);
1896
1
}
1897
1898
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
1899
/// variable-length array whose elements have a non-zero bit-pattern.
1900
///
1901
/// \param baseType the inner-most element type of the array
1902
/// \param src - a char* pointing to the bit-pattern for a single
1903
/// base element of the array
1904
/// \param sizeInChars - the total size of the VLA, in chars
1905
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1906
                               Address dest, Address src,
1907
0
                               llvm::Value *sizeInChars) {
1908
0
  CGBuilderTy &Builder = CGF.Builder;
1909
1910
0
  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1911
0
  llvm::Value *baseSizeInChars
1912
0
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1913
1914
0
  Address begin =
1915
0
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1916
0
  llvm::Value *end = Builder.CreateInBoundsGEP(
1917
0
      begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
1918
1919
0
  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1920
0
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1921
0
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1922
1923
  // Make a loop over the VLA.  C99 guarantees that the VLA element
1924
  // count must be nonzero.
1925
0
  CGF.EmitBlock(loopBB);
1926
1927
0
  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1928
0
  cur->addIncoming(begin.getPointer(), originBB);
1929
1930
0
  CharUnits curAlign =
1931
0
    dest.getAlignment().alignmentOfArrayElement(baseSize);
1932
1933
  // memcpy the individual element bit-pattern.
1934
0
  Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
1935
0
                       /*volatile*/ false);
1936
1937
  // Go to the next element.
1938
0
  llvm::Value *next =
1939
0
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1940
1941
  // Leave if that's the end of the VLA.
1942
0
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1943
0
  Builder.CreateCondBr(done, contBB, loopBB);
1944
0
  cur->addIncoming(next, loopBB);
1945
1946
0
  CGF.EmitBlock(contBB);
1947
0
}
1948
1949
void
1950
7.11k
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1951
  // Ignore empty classes in C++.
1952
7.11k
  if (getLangOpts().CPlusPlus) {
1953
6.96k
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
1954
6.60k
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1955
5.82k
        return;
1956
6.60k
    }
1957
6.96k
  }
1958
1959
  // Cast the dest ptr to the appropriate i8 pointer type.
1960
1.28k
  if (DestPtr.getElementType() != Int8Ty)
1961
1.28k
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1962
1963
  // Get size and alignment info for this aggregate.
1964
1.28k
  CharUnits size = getContext().getTypeSizeInChars(Ty);
1965
1966
1.28k
  llvm::Value *SizeVal;
1967
1.28k
  const VariableArrayType *vla;
1968
1969
  // Don't bother emitting a zero-byte memset.
1970
1.28k
  if (size.isZero()) {
1971
    // But note that getTypeInfo returns 0 for a VLA.
1972
11
    if (const VariableArrayType *vlaType =
1973
11
          dyn_cast_or_null<VariableArrayType>(
1974
11
                                          getContext().getAsArrayType(Ty))) {
1975
8
      auto VlaSize = getVLASize(vlaType);
1976
8
      SizeVal = VlaSize.NumElts;
1977
8
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1978
8
      if (!eltSize.isOne())
1979
8
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1980
8
      vla = vlaType;
1981
8
    } else {
1982
3
      return;
1983
3
    }
1984
1.27k
  } else {
1985
1.27k
    SizeVal = CGM.getSize(size);
1986
1.27k
    vla = nullptr;
1987
1.27k
  }
1988
1989
  // If the type contains a pointer to data member we can't memset it to zero.
1990
  // Instead, create a null constant and copy it to the destination.
1991
  // TODO: there are other patterns besides zero that we can usefully memset,
1992
  // like -1, which happens to be the pattern used by member-pointers.
1993
1.28k
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
1994
    // For a VLA, emit a single element, then splat that over the VLA.
1995
9
    if (vla) Ty = getContext().getBaseElementType(vla);
1996
1997
9
    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1998
1999
9
    llvm::GlobalVariable *NullVariable =
2000
9
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2001
9
                               /*isConstant=*/true,
2002
9
                               llvm::GlobalVariable::PrivateLinkage,
2003
9
                               NullConstant, Twine());
2004
9
    CharUnits NullAlign = DestPtr.getAlignment();
2005
9
    NullVariable->setAlignment(NullAlign.getAsAlign());
2006
9
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
2007
9
                   Builder.getInt8Ty(), NullAlign);
2008
2009
9
    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2010
2011
    // Get and call the appropriate llvm.memcpy overload.
2012
9
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2013
9
    return;
2014
9
  }
2015
2016
  // Otherwise, just memset the whole thing to zero.  This is legal
2017
  // because in LLVM, all default initializers (other than the ones we just
2018
  // handled above) are guaranteed to have a bit pattern of all zeros.
2019
1.27k
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2020
1.27k
}
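// Illustrative sketch (editor's example, not part of the original file):
// under the Itanium C++ ABI a null pointer to data member is all-ones, so
//
//   struct S { int S::*pm; };
//   S s{};            // may take the memcpy-from-null-constant path above
//   int a[16] = {};   // zero-initializable: takes the trailing memset
//
// exercises both branches of the isZeroInitializable() check.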
2021
2022
61
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2023
  // Make sure that there is a block for the indirect goto.
2024
61
  if (!IndirectBranch)
2025
38
    GetIndirectGotoBlock();
2026
2027
61
  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2028
2029
  // Make sure the indirect branch includes all of the address-taken blocks.
2030
61
  IndirectBranch->addDestination(BB);
2031
61
  return llvm::BlockAddress::get(CurFn, BB);
2032
61
}
2033
2034
60
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2035
  // If we already made the indirect branch for indirect goto, return its block.
2036
60
  if (IndirectBranch) return IndirectBranch->getParent();
2037
2038
39
  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2039
2040
  // Create the PHI node that indirect gotos will add entries to.
2041
39
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2042
39
                                              "indirect.goto.dest");
2043
2044
  // Create the indirect branch instruction.
2045
39
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2046
39
  return IndirectBranch->getParent();
2047
60
}
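// Illustrative sketch (editor's example, not part of the original file),
// using the GNU labels-as-values extension:
//
//   void *lab = &&done;   // GetAddrOfLabel: blockaddress + addDestination
//   goto *lab;            // routed through the shared "indirectgoto" block
// done:;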
2048
2049
/// Computes the length of an array in elements, as well as the base
2050
/// element type and a properly-typed first element pointer.
2051
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2052
                                              QualType &baseType,
2053
4.84k
                                              Address &addr) {
2054
4.84k
  const ArrayType *arrayType = origArrayType;
2055
2056
  // If it's a VLA, we have to load the stored size.  Note that
2057
  // this is the size of the VLA in bytes, not its size in elements.
2058
4.84k
  llvm::Value *numVLAElements = nullptr;
2059
4.84k
  if (isa<VariableArrayType>(arrayType)) {
2060
634
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2061
2062
    // Walk into all VLAs.  This doesn't require changes to addr,
2063
    // which has type T* where T is the first non-VLA element type.
2064
671
    do {
2065
671
      QualType elementType = arrayType->getElementType();
2066
671
      arrayType = getContext().getAsArrayType(elementType);
2067
2068
      // If we only have VLA components, 'addr' requires no adjustment.
2069
671
      if (!arrayType) {
2070
630
        baseType = elementType;
2071
630
        return numVLAElements;
2072
630
      }
2073
671
    } while (isa<VariableArrayType>(arrayType));
2074
2075
    // We get out here only if we find a constant array type
2076
    // inside the VLA.
2077
634
  }
2078
2079
  // We have some number of constant-length arrays, so addr should
2080
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
2081
  // down to the first element of addr.
2082
4.21k
  SmallVector<llvm::Value*, 8> gepIndices;
2083
2084
  // GEP down to the array type.
2085
4.21k
  llvm::ConstantInt *zero = Builder.getInt32(0);
2086
4.21k
  gepIndices.push_back(zero);
2087
2088
4.21k
  uint64_t countFromCLAs = 1;
2089
4.21k
  QualType eltType;
2090
2091
4.21k
  llvm::ArrayType *llvmArrayType =
2092
4.21k
    dyn_cast<llvm::ArrayType>(addr.getElementType());
2093
8.20k
  while (llvmArrayType) {
2094
3.98k
    assert(isa<ConstantArrayType>(arrayType));
2095
0
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
2096
3.98k
             == llvmArrayType->getNumElements());
2097
2098
0
    gepIndices.push_back(zero);
2099
3.98k
    countFromCLAs *= llvmArrayType->getNumElements();
2100
3.98k
    eltType = arrayType->getElementType();
2101
2102
3.98k
    llvmArrayType =
2103
3.98k
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2104
3.98k
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
2105
3.98k
    assert((!llvmArrayType || arrayType) &&
2106
3.98k
           "LLVM and Clang types are out-of-synch");
2107
3.98k
  }
2108
2109
4.21k
  if (arrayType) {
2110
    // From this point onwards, the Clang array type has been emitted
2111
    // as some other type (probably a packed struct). Compute the array
2112
    // size, and just emit the 'begin' expression as a bitcast.
2113
698
    while (arrayType) {
2114
370
      countFromCLAs *=
2115
370
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
2116
370
      eltType = arrayType->getElementType();
2117
370
      arrayType = getContext().getAsArrayType(eltType);
2118
370
    }
2119
2120
328
    llvm::Type *baseType = ConvertType(eltType);
2121
328
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
2122
3.89k
  } else {
2123
    // Create the actual GEP.
2124
3.89k
    addr = Address(Builder.CreateInBoundsGEP(
2125
3.89k
        addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
2126
3.89k
        ConvertTypeForMem(eltType),
2127
3.89k
        addr.getAlignment());
2128
3.89k
  }
2129
2130
4.21k
  baseType = eltType;
2131
2132
4.21k
  llvm::Value *numElements
2133
4.21k
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2134
2135
  // If we had any VLA dimensions, factor them in.
2136
4.21k
  if (numVLAElements)
2137
4
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2138
2139
4.21k
  return numElements;
2140
4.84k
}
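// Illustrative sketch (editor's example, not part of the original file): for
// "int a[2][3]" the GEP walks both constant dimensions, returning
// numElements == 6 with baseType == int; for "int v[n][m]" (both bounds
// variable) the VLA walk returns n * m directly without adjusting addr.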
2141
2142
2.17k
CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2143
2.17k
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2144
2.17k
  assert(vla && "type was not a variable array type!");
2145
0
  return getVLASize(vla);
2146
2.17k
}
2147
2148
CodeGenFunction::VlaSizePair
2149
5.83k
CodeGenFunction::getVLASize(const VariableArrayType *type) {
2150
  // The number of elements so far; always size_t.
2151
5.83k
  llvm::Value *numElements = nullptr;
2152
2153
5.83k
  QualType elementType;
2154
7.37k
  do {
2155
7.37k
    elementType = type->getElementType();
2156
7.37k
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2157
7.37k
    assert(vlaSize && "no size for VLA!");
2158
0
    assert(vlaSize->getType() == SizeTy);
2159
2160
7.37k
    if (!numElements) {
2161
5.83k
      numElements = vlaSize;
2162
5.83k
    } else {
2163
      // It's undefined behavior if this wraps around, so mark it that way.
2164
      // FIXME: Teach -fsanitize=undefined to trap this.
2165
1.54k
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
2166
1.54k
    }
2167
7.37k
  } while ((type = getContext().getAsVariableArrayType(elementType)));
2168
2169
0
  return { numElements, elementType };
2170
5.83k
}
2171
2172
CodeGenFunction::VlaSizePair
2173
3.21k
CodeGenFunction::getVLAElements1D(QualType type) {
2174
3.21k
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2175
3.21k
  assert(vla && "type was not a variable array type!");
2176
0
  return getVLAElements1D(vla);
2177
3.21k
}
2178
2179
CodeGenFunction::VlaSizePair
2180
3.21k
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2181
3.21k
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2182
3.21k
  assert(VlaSize && "no size for VLA!");
2183
0
  assert(VlaSize->getType() == SizeTy);
2184
0
  return { VlaSize, Vla->getElementType() };
2185
3.21k
}
2186
2187
2.87k
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2188
2.87k
  assert(type->isVariablyModifiedType() &&
2189
2.87k
         "Must pass variably modified type to EmitVLASizes!");
2190
2191
0
  EnsureInsertPoint();
2192
2193
  // We're going to walk down into the type and look for VLA
2194
  // expressions.
2195
4.08k
  do {
2196
4.08k
    assert(type->isVariablyModifiedType());
2197
2198
0
    const Type *ty = type.getTypePtr();
2199
4.08k
    switch (ty->getTypeClass()) {
2200
2201
0
#define TYPE(Class, Base)
2202
0
#define ABSTRACT_TYPE(Class, Base)
2203
0
#define NON_CANONICAL_TYPE(Class, Base)
2204
0
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
2205
0
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2206
0
#include "clang/AST/TypeNodes.inc"
2207
0
      llvm_unreachable("unexpected dependent type!");
2208
2209
    // These types are never variably-modified.
2210
0
    case Type::Builtin:
2211
0
    case Type::Complex:
2212
0
    case Type::Vector:
2213
0
    case Type::ExtVector:
2214
0
    case Type::ConstantMatrix:
2215
0
    case Type::Record:
2216
0
    case Type::Enum:
2217
0
    case Type::Elaborated:
2218
0
    case Type::Using:
2219
0
    case Type::TemplateSpecialization:
2220
0
    case Type::ObjCTypeParam:
2221
0
    case Type::ObjCObject:
2222
0
    case Type::ObjCInterface:
2223
0
    case Type::ObjCObjectPointer:
2224
0
    case Type::BitInt:
2225
0
      llvm_unreachable("type class is never variably-modified!");
2226
2227
0
    case Type::Adjusted:
2228
0
      type = cast<AdjustedType>(ty)->getAdjustedType();
2229
0
      break;
2230
2231
70
    case Type::Decayed:
2232
70
      type = cast<DecayedType>(ty)->getPointeeType();
2233
70
      break;
2234
2235
91
    case Type::Pointer:
2236
91
      type = cast<PointerType>(ty)->getPointeeType();
2237
91
      break;
2238
2239
0
    case Type::BlockPointer:
2240
0
      type = cast<BlockPointerType>(ty)->getPointeeType();
2241
0
      break;
2242
2243
3
    case Type::LValueReference:
2244
3
    case Type::RValueReference:
2245
3
      type = cast<ReferenceType>(ty)->getPointeeType();
2246
3
      break;
2247
2248
0
    case Type::MemberPointer:
2249
0
      type = cast<MemberPointerType>(ty)->getPointeeType();
2250
0
      break;
2251
2252
2
    case Type::ConstantArray:
2253
12
    case Type::IncompleteArray:
2254
      // Losing element qualification here is fine.
2255
12
      type = cast<ArrayType>(ty)->getElementType();
2256
12
      break;
2257
2258
3.82k
    case Type::VariableArray: {
2259
      // Losing element qualification here is fine.
2260
3.82k
      const VariableArrayType *vat = cast<VariableArrayType>(ty);
2261
2262
      // Unknown size indication requires no size computation.
2263
      // Otherwise, evaluate and record it.
2264
3.82k
      if (const Expr *sizeExpr = vat->getSizeExpr()) {
2265
        // It's possible that we might have emitted this already,
2266
        // e.g. with a typedef and a pointer to it.
2267
3.82k
        llvm::Value *&entry = VLASizeMap[sizeExpr];
2268
3.82k
        if (!entry) {
2269
3.46k
          llvm::Value *size = EmitScalarExpr(sizeExpr);
2270
2271
          // C11 6.7.6.2p5:
2272
          //   If the size is an expression that is not an integer constant
2273
          //   expression [...] each time it is evaluated it shall have a value
2274
          //   greater than zero.
2275
3.46k
          if (SanOpts.has(SanitizerKind::VLABound)) {
2276
4
            SanitizerScope SanScope(this);
2277
4
            llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2278
4
            clang::QualType SEType = sizeExpr->getType();
2279
4
            llvm::Value *CheckCondition =
2280
4
                SEType->isSignedIntegerType()
2281
4
                    ? Builder.CreateICmpSGT(size, Zero)
2282
4
                    : Builder.CreateICmpUGT(size, Zero);
2283
4
            llvm::Constant *StaticArgs[] = {
2284
4
                EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2285
4
                EmitCheckTypeDescriptor(SEType)};
2286
4
            EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
2287
4
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
2288
4
          }
2289
2290
          // Always zexting here would be wrong if it weren't
2291
          // undefined behavior to have a negative bound.
2292
          // FIXME: What about when size's type is larger than size_t?
2293
3.46k
          entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2294
3.46k
        }
2295
3.82k
      }
2296
3.82k
      type = vat->getElementType();
2297
3.82k
      break;
2298
2
    }
2299
2300
1
    case Type::FunctionProto:
2301
1
    case Type::FunctionNoProto:
2302
1
      type = cast<FunctionType>(ty)->getReturnType();
2303
1
      break;
2304
2305
43
    case Type::Paren:
2306
45
    case Type::TypeOf:
2307
45
    case Type::UnaryTransform:
2308
45
    case Type::Attributed:
2309
45
    case Type::BTFTagAttributed:
2310
45
    case Type::SubstTemplateTypeParm:
2311
45
    case Type::MacroQualified:
2312
      // Keep walking after single level desugaring.
2313
45
      type = type.getSingleStepDesugaredType(getContext());
2314
45
      break;
2315
2316
28
    case Type::Typedef:
2317
28
    case Type::Decltype:
2318
28
    case Type::Auto:
2319
28
    case Type::DeducedTemplateSpecialization:
2320
      // Stop walking: nothing to do.
2321
28
      return;
2322
2323
14
    case Type::TypeOfExpr:
2324
      // Stop walking: emit typeof expression.
2325
14
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2326
14
      return;
2327
2328
1
    case Type::Atomic:
2329
1
      type = cast<AtomicType>(ty)->getValueType();
2330
1
      break;
2331
2332
0
    case Type::Pipe:
2333
0
      type = cast<PipeType>(ty)->getElementType();
2334
0
      break;
2335
4.08k
    }
2336
4.08k
  } while (type->isVariablyModifiedType());
2337
2.87k
}
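// Illustrative sketch (editor's example, not part of the original file):
// declaring
//
//   int (*rowPtr)[n];   // variably modified; "n" is evaluated once
//
// walks Pointer -> VariableArray above, emits the size expression, and caches
// the zero-extended value in VLASizeMap so later uses of the same size Expr
// (e.g. through a typedef or sizeof(*rowPtr)) reuse it.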
2338
2339
1.36k
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2340
1.36k
  if (getContext().getBuiltinVaListType()->isArrayType())
2341
548
    return EmitPointerWithAlignment(E);
2342
819
  return EmitLValue(E).getAddress(*this);
2343
1.36k
}
2344
2345
40
Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2346
40
  return EmitLValue(E).getAddress(*this);
2347
40
}
2348
2349
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2350
6.82k
                                              const APValue &Init) {
2351
6.82k
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2352
6.82k
  if (CGDebugInfo *Dbg = getDebugInfo())
2353
4.18k
    if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2354
4.14k
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
2355
6.82k
}
2356
2357
CodeGenFunction::PeepholeProtection
2358
1.69k
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2359
  // At the moment, the only aggressive peephole we do in IR gen
2360
  // is trunc(zext) folding, but if we add more, we can easily
2361
  // extend this protection.
2362
2363
1.69k
  if (!rvalue.isScalar()) return PeepholeProtection();
2364
1.62k
  llvm::Value *value = rvalue.getScalarVal();
2365
1.62k
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2366
2367
  // Just make an extra bitcast.
2368
77
  assert(HaveInsertPoint());
2369
0
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2370
77
                                                  Builder.GetInsertBlock());
2371
2372
77
  PeepholeProtection protection;
2373
77
  protection.Inst = inst;
2374
77
  return protection;
2375
1.62k
}
2376
2377
1.69k
void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2378
1.69k
  if (!protection.Inst) return;
2379
2380
  // In theory, we could try to duplicate the peepholes now, but whatever.
2381
77
  protection.Inst->eraseFromParent();
2382
77
}
2383
2384
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2385
                                              QualType Ty, SourceLocation Loc,
2386
                                              SourceLocation AssumptionLoc,
2387
                                              llvm::Value *Alignment,
2388
445
                                              llvm::Value *OffsetValue) {
2389
445
  if (Alignment->getType() != IntPtrTy)
2390
200
    Alignment =
2391
200
        Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2392
445
  if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2393
5
    OffsetValue =
2394
5
        Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2395
445
  llvm::Value *TheCheck = nullptr;
2396
445
  if (SanOpts.has(SanitizerKind::Alignment)) {
2397
33
    llvm::Value *PtrIntValue =
2398
33
        Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2399
2400
33
    if (OffsetValue) {
2401
9
      bool IsOffsetZero = false;
2402
9
      if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2403
6
        IsOffsetZero = CI->isZero();
2404
2405
9
      if (!IsOffsetZero)
2406
9
        PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2407
9
    }
2408
2409
33
    llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2410
33
    llvm::Value *Mask =
2411
33
        Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2412
33
    llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2413
33
    TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2414
33
  }
2415
445
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2416
445
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2417
2418
445
  if (!SanOpts.has(SanitizerKind::Alignment))
2419
412
    return;
2420
33
  emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2421
33
                               OffsetValue, TheCheck, Assumption);
2422
33
}
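// Illustrative sketch (editor's example, not part of the original file):
//
//   void *q = __builtin_assume_aligned(p, 64);
//
// reaches this path, emitting an alignment assumption for the pointer; with
// -fsanitize=alignment the maskedptr/maskcond check built above is emitted
// and verified as well.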
2423
2424
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2425
                                              const Expr *E,
2426
                                              SourceLocation AssumptionLoc,
2427
                                              llvm::Value *Alignment,
2428
408
                                              llvm::Value *OffsetValue) {
2429
408
  if (auto *CE = dyn_cast<CastExpr>(E))
2430
168
    E = CE->getSubExprAsWritten();
2431
408
  QualType Ty = E->getType();
2432
408
  SourceLocation Loc = E->getExprLoc();
2433
2434
408
  emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2435
408
                          OffsetValue);
2436
408
}
2437
2438
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2439
                                                 llvm::Value *AnnotatedVal,
2440
                                                 StringRef AnnotationStr,
2441
                                                 SourceLocation Location,
2442
28
                                                 const AnnotateAttr *Attr) {
2443
28
  SmallVector<llvm::Value *, 5> Args = {
2444
28
      AnnotatedVal,
2445
28
      Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2446
28
      Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2447
28
      CGM.EmitAnnotationLineNo(Location),
2448
28
  };
2449
28
  if (Attr)
2450
21
    Args.push_back(CGM.EmitAnnotationArgs(Attr));
2451
28
  return Builder.CreateCall(AnnotationFn, Args);
2452
28
}
2453
2454
8
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2455
8
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2456
  // FIXME We create a new bitcast for every annotation because that's what
2457
  // llvm-gcc was doing.
2458
0
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
2459
12
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2460
12
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2461
12
                       I->getAnnotation(), D->getLocation(), I);
2462
8
}
2463
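As an illustration of what reaches this function, a local variable carrying annotate attributes produces one llvm.var.annotation call per attribute through the loop above (hypothetical example; the emitted IR details vary by Clang version):

// Two annotate attributes -> two EmitAnnotationCall invocations for 'local'.
void demo(void) {
  __attribute__((annotate("tag1")))
  __attribute__((annotate("tag2"))) int local = 0;
  (void)local;
}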
2464
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2465
5
                                              Address Addr) {
2466
5
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2467
0
  llvm::Value *V = Addr.getPointer();
2468
5
  llvm::Type *VTy = V->getType();
2469
5
  auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2470
5
  unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2471
5
  llvm::PointerType *IntrinTy =
2472
5
      llvm::PointerType::getWithSamePointeeType(CGM.Int8PtrTy, AS);
2473
5
  llvm::Function *F =
2474
5
      CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation, IntrinTy);
2475
2476
9
  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2477
    // FIXME Always emit the cast inst so we can differentiate between
2478
    // annotation on the first field of a struct and annotation on the struct
2479
    // itself.
2480
9
    if (VTy != IntrinTy)
2481
9
      V = Builder.CreateBitCast(V, IntrinTy);
2482
9
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2483
9
    V = Builder.CreateBitCast(V, VTy);
2484
9
  }
2485
2486
5
  return Address(V, Addr.getElementType(), Addr.getAlignment());
2487
5
}
2488
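Correspondingly, an annotated field is routed through EmitFieldAnnotations on each member access: its address is bitcast to the intrinsic pointer type, passed through llvm.ptr.annotation, and cast back, as in this illustrative source:

struct Buffer {
  __attribute__((annotate("checked"))) int Len; // annotated field
};

// Forming B->Len's address goes through the ptr.annotation loop above.
int getLen(struct Buffer *B) { return B->Len; }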
2489
65.8k
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2490
2491
CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2492
58.8k
    : CGF(CGF) {
2493
58.8k
  assert(!CGF->IsSanitizerScope);
2494
0
  CGF->IsSanitizerScope = true;
2495
58.8k
}
2496
2497
58.8k
CodeGenFunction::SanitizerScope::~SanitizerScope() {
2498
58.8k
  CGF->IsSanitizerScope = false;
2499
58.8k
}
2500
2501
void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2502
                                   const llvm::Twine &Name,
2503
                                   llvm::BasicBlock *BB,
2504
5.80M
                                   llvm::BasicBlock::iterator InsertPt) const {
2505
5.80M
  LoopStack.InsertHelper(I);
2506
5.80M
  if (IsSanitizerScope)
2507
15.6k
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2508
5.80M
}
2509
2510
void CGBuilderInserter::InsertHelper(
2511
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2512
5.81M
    llvm::BasicBlock::iterator InsertPt) const {
2513
5.81M
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2514
5.81M
  if (CGF)
2515
5.80M
    CGF->InsertHelper(I, Name, BB, InsertPt);
2516
5.81M
}
2517
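The two hooks above form a chain: the builder's inserter physically places the instruction, then forwards it to CodeGenFunction for loop-metadata and sanitizer bookkeeping. A self-contained sketch of the same custom-inserter pattern (the CountingInserter name and counter are hypothetical; the override uses the public IRBuilder hook):

#include "llvm/IR/IRBuilder.h"

// Observes every instruction the builder creates, like CGBuilderInserter.
struct CountingInserter : public llvm::IRBuilderDefaultInserter {
  mutable unsigned NumInserted = 0;
  void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
                    llvm::BasicBlock *BB,
                    llvm::BasicBlock::iterator InsertPt) const override {
    llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
    ++NumInserted; // per-instruction bookkeeping goes here
  }
};
// Usage: llvm::IRBuilder<llvm::ConstantFolder, CountingInserter> B(Ctx);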
2518
// Emits an error if we don't have a valid set of target features for the
2519
// called function.
2520
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2521
57.0k
                                          const FunctionDecl *TargetDecl) {
2522
57.0k
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2523
57.0k
}
2524
2525
// Emits an error if we don't have a valid set of target features for the
2526
// called function.
2527
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2528
71.8k
                                          const FunctionDecl *TargetDecl) {
2529
  // Early exit if this is an indirect call.
2530
71.8k
  if (!TargetDecl)
2531
0
    return;
2532
2533
  // Get the current enclosing function if it exists. If it doesn't,
2534
  // we can't check the target features anyhow.
2535
71.8k
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2536
71.8k
  if (!FD)
2537
0
    return;
2538
2539
  // Grab the required features for the call. For a builtin this is listed in
2541
  // the .td file with the default CPU; for an always_inline function it is any
2542
  // listed CPU and any listed features.
2542
71.8k
  unsigned BuiltinID = TargetDecl->getBuiltinID();
2543
71.8k
  std::string MissingFeature;
2544
71.8k
  llvm::StringMap<bool> CallerFeatureMap;
2545
71.8k
  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2546
71.8k
  if (BuiltinID) {
2547
57.0k
    StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2548
57.0k
    if (!Builtin::evaluateRequiredTargetFeatures(
2549
57.0k
        FeatureList, CallerFeatureMap)) {
2550
562
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2551
562
          << TargetDecl->getDeclName()
2552
562
          << FeatureList;
2553
562
    }
2554
57.0k
  } else if (!TargetDecl->isMultiVersion() &&
2555
14.8k
             TargetDecl->hasAttr<TargetAttr>()) {
2556
    // Get the required features for the callee.
2557
2558
14.8k
    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2559
14.8k
    ParsedTargetAttr ParsedAttr =
2560
14.8k
        CGM.getContext().filterFunctionTargetAttrs(TD);
2561
2562
14.8k
    SmallVector<StringRef, 1> ReqFeatures;
2563
14.8k
    llvm::StringMap<bool> CalleeFeatureMap;
2564
14.8k
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2565
2566
16.4k
    for (const auto &F : ParsedAttr.Features) {
2567
16.4k
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2568
16.4k
        ReqFeatures.push_back(StringRef(F).substr(1));
2569
16.4k
    }
2570
2571
233k
    for (const auto &F : CalleeFeatureMap) {
2572
      // Only positive features are "required".
2573
233k
      if (F.getValue())
2574
233k
        ReqFeatures.push_back(F.getKey());
2575
233k
    }
2576
249k
    if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2577
249k
      if (!CallerFeatureMap.lookup(Feature)) {
2578
19
        MissingFeature = Feature.str();
2579
19
        return false;
2580
19
      }
2581
249k
      return true;
2582
249k
    }))
2583
19
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2584
19
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2585
14.8k
  }
2586
71.8k
}
2587
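To make the two diagnostic paths concrete: the intrinsic wrappers in <immintrin.h> are always_inline functions carrying target("avx"), so calling one from a caller whose feature map lacks 'avx' is rejected via the TargetAttr branch, while direct builtin calls (BuiltinID != 0) are checked against their .td feature list. Illustrative x86 translation unit, compiled without -mavx (exact diagnostic wording varies by Clang version):

#include <immintrin.h>

void bad(float *Out, const float *In) {
  __m256 V = _mm256_loadu_ps(In);   // diagnosed: callee requires 'avx',
  _mm256_storeu_ps(Out, V);         // which this caller does not provide
}

__attribute__((target("avx")))
void good(float *Out, const float *In) {
  __m256 V = _mm256_loadu_ps(In);   // OK: caller's feature map has +avx
  _mm256_storeu_ps(Out, V);
}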
2588
87
void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2589
87
  if (!CGM.getCodeGenOpts().SanitizeStats)
2590
77
    return;
2591
2592
10
  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2593
10
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2594
10
  CGM.getSanStats().create(IRB, SSK);
2595
10
}
2596
2597
llvm::Value *
2598
340
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2599
340
  llvm::Value *Condition = nullptr;
2600
2601
340
  if (!RO.Conditions.Architecture.empty())
2602
102
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2603
2604
340
  if (!RO.Conditions.Features.empty()) {
2605
141
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2606
141
    Condition =
2607
141
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2608
141
  }
2609
340
  return Condition;
2610
340
}
2611
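For orientation, the three condition shapes handled above correspond to target-based function multiversioning roughly as follows (illustrative x86-only definitions):

__attribute__((target("arch=atom")))
int work(void) { return 1; } // Architecture set -> EmitX86CpuIs("atom")

__attribute__((target("avx2")))
int work(void) { return 2; } // Features set -> EmitX86CpuSupports({"avx2"})

__attribute__((target("default")))
int work(void) { return 0; } // neither set -> null Condition (default case)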
2612
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2613
                                             llvm::Function *Resolver,
2614
                                             CGBuilderTy &Builder,
2615
                                             llvm::Function *FuncToReturn,
2616
340
                                             bool SupportsIFunc) {
2617
340
  if (SupportsIFunc) {
2618
169
    Builder.CreateRet(FuncToReturn);
2619
169
    return;
2620
169
  }
2621
2622
171
  llvm::SmallVector<llvm::Value *, 10> Args(
2623
171
      llvm::make_pointer_range(Resolver->args()));
2624
2625
171
  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2626
171
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2627
2628
171
  if (Resolver->getReturnType()->isVoidTy())
2629
43
    Builder.CreateRetVoid();
2630
128
  else
2631
128
    Builder.CreateRet(Result);
2632
171
}
2633
2634
void CodeGenFunction::EmitMultiVersionResolver(
2635
121
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2636
121
  assert(getContext().getTargetInfo().getTriple().isX86() &&
2637
121
         "Only implemented for x86 targets");
2638
2639
0
  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2640
2641
  // The resolver's entry basic block.
2642
121
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2643
121
  Builder.SetInsertPoint(CurBlock);
2644
121
  EmitX86CpuInit();
2645
2646
340
  for (const MultiVersionResolverOption &RO : Options) {
2647
340
    Builder.SetInsertPoint(CurBlock);
2648
340
    llvm::Value *Condition = FormResolverCondition(RO);
2649
2650
    // The 'default' or 'generic' case.
2651
340
    if (!Condition) {
2652
99
      assert(&RO == Options.end() - 1 &&
2653
99
             "Default or Generic case must be last");
2654
0
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2655
99
                                       SupportsIFunc);
2656
99
      return;
2657
99
    }
2658
2659
241
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2660
241
    CGBuilderTy RetBuilder(*this, RetBlock);
2661
241
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2662
241
                                     SupportsIFunc);
2663
241
    CurBlock = createBasicBlock("resolver_else", Resolver);
2664
241
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2665
241
  }
2666
2667
  // If no generic/default, emit an unreachable.
2668
22
  Builder.SetInsertPoint(CurBlock);
2669
22
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2670
22
  TrapCall->setDoesNotReturn();
2671
22
  TrapCall->setDoesNotThrow();
2672
22
  Builder.CreateUnreachable();
2673
22
  Builder.ClearInsertionPoint();
2674
22
}
2675
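The loop above thus emits one (condition, return-block) pair per non-default option and lets the default option return unconditionally. Setting aside the ifunc/musttail distinction, the generated resolver behaves like this hand-written C++ analogue (function names are hypothetical; the __builtin_cpu_* builtins are the runtime hooks behind EmitX86CpuInit/EmitX86CpuIs/EmitX86CpuSupports):

using WorkFn = int (*)();
int work_atom(), work_avx2(), work_default(); // hypothetical versions

WorkFn resolveWork() {
  __builtin_cpu_init();                 // EmitX86CpuInit()
  if (__builtin_cpu_is("atom"))         // FormResolverCondition: cpu_is
    return work_atom;                   //   -> resolver_return block
  if (__builtin_cpu_supports("avx2"))   // FormResolverCondition: cpu_supports
    return work_avx2;                   //   -> resolver_return block
  return work_default;                  // last option, no Condition
}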
2676
// Loc - where the diagnostic will point, where in the source code this
2677
//  alignment has failed.
2678
// SecondaryLoc - if present (will be present if sufficiently different from
2679
//  Loc), the diagnostic will additionally point a "Note:" to this location.
2680
//  It should be the location where the __attribute__((assume_aligned))
2681
//  was written.
2682
void CodeGenFunction::emitAlignmentAssumptionCheck(
2683
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2684
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
2685
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
2686
33
    llvm::Instruction *Assumption) {
2687
33
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2688
33
         cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2689
33
             llvm::Intrinsic::getDeclaration(
2690
33
                 Builder.GetInsertBlock()->getParent()->getParent(),
2691
33
                 llvm::Intrinsic::assume) &&
2692
33
         "Assumption should be a call to llvm.assume().");
2693
0
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2694
33
         "Assumption should be the last instruction of the basic block, "
2695
33
         "since the basic block is still being generated.");
2696
2697
33
  if (!SanOpts.has(SanitizerKind::Alignment))
2698
0
    return;
2699
2700
  // Don't check pointers to volatile data. The behavior here is implementation-
2701
  // defined.
2702
33
  if (Ty->getPointeeType().isVolatileQualified())
2703
1
    return;
2704
2705
  // We need to temporarily remove the assumption so we can insert the
2706
  // sanitizer check before it; otherwise the check will be dropped by optimizations.
2707
32
  Assumption->removeFromParent();
2708
2709
32
  {
2710
32
    SanitizerScope SanScope(this);
2711
2712
32
    if (!OffsetValue)
2713
23
      OffsetValue = Builder.getInt1(false); // no offset.
2714
2715
32
    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2716
32
                                    EmitCheckSourceLocation(SecondaryLoc),
2717
32
                                    EmitCheckTypeDescriptor(Ty)};
2718
32
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2719
32
                                  EmitCheckValue(Alignment),
2720
32
                                  EmitCheckValue(OffsetValue)};
2721
32
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2722
32
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2723
32
  }
2724
2725
  // We are now in the (new, empty) "cont" basic block.
2726
  // Reintroduce the assumption.
2727
32
  Builder.Insert(Assumption);
2728
  // FIXME: Assumption still has its original basic block as its Parent.
2729
32
}
2730
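As a usage sketch, a translation unit like the following reaches this function when built with -fsanitize=alignment (hypothetical file; Loc is the use, SecondaryLoc the attribute or builtin that promised the alignment):

// Build (illustrative): clang -O2 -fsanitize=alignment demo.c
int first(int *P) {
  // The check emitted above fires here if P is not 64-byte aligned.
  int *Q = (int *)__builtin_assume_aligned(P, 64);
  return *Q;
}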
2731
70.1k
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2732
70.1k
  if (CGDebugInfo *DI = getDebugInfo())
2733
31.5k
    return DI->SourceLocToDebugLoc(Location);
2734
2735
38.6k
  return llvm::DebugLoc();
2736
70.1k
}
2737
2738
llvm::Value *
2739
CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2740
157k
                                                      Stmt::Likelihood LH) {
2741
157k
  switch (LH) {
2742
157k
  case Stmt::LH_None:
2743
157k
    return Cond;
2744
17
  case Stmt::LH_Likely:
2745
58
  case Stmt::LH_Unlikely:
2746
    // Don't generate llvm.expect on -O0 as the backend won't use it for
2747
    // anything.
2748
58
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2749
0
      return Cond;
2750
58
    llvm::Type *CondTy = Cond->getType();
2751
58
    assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2752
0
    llvm::Function *FnExpect =
2753
58
        CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2754
58
    llvm::Value *ExpectedValueOfCond =
2755
58
        llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2756
58
    return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2757
58
                              Cond->getName() + ".expval");
2758
157k
  }
2759
0
  llvm_unreachable("Unknown Likelihood");
2760
0
}
2761
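Concretely, the C++20 likelihood attributes are one way to produce LH_Likely/LH_Unlikely; at -O1 and above the condition below is then routed through the llvm.expect call built above (illustrative):

int clampNonNegative(int X) {
  if (X < 0) [[unlikely]] // cond -> @llvm.expect.i1(%cmp, i1 false)
    return 0;
  return X;
}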
2762
llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
2763
                                                    unsigned NumElementsDst,
2764
11
                                                    const llvm::Twine &Name) {
2765
11
  auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
2766
11
  unsigned NumElementsSrc = SrcTy->getNumElements();
2767
11
  if (NumElementsSrc == NumElementsDst)
2768
0
    return SrcVec;
2769
2770
11
  std::vector<int> ShuffleMask(NumElementsDst, -1);
2771
11
  for (unsigned MaskIdx = 0;
2772
55
       MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
2773
44
    ShuffleMask[MaskIdx] = MaskIdx;
2774
2775
11
  return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
2776
11
}
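The mask construction above is the whole trick: the overlapping prefix of lanes is copied one-for-one, and any extra destination lanes stay -1 (undefined). A standalone sketch of the same mask computation (hypothetical helper):

#include <algorithm>
#include <vector>

// Identity mask on the common prefix, -1 (undef) for any extra lanes.
static std::vector<int> boolVecShuffleMask(unsigned SrcN, unsigned DstN) {
  std::vector<int> Mask(DstN, -1);
  for (unsigned I = 0; I < std::min(SrcN, DstN); ++I)
    Mask[I] = static_cast<int>(I);
  return Mask;
}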