Coverage Report

Created: 2020-09-22 08:39

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGExpr.cpp
Line | Count | Source
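
Note on reading this report: the listing below is a plain-text rendering of an llvm-cov HTML
coverage page for CGExpr.cpp. Each source line is preceded by its line number and, for executable
lines, by the number of times it ran in the profiled workload (abbreviated, e.g. 1.94k for roughly
1,940 executions); a count of 0 marks an uncovered line, and comment or blank lines carry no count.
A report of this form is typically produced with llvm-cov; the command below is an illustrative
sketch (the binary name, profile file, and output directory are assumptions, not taken from this
report):

    llvm-cov show ./bin/clang -instr-profile=merged.profdata \
        clang/lib/CodeGen/CGExpr.cpp -format=html -output-dir=coverage-report

The listing covers source lines 1-1272 of CGExpr.cpp.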
1
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This contains code to emit Expr nodes as LLVM code.
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "CGCXXABI.h"
14
#include "CGCall.h"
15
#include "CGCleanup.h"
16
#include "CGDebugInfo.h"
17
#include "CGObjCRuntime.h"
18
#include "CGOpenMPRuntime.h"
19
#include "CGRecordLayout.h"
20
#include "CodeGenFunction.h"
21
#include "CodeGenModule.h"
22
#include "ConstantEmitter.h"
23
#include "TargetInfo.h"
24
#include "clang/AST/ASTContext.h"
25
#include "clang/AST/Attr.h"
26
#include "clang/AST/DeclObjC.h"
27
#include "clang/AST/NSAPI.h"
28
#include "clang/Basic/Builtins.h"
29
#include "clang/Basic/CodeGenOptions.h"
30
#include "clang/Basic/SourceManager.h"
31
#include "llvm/ADT/Hashing.h"
32
#include "llvm/ADT/StringExtras.h"
33
#include "llvm/IR/DataLayout.h"
34
#include "llvm/IR/Intrinsics.h"
35
#include "llvm/IR/LLVMContext.h"
36
#include "llvm/IR/MDBuilder.h"
37
#include "llvm/Support/ConvertUTF.h"
38
#include "llvm/Support/MathExtras.h"
39
#include "llvm/Support/Path.h"
40
#include "llvm/Transforms/Utils/SanitizerStats.h"
41
42
#include <string>
43
44
using namespace clang;
45
using namespace CodeGen;
46
47
//===--------------------------------------------------------------------===//
48
//                        Miscellaneous Helper Methods
49
//===--------------------------------------------------------------------===//
50
51
1.94k
llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
52
1.94k
  unsigned addressSpace =
53
1.94k
      cast<llvm::PointerType>(value->getType())->getAddressSpace();
54
55
1.94k
  llvm::PointerType *destType = Int8PtrTy;
56
1.94k
  if (addressSpace)
57
15
    destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
58
59
1.94k
  if (value->getType() == destType) return value;
60
1.80k
  return Builder.CreateBitCast(value, destType);
61
1.80k
}
62
63
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
64
/// block.
65
Address CodeGenFunction::CreateTempAllocaWithoutCast(llvm::Type *Ty,
66
                                                     CharUnits Align,
67
                                                     const Twine &Name,
68
936k
                                                     llvm::Value *ArraySize) {
69
936k
  auto Alloca = CreateTempAlloca(Ty, Name, ArraySize);
70
936k
  Alloca->setAlignment(Align.getAsAlign());
71
936k
  return Address(Alloca, Align);
72
936k
}
73
74
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
75
/// block. The alloca is cast to the default address space if necessary.
76
Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
77
                                          const Twine &Name,
78
                                          llvm::Value *ArraySize,
79
935k
                                          Address *AllocaAddr) {
80
935k
  auto Alloca = CreateTempAllocaWithoutCast(Ty, Align, Name, ArraySize);
81
935k
  if (AllocaAddr)
82
223k
    *AllocaAddr = Alloca;
83
935k
  llvm::Value *V = Alloca.getPointer();
84
  // Alloca always returns a pointer in alloca address space, which may
85
  // be different from the type defined by the language. For example,
86
  // in C++ the auto variables are in the default address space. Therefore
87
  // cast alloca to the default address space when necessary.
88
935k
  if (getASTAllocaAddressSpace() != LangAS::Default) {
89
2.59k
    auto DestAddrSpace = getContext().getTargetAddressSpace(LangAS::Default);
90
2.59k
    llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
91
    // When ArraySize is nullptr, alloca is inserted at AllocaInsertPt,
92
    // otherwise alloca is inserted at the current insertion point of the
93
    // builder.
94
2.59k
    if (!ArraySize)
95
2.58k
      Builder.SetInsertPoint(AllocaInsertPt);
96
2.59k
    V = getTargetHooks().performAddrSpaceCast(
97
2.59k
        *this, V, getASTAllocaAddressSpace(), LangAS::Default,
98
2.59k
        Ty->getPointerTo(DestAddrSpace), /*non-null*/ true);
99
2.59k
  }
100
101
935k
  return Address(V, Align);
102
935k
}
103
104
/// CreateTempAlloca - This creates an alloca and inserts it into the entry
105
/// block if \p ArraySize is nullptr, otherwise inserts it at the current
106
/// insertion point of the builder.
107
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
108
                                                    const Twine &Name,
109
943k
                                                    llvm::Value *ArraySize) {
110
943k
  if (ArraySize)
111
2.15k
    return Builder.CreateAlloca(Ty, ArraySize, Name);
112
941k
  return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
113
941k
                              ArraySize, Name, AllocaInsertPt);
114
941k
}
115
116
/// CreateDefaultAlignTempAlloca - This creates an alloca with the
117
/// default alignment of the corresponding LLVM type, which is *not*
118
/// guaranteed to be related in any way to the expected alignment of
119
/// an AST type that might have been lowered to Ty.
120
Address CodeGenFunction::CreateDefaultAlignTempAlloca(llvm::Type *Ty,
121
11.2k
                                                      const Twine &Name) {
122
11.2k
  CharUnits Align =
123
11.2k
    CharUnits::fromQuantity(CGM.getDataLayout().getABITypeAlignment(Ty));
124
11.2k
  return CreateTempAlloca(Ty, Align, Name);
125
11.2k
}
126
127
2.61k
void CodeGenFunction::InitTempAlloca(Address Var, llvm::Value *Init) {
128
2.61k
  auto *Alloca = Var.getPointer();
129
2.61k
  assert(isa<llvm::AllocaInst>(Alloca) ||
130
2.61k
         (isa<llvm::AddrSpaceCastInst>(Alloca) &&
131
2.61k
          isa<llvm::AllocaInst>(
132
2.61k
              cast<llvm::AddrSpaceCastInst>(Alloca)->getPointerOperand())));
133
134
2.61k
  auto *Store = new llvm::StoreInst(Init, Alloca, /*volatile*/ false,
135
2.61k
                                    Var.getAlignment().getAsAlign());
136
2.61k
  llvm::BasicBlock *Block = AllocaInsertPt->getParent();
137
2.61k
  Block->getInstList().insertAfter(AllocaInsertPt->getIterator(), Store);
138
2.61k
}
139
140
137k
Address CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) {
141
137k
  CharUnits Align = getContext().getTypeAlignInChars(Ty);
142
137k
  return CreateTempAlloca(ConvertType(Ty), Align, Name);
143
137k
}
144
145
Address CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name,
146
105k
                                       Address *Alloca) {
147
  // FIXME: Should we prefer the preferred type alignment here?
148
105k
  return CreateMemTemp(Ty, getContext().getTypeAlignInChars(Ty), Name, Alloca);
149
105k
}
150
151
Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
152
573k
                                       const Twine &Name, Address *Alloca) {
153
573k
  Address Result = CreateTempAlloca(ConvertTypeForMem(Ty), Align, Name,
154
573k
                                    /*ArraySize=*/nullptr, Alloca);
155
156
573k
  if (Ty->isConstantMatrixType()) {
157
98
    auto *ArrayTy = cast<llvm::ArrayType>(Result.getType()->getElementType());
158
98
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
159
98
                                                ArrayTy->getNumElements());
160
161
98
    Result = Address(
162
98
        Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
163
98
        Result.getAlignment());
164
98
  }
165
573k
  return Result;
166
573k
}
167
168
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty, CharUnits Align,
169
250
                                                  const Twine &Name) {
170
250
  return CreateTempAllocaWithoutCast(ConvertTypeForMem(Ty), Align, Name);
171
250
}
172
173
Address CodeGenFunction::CreateMemTempWithoutCast(QualType Ty,
174
0
                                                  const Twine &Name) {
175
0
  return CreateMemTempWithoutCast(Ty, getContext().getTypeAlignInChars(Ty),
176
0
                                  Name);
177
0
}
178
179
/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
180
/// expression and compare the result against zero, returning an Int1Ty value.
181
110k
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
182
110k
  PGO.setCurrentStmt(E);
183
110k
  if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
184
0
    llvm::Value *MemPtr = EmitScalarExpr(E);
185
0
    return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
186
0
  }
187
188
110k
  QualType BoolTy = getContext().BoolTy;
189
110k
  SourceLocation Loc = E->getExprLoc();
190
110k
  if (!E->getType()->isAnyComplexType())
191
110k
    return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy, Loc);
192
193
1
  return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(), BoolTy,
194
1
                                       Loc);
195
1
}
196
197
/// EmitIgnoredExpr - Emit code to compute the specified expression,
198
/// ignoring the result.
199
303k
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
200
303k
  if (E->isRValue())
201
153k
    return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
202
203
  // Just emit it as an l-value and drop the result.
204
149k
  EmitLValue(E);
205
149k
}
206
207
/// EmitAnyExpr - Emit code to compute the specified expression which
208
/// can have any type.  The result is returned as an RValue struct.
209
/// If this is an aggregate expression, AggSlot indicates where the
210
/// result should be returned.
211
RValue CodeGenFunction::EmitAnyExpr(const Expr *E,
212
                                    AggValueSlot aggSlot,
213
553k
                                    bool ignoreResult) {
214
553k
  switch (getEvaluationKind(E->getType())) {
215
540k
  case TEK_Scalar:
216
540k
    return RValue::get(EmitScalarExpr(E, ignoreResult));
217
871
  case TEK_Complex:
218
871
    return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult));
219
11.5k
  case TEK_Aggregate:
220
11.5k
    if (!ignoreResult && aggSlot.isIgnored())
221
0
      aggSlot = CreateAggTemp(E->getType(), "agg-temp");
222
11.5k
    EmitAggExpr(E, aggSlot);
223
11.5k
    return aggSlot.asRValue();
224
0
  }
225
0
  llvm_unreachable("bad evaluation kind");
226
0
}
227
228
/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
229
/// always be accessible even if no aggregate location is provided.
230
279k
RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
231
279k
  AggValueSlot AggSlot = AggValueSlot::ignored();
232
233
279k
  if (hasAggregateEvaluationKind(E->getType()))
234
9.97k
    AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
235
279k
  return EmitAnyExpr(E, AggSlot);
236
279k
}
237
238
/// EmitAnyExprToMem - Evaluate an expression into a given memory
239
/// location.
240
void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
241
                                       Address Location,
242
                                       Qualifiers Quals,
243
21.4k
                                       bool IsInit) {
244
  // FIXME: This function should take an LValue as an argument.
245
21.4k
  switch (getEvaluationKind(E->getType())) {
246
12
  case TEK_Complex:
247
12
    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
248
12
                              /*isInit*/ false);
249
12
    return;
250
251
10.5k
  case TEK_Aggregate: {
252
10.5k
    EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
253
10.5k
                                         AggValueSlot::IsDestructed_t(IsInit),
254
10.5k
                                         AggValueSlot::DoesNotNeedGCBarriers,
255
10.5k
                                         AggValueSlot::IsAliased_t(!IsInit),
256
10.5k
                                         AggValueSlot::MayOverlap));
257
10.5k
    return;
258
0
  }
259
260
10.9k
  case TEK_Scalar: {
261
10.9k
    RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
262
10.9k
    LValue LV = MakeAddrLValue(Location, E->getType());
263
10.9k
    EmitStoreThroughLValue(RV, LV);
264
10.9k
    return;
265
0
  }
266
0
  }
267
0
  llvm_unreachable("bad evaluation kind");
268
0
}
269
270
static void
271
pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
272
14.1k
                     const Expr *E, Address ReferenceTemporary) {
273
  // Objective-C++ ARC:
274
  //   If we are binding a reference to a temporary that has ownership, we
275
  //   need to perform retain/release operations on the temporary.
276
  //
277
  // FIXME: This should be looking at E, not M.
278
14.1k
  if (auto Lifetime = M->getType().getObjCLifetime()) {
279
10
    switch (Lifetime) {
280
0
    case Qualifiers::OCL_None:
281
0
    case Qualifiers::OCL_ExplicitNone:
282
      // Carry on to normal cleanup handling.
283
0
      break;
284
285
1
    case Qualifiers::OCL_Autoreleasing:
286
      // Nothing to do; cleaned up by an autorelease pool.
287
1
      return;
288
289
9
    case Qualifiers::OCL_Strong:
290
9
    case Qualifiers::OCL_Weak:
291
9
      switch (StorageDuration Duration = M->getStorageDuration()) {
292
2
      case SD_Static:
293
        // Note: we intentionally do not register a cleanup to release
294
        // the object on program termination.
295
2
        return;
296
297
0
      case SD_Thread:
298
        // FIXME: We should probably register a cleanup in this case.
299
0
        return;
300
301
7
      case SD_Automatic:
302
7
      case SD_FullExpression:
303
7
        CodeGenFunction::Destroyer *Destroy;
304
7
        CleanupKind CleanupKind;
305
7
        if (Lifetime == Qualifiers::OCL_Strong) {
306
6
          const ValueDecl *VD = M->getExtendingDecl();
307
6
          bool Precise =
308
6
              VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
309
6
          CleanupKind = CGF.getARCCleanupKind();
310
0
          Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
311
6
                            : &CodeGenFunction::destroyARCStrongImprecise;
312
1
        } else {
313
          // __weak objects always get EH cleanups; otherwise, exceptions
314
          // could cause really nasty crashes instead of mere leaks.
315
1
          CleanupKind = NormalAndEHCleanup;
316
1
          Destroy = &CodeGenFunction::destroyARCWeak;
317
1
        }
318
7
        if (Duration == SD_FullExpression)
319
3
          CGF.pushDestroy(CleanupKind, ReferenceTemporary,
320
3
                          M->getType(), *Destroy,
321
3
                          CleanupKind & EHCleanup);
322
4
        else
323
4
          CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
324
4
                                          M->getType(),
325
4
                                          *Destroy, CleanupKind & EHCleanup);
326
7
        return;
327
328
0
      case SD_Dynamic:
329
0
        llvm_unreachable("temporary cannot have dynamic storage duration");
330
0
      }
331
0
      llvm_unreachable("unknown storage duration");
332
14.1k
    }
333
14.1k
  }
334
335
14.1k
  CXXDestructorDecl *ReferenceTemporaryDtor = nullptr;
336
14.1k
  if (const RecordType *RT =
337
9.54k
          E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
338
    // Get the destructor for the reference temporary.
339
9.54k
    auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
340
9.54k
    if (!ClassDecl->hasTrivialDestructor())
341
1.21k
      ReferenceTemporaryDtor = ClassDecl->getDestructor();
342
9.54k
  }
343
344
14.1k
  if (!ReferenceTemporaryDtor)
345
12.9k
    return;
346
347
  // Call the destructor for the temporary.
348
1.21k
  switch (M->getStorageDuration()) {
349
63
  case SD_Static:
350
63
  case SD_Thread: {
351
63
    llvm::FunctionCallee CleanupFn;
352
63
    llvm::Constant *CleanupArg;
353
63
    if (E->getType()->isArrayType()) {
354
4
      CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
355
4
          ReferenceTemporary, E->getType(),
356
4
          CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
357
4
          dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
358
4
      CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
359
59
    } else {
360
59
      CleanupFn = CGF.CGM.getAddrAndTypeOfCXXStructor(
361
59
          GlobalDecl(ReferenceTemporaryDtor, Dtor_Complete));
362
59
      CleanupArg = cast<llvm::Constant>(ReferenceTemporary.getPointer());
363
59
    }
364
63
    CGF.CGM.getCXXABI().registerGlobalDtor(
365
63
        CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
366
63
    break;
367
63
  }
368
369
1.04k
  case SD_FullExpression:
370
1.04k
    CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
371
1.04k
                    CodeGenFunction::destroyCXXObject,
372
1.04k
                    CGF.getLangOpts().Exceptions);
373
1.04k
    break;
374
375
107
  case SD_Automatic:
376
107
    CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
377
107
                                    ReferenceTemporary, E->getType(),
378
107
                                    CodeGenFunction::destroyCXXObject,
379
107
                                    CGF.getLangOpts().Exceptions);
380
107
    break;
381
382
0
  case SD_Dynamic:
383
0
    llvm_unreachable("temporary cannot have dynamic storage duration");
384
1.21k
  }
385
1.21k
}
386
387
static Address createReferenceTemporary(CodeGenFunction &CGF,
388
                                        const MaterializeTemporaryExpr *M,
389
                                        const Expr *Inner,
390
14.1k
                                        Address *Alloca = nullptr) {
391
14.1k
  auto &TCG = CGF.getTargetHooks();
392
14.1k
  switch (M->getStorageDuration()) {
393
14.0k
  case SD_FullExpression:
394
14.0k
  case SD_Automatic: {
395
    // If we have a constant temporary array or record try to promote it into a
396
    // constant global under the same rules a normal constant would've been
397
    // promoted. This is easier on the optimizer and generally emits fewer
398
    // instructions.
399
14.0k
    QualType Ty = Inner->getType();
400
14.0k
    if (CGF.CGM.getCodeGenOpts().MergeAllConstants &&
401
775
        (Ty->isArrayType() || Ty->isRecordType()) &&
402
494
        CGF.CGM.isTypeConstant(Ty, true))
403
25
      if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
404
9
        if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) {
405
9
          auto AS = AddrSpace.getValue();
406
9
          auto *GV = new llvm::GlobalVariable(
407
9
              CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
408
9
              llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
409
9
              llvm::GlobalValue::NotThreadLocal,
410
9
              CGF.getContext().getTargetAddressSpace(AS));
411
9
          CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
412
9
          GV->setAlignment(alignment.getAsAlign());
413
9
          llvm::Constant *C = GV;
414
9
          if (AS != LangAS::Default)
415
3
            C = TCG.performAddrSpaceCast(
416
3
                CGF.CGM, GV, AS, LangAS::Default,
417
3
                GV->getValueType()->getPointerTo(
418
3
                    CGF.getContext().getTargetAddressSpace(LangAS::Default)));
419
          // FIXME: Should we put the new global into a COMDAT?
420
9
          return Address(C, alignment);
421
9
        }
422
14.0k
      }
423
14.0k
    return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
424
14.0k
  }
425
106
  case SD_Thread:
426
106
  case SD_Static:
427
106
    return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
428
429
0
  case SD_Dynamic:
430
0
    llvm_unreachable("temporary can't have dynamic storage duration");
431
0
  }
432
0
  llvm_unreachable("unknown storage duration");
433
0
}
434
435
/// Helper method to check if the underlying ABI is AAPCS
436
24
static bool isAAPCS(const TargetInfo &TargetInfo) {
437
24
  return TargetInfo.getABI().startswith("aapcs");
438
24
}
439
440
LValue CodeGenFunction::
441
14.1k
EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
442
14.1k
  const Expr *E = M->getSubExpr();
443
444
14.1k
  assert((!M->getExtendingDecl() || !isa<VarDecl>(M->getExtendingDecl()) ||
445
14.1k
          !cast<VarDecl>(M->getExtendingDecl())->isARCPseudoStrong()) &&
446
14.1k
         "Reference should never be pseudo-strong!");
447
448
  // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
449
  // as that will cause the lifetime adjustment to be lost for ARC
450
14.1k
  auto ownership = M->getType().getObjCLifetime();
451
14.1k
  if (ownership != Qualifiers::OCL_None &&
452
11
      ownership != Qualifiers::OCL_ExplicitNone) {
453
11
    Address Object = createReferenceTemporary(*this, M, E);
454
11
    if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
455
3
      Object = Address(llvm::ConstantExpr::getBitCast(Var,
456
3
                           ConvertTypeForMem(E->getType())
457
3
                             ->getPointerTo(Object.getAddressSpace())),
458
3
                       Object.getAlignment());
459
460
      // createReferenceTemporary will promote the temporary to a global with a
461
      // constant initializer if it can.  It can only do this to a value of
462
      // ARC-manageable type if the value is global and therefore "immune" to
463
      // ref-counting operations.  Therefore we have no need to emit either a
464
      // dynamic initialization or a cleanup and we can just return the address
465
      // of the temporary.
466
3
      if (Var->hasInitializer())
467
1
        return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
468
469
2
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
470
2
    }
471
10
    LValue RefTempDst = MakeAddrLValue(Object, M->getType(),
472
10
                                       AlignmentSource::Decl);
473
474
10
    switch (getEvaluationKind(E->getType())) {
475
0
    default: llvm_unreachable("expected scalar or aggregate expression");
476
6
    case TEK_Scalar:
477
6
      EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
478
6
      break;
479
4
    case TEK_Aggregate: {
480
4
      EmitAggExpr(E, AggValueSlot::forAddr(Object,
481
4
                                           E->getType().getQualifiers(),
482
4
                                           AggValueSlot::IsDestructed,
483
4
                                           AggValueSlot::DoesNotNeedGCBarriers,
484
4
                                           AggValueSlot::IsNotAliased,
485
4
                                           AggValueSlot::DoesNotOverlap));
486
4
      break;
487
10
    }
488
10
    }
489
490
10
    pushTemporaryCleanup(*this, M, E, Object);
491
10
    return RefTempDst;
492
10
  }
493
494
14.1k
  SmallVector<const Expr *, 2> CommaLHSs;
495
14.1k
  SmallVector<SubobjectAdjustment, 2> Adjustments;
496
14.1k
  E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
497
498
14.1k
  for (const auto &Ignored : CommaLHSs)
499
0
    EmitIgnoredExpr(Ignored);
500
501
14.1k
  if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
502
3
    if (opaque->getType()->isRecordType()) {
503
3
      assert(Adjustments.empty());
504
3
      return EmitOpaqueValueLValue(opaque);
505
3
    }
506
14.1k
  }
507
508
  // Create and initialize the reference temporary.
509
14.1k
  Address Alloca = Address::invalid();
510
14.1k
  Address Object = createReferenceTemporary(*this, M, E, &Alloca);
511
14.1k
  if (auto *Var = dyn_cast<llvm::GlobalVariable>(
512
112
          Object.getPointer()->stripPointerCasts())) {
513
112
    Object = Address(llvm::ConstantExpr::getBitCast(
514
112
                         cast<llvm::Constant>(Object.getPointer()),
515
112
                         ConvertTypeForMem(E->getType())->getPointerTo()),
516
112
                     Object.getAlignment());
517
    // If the temporary is a global and has a constant initializer or is a
518
    // constant temporary that we promoted to a global, we may have already
519
    // initialized it.
520
112
    if (!Var->hasInitializer()) {
521
96
      Var->setInitializer(CGM.EmitNullConstant(E->getType()));
522
96
      EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
523
96
    }
524
14.0k
  } else {
525
14.0k
    switch (M->getStorageDuration()) {
526
210
    case SD_Automatic:
527
210
      if (auto *Size = EmitLifetimeStart(
528
22
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
529
22
              Alloca.getPointer())) {
530
22
        pushCleanupAfterFullExpr<CallLifetimeEnd>(NormalEHLifetimeMarker,
531
22
                                                  Alloca, Size);
532
22
      }
533
210
      break;
534
535
13.8k
    case SD_FullExpression: {
536
13.8k
      if (!ShouldEmitLifetimeMarkers)
537
13.3k
        break;
538
539
      // Avoid creating a conditional cleanup just to hold an llvm.lifetime.end
540
      // marker. Instead, start the lifetime of a conditional temporary earlier
541
      // so that it's unconditional. Don't do this with sanitizers which need
542
      // more precise lifetime marks.
543
505
      ConditionalEvaluation *OldConditional = nullptr;
544
505
      CGBuilderTy::InsertPoint OldIP;
545
505
      if (isInConditionalBranch() && !E->getType().isDestructedType() &&
546
21
          !SanOpts.has(SanitizerKind::HWAddress) &&
547
20
          !SanOpts.has(SanitizerKind::Memory) &&
548
19
          !CGM.getCodeGenOpts().SanitizeAddressUseAfterScope) {
549
18
        OldConditional = OutermostConditional;
550
18
        OutermostConditional = nullptr;
551
552
18
        OldIP = Builder.saveIP();
553
18
        llvm::BasicBlock *Block = OldConditional->getStartingBlock();
554
18
        Builder.restoreIP(CGBuilderTy::InsertPoint(
555
18
            Block, llvm::BasicBlock::iterator(Block->back())));
556
18
      }
557
558
505
      if (auto *Size = EmitLifetimeStart(
559
505
              CGM.getDataLayout().getTypeAllocSize(Alloca.getElementType()),
560
505
              Alloca.getPointer())) {
561
505
        pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, Alloca,
562
505
                                             Size);
563
505
      }
564
565
505
      if (OldConditional) {
566
18
        OutermostConditional = OldConditional;
567
18
        Builder.restoreIP(OldIP);
568
18
      }
569
505
      break;
570
505
    }
571
572
0
    default:
573
0
      break;
574
14.0k
    }
575
14.0k
    EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
576
14.0k
  }
577
14.1k
  pushTemporaryCleanup(*this, M, E, Object);
578
579
  // Perform derived-to-base casts and/or field accesses, to get from the
580
  // temporary object we created (and, potentially, for which we extended
581
  // the lifetime) to the subobject we're binding the reference to.
582
14.1k
  for (unsigned I = Adjustments.size(); I != 0; --I) {
583
1
    SubobjectAdjustment &Adjustment = Adjustments[I-1];
584
1
    switch (Adjustment.Kind) {
585
0
    case SubobjectAdjustment::DerivedToBaseAdjustment:
586
0
      Object =
587
0
          GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
588
0
                                Adjustment.DerivedToBase.BasePath->path_begin(),
589
0
                                Adjustment.DerivedToBase.BasePath->path_end(),
590
0
                                /*NullCheckValue=*/ false, E->getExprLoc());
591
0
      break;
592
593
1
    case SubobjectAdjustment::FieldAdjustment: {
594
1
      LValue LV = MakeAddrLValue(Object, E->getType(), AlignmentSource::Decl);
595
1
      LV = EmitLValueForField(LV, Adjustment.Field);
596
1
      assert(LV.isSimple() &&
597
1
             "materialized temporary field is not a simple lvalue");
598
1
      Object = LV.getAddress(*this);
599
1
      break;
600
0
    }
601
602
0
    case SubobjectAdjustment::MemberPointerAdjustment: {
603
0
      llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
604
0
      Object = EmitCXXMemberDataPointerAddress(E, Object, Ptr,
605
0
                                               Adjustment.Ptr.MPT);
606
0
      break;
607
0
    }
608
1
    }
609
1
  }
610
611
14.1k
  return MakeAddrLValue(Object, M->getType(), AlignmentSource::Decl);
612
14.1k
}
613
614
RValue
615
89.0k
CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
616
  // Emit the expression as an lvalue.
617
89.0k
  LValue LV = EmitLValue(E);
618
89.0k
  assert(LV.isSimple());
619
89.0k
  llvm::Value *Value = LV.getPointer(*this);
620
621
89.0k
  if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) {
622
    // C++11 [dcl.ref]p5 (as amended by core issue 453):
623
    //   If a glvalue to which a reference is directly bound designates neither
624
    //   an existing object or function of an appropriate type nor a region of
625
    //   storage of suitable size and alignment to contain an object of the
626
    //   reference's type, the behavior is undefined.
627
55
    QualType Ty = E->getType();
628
55
    EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
629
55
  }
630
631
89.0k
  return RValue::get(Value);
632
89.0k
}
633
634
635
/// getAccessedFieldNo - Given an encoded value and a result number, return the
636
/// input field number being accessed.
637
unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
638
381
                                             const llvm::Constant *Elts) {
639
381
  return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
640
381
      ->getZExtValue();
641
381
}
642
643
/// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h.
644
static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
645
49
                                    llvm::Value *High) {
646
49
  llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL);
647
49
  llvm::Value *K47 = Builder.getInt64(47);
648
49
  llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul);
649
49
  llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0);
650
49
  llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul);
651
49
  llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0);
652
49
  return Builder.CreateMul(B1, KMul);
653
49
}
654
655
714
bool CodeGenFunction::isNullPointerAllowed(TypeCheckKind TCK) {
656
714
  return TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
657
685
         TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation;
658
714
}
659
660
418
bool CodeGenFunction::isVptrCheckRequired(TypeCheckKind TCK, QualType Ty) {
661
418
  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
662
418
  return (RD && RD->hasDefinition() && RD->isDynamicClass()) &&
663
80
         (TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
664
46
          TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
665
35
          TCK == TCK_UpcastToVirtualBase || TCK == TCK_DynamicOperation);
666
418
}
667
668
560k
bool CodeGenFunction::sanitizePerformTypeCheck() const {
669
560k
  return SanOpts.has(SanitizerKind::Null) |
670
560k
         SanOpts.has(SanitizerKind::Alignment) |
671
560k
         SanOpts.has(SanitizerKind::ObjectSize) |
672
560k
         SanOpts.has(SanitizerKind::Vptr);
673
560k
}
674
675
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
676
                                    llvm::Value *Ptr, QualType Ty,
677
                                    CharUnits Alignment,
678
                                    SanitizerSet SkippedChecks,
679
449k
                                    llvm::Value *ArraySize) {
680
449k
  if (!sanitizePerformTypeCheck())
681
449k
    return;
682
683
  // Don't check pointers outside the default address space. The null check
684
  // isn't correct, the object-size check isn't supported by LLVM, and we can't
685
  // communicate the addresses to the runtime handler for the vptr check.
686
717
  if (Ptr->getType()->getPointerAddressSpace())
687
2
    return;
688
689
  // Don't check pointers to volatile data. The behavior here is implementation-
690
  // defined.
691
715
  if (Ty.isVolatileQualified())
692
1
    return;
693
694
714
  SanitizerScope SanScope(this);
695
696
714
  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
697
714
  llvm::BasicBlock *Done = nullptr;
698
699
  // Quickly determine whether we have a pointer to an alloca. It's possible
700
  // to skip null checks, and some alignment checks, for these pointers. This
701
  // can reduce compile-time significantly.
702
714
  auto PtrToAlloca = dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCasts());
703
704
714
  llvm::Value *True = llvm::ConstantInt::getTrue(getLLVMContext());
705
714
  llvm::Value *IsNonNull = nullptr;
706
714
  bool IsGuaranteedNonNull =
707
714
      SkippedChecks.has(SanitizerKind::Null) || PtrToAlloca;
708
714
  bool AllowNullPointers = isNullPointerAllowed(TCK);
709
714
  if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
710
353
      !IsGuaranteedNonNull) {
711
    // The glvalue must not be an empty glvalue.
712
230
    IsNonNull = Builder.CreateIsNotNull(Ptr);
713
714
    // The IR builder can constant-fold the null check if the pointer points to
715
    // a constant.
716
230
    IsGuaranteedNonNull = IsNonNull == True;
717
718
    // Skip the null check if the pointer is known to be non-null.
719
230
    if (!IsGuaranteedNonNull) {
720
224
      if (AllowNullPointers) {
721
        // When performing pointer casts, it's OK if the value is null.
722
        // Skip the remaining checks in that case.
723
17
        Done = createBasicBlock("null");
724
17
        llvm::BasicBlock *Rest = createBasicBlock("not.null");
725
17
        Builder.CreateCondBr(IsNonNull, Rest, Done);
726
17
        EmitBlock(Rest);
727
207
      } else {
728
207
        Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
729
207
      }
730
224
    }
731
230
  }
732
733
714
  if (SanOpts.has(SanitizerKind::ObjectSize) &&
734
141
      !SkippedChecks.has(SanitizerKind::ObjectSize) &&
735
105
      !Ty->isIncompleteType()) {
736
105
    uint64_t TySize = CGM.getMinimumObjectSize(Ty).getQuantity();
737
105
    llvm::Value *Size = llvm::ConstantInt::get(IntPtrTy, TySize);
738
105
    if (ArraySize)
739
5
      Size = Builder.CreateMul(Size, ArraySize);
740
741
    // Degenerate case: new X[0] does not need an objectsize check.
742
105
    llvm::Constant *ConstantSize = dyn_cast<llvm::Constant>(Size);
743
105
    if (!ConstantSize || !ConstantSize->isNullValue()) {
744
      // The glvalue must refer to a large enough storage region.
745
      // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
746
      //        to check this.
747
      // FIXME: Get object address space
748
103
      llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
749
103
      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
750
103
      llvm::Value *Min = Builder.getFalse();
751
103
      llvm::Value *NullIsUnknown = Builder.getFalse();
752
103
      llvm::Value *Dynamic = Builder.getFalse();
753
103
      llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
754
103
      llvm::Value *LargeEnough = Builder.CreateICmpUGE(
755
103
          Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown, Dynamic}), Size);
756
103
      Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
757
103
    }
758
105
  }
759
760
714
  uint64_t AlignVal = 0;
761
714
  llvm::Value *PtrAsInt = nullptr;
762
763
714
  if (SanOpts.has(SanitizerKind::Alignment) &&
764
305
      !SkippedChecks.has(SanitizerKind::Alignment)) {
765
267
    AlignVal = Alignment.getQuantity();
766
267
    if (!Ty->isIncompleteType() && !AlignVal)
767
104
      AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
768
104
                                             /*ForPointeeType=*/true)
769
104
                     .getQuantity();
770
771
    // The glvalue must be suitably aligned.
772
267
    if (AlignVal > 1 &&
773
184
        (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
774
161
      PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
775
161
      llvm::Value *Align = Builder.CreateAnd(
776
161
          PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
777
161
      llvm::Value *Aligned =
778
161
          Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
779
161
      if (Aligned != True)
780
158
        Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
781
161
    }
782
267
  }
783
784
714
  if (Checks.size() > 0) {
785
    // Make sure we're not losing information. Alignment needs to be a power of
786
    // 2
787
340
    assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
788
340
    llvm::Constant *StaticData[] = {
789
340
        EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
790
235
        llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
791
340
        llvm::ConstantInt::get(Int8Ty, TCK)};
792
340
    EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
793
182
              PtrAsInt ? PtrAsInt : Ptr);
794
340
  }
795
796
  // If possible, check that the vptr indicates that there is a subobject of
797
  // type Ty at offset zero within this object.
798
  //
799
  // C++11 [basic.life]p5,6:
800
  //   [For storage which does not refer to an object within its lifetime]
801
  //   The program has undefined behavior if:
802
  //    -- the [pointer or glvalue] is used to access a non-static data member
803
  //       or call a non-static member function
804
714
  if (SanOpts.has(SanitizerKind::Vptr) &&
805
418
      !SkippedChecks.has(SanitizerKind::Vptr) && isVptrCheckRequired(TCK, Ty)) {
806
    // Ensure that the pointer is non-null before loading it. If there is no
807
    // compile-time guarantee, reuse the run-time null check or emit a new one.
808
50
    if (!IsGuaranteedNonNull) {
809
22
      if (!IsNonNull)
810
7
        IsNonNull = Builder.CreateIsNotNull(Ptr);
811
22
      if (!Done)
812
17
        Done = createBasicBlock("vptr.null");
813
22
      llvm::BasicBlock *VptrNotNull = createBasicBlock("vptr.not.null");
814
22
      Builder.CreateCondBr(IsNonNull, VptrNotNull, Done);
815
22
      EmitBlock(VptrNotNull);
816
22
    }
817
818
    // Compute a hash of the mangled name of the type.
819
    //
820
    // FIXME: This is not guaranteed to be deterministic! Move to a
821
    //        fingerprinting mechanism once LLVM provides one. For the time
822
    //        being the implementation happens to be deterministic.
823
50
    SmallString<64> MangledName;
824
50
    llvm::raw_svector_ostream Out(MangledName);
825
50
    CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
826
50
                                                     Out);
827
828
    // Blacklist based on the mangled type.
829
50
    if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
830
49
            SanitizerKind::Vptr, Out.str())) {
831
49
      llvm::hash_code TypeHash = hash_value(Out.str());
832
833
      // Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
834
49
      llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
835
49
      llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
836
49
      Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
837
49
      llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
838
49
      llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
839
840
49
      llvm::Value *Hash = emitHash16Bytes(Builder, Low, High);
841
49
      Hash = Builder.CreateTrunc(Hash, IntPtrTy);
842
843
      // Look the hash up in our cache.
844
49
      const int CacheSize = 128;
845
49
      llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize);
846
49
      llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable,
847
49
                                                     "__ubsan_vptr_type_cache");
848
49
      llvm::Value *Slot = Builder.CreateAnd(Hash,
849
49
                                            llvm::ConstantInt::get(IntPtrTy,
850
49
                                                                   CacheSize-1));
851
49
      llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
852
49
      llvm::Value *CacheVal =
853
49
        Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
854
49
                                  getPointerAlign());
855
856
      // If the hash isn't in the cache, call a runtime handler to perform the
857
      // hard work of checking whether the vptr is for an object of the right
858
      // type. This will either fill in the cache and return, or produce a
859
      // diagnostic.
860
49
      llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
861
49
      llvm::Constant *StaticData[] = {
862
49
        EmitCheckSourceLocation(Loc),
863
49
        EmitCheckTypeDescriptor(Ty),
864
49
        CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()),
865
49
        llvm::ConstantInt::get(Int8Ty, TCK)
866
49
      };
867
49
      llvm::Value *DynamicData[] = { Ptr, Hash };
868
49
      EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
869
49
                SanitizerHandler::DynamicTypeCacheMiss, StaticData,
870
49
                DynamicData);
871
49
    }
872
50
  }
873
874
714
  if (Done) {
875
34
    Builder.CreateBr(Done);
876
34
    EmitBlock(Done);
877
34
  }
878
714
}
879
880
/// Determine whether this expression refers to a flexible array member in a
881
/// struct. We disable array bounds checks for such members.
882
31
static bool isFlexibleArrayMemberExpr(const Expr *E) {
883
  // For compatibility with existing code, we treat arrays of length 0 or
884
  // 1 as flexible array members.
885
  // FIXME: This is inconsistent with the warning code in SemaChecking. Unify
886
  // the two mechanisms.
887
31
  const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe();
888
31
  if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) {
889
    // FIXME: Sema doesn't treat [1] as a flexible array member if the bound
890
    // was produced by macro expansion.
891
28
    if (CAT->getSize().ugt(1))
892
17
      return false;
893
3
  } else if (!isa<IncompleteArrayType>(AT))
894
2
    return false;
895
896
12
  E = E->IgnoreParens();
897
898
  // A flexible array member must be the last member in the class.
899
12
  if (const auto *ME = dyn_cast<MemberExpr>(E)) {
900
    // FIXME: If the base type of the member expr is not FD->getParent(),
901
    // this should not be treated as a flexible array member access.
902
5
    if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
903
      // FIXME: Sema doesn't treat a T[1] union member as a flexible array
904
      // member, only a T[0] or T[] member gets that treatment.
905
5
      if (FD->getParent()->isUnion())
906
4
        return true;
907
1
      RecordDecl::field_iterator FI(
908
1
          DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
909
1
      return ++FI == FD->getParent()->field_end();
910
1
    }
911
7
  } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
912
4
    return IRE->getDecl()->getNextIvar() == nullptr;
913
4
  }
914
915
3
  return false;
916
3
}
917
918
llvm::Value *CodeGenFunction::LoadPassedObjectSize(const Expr *E,
919
16
                                                   QualType EltTy) {
920
16
  ASTContext &C = getContext();
921
16
  uint64_t EltSize = C.getTypeSizeInChars(EltTy).getQuantity();
922
16
  if (!EltSize)
923
1
    return nullptr;
924
925
15
  auto *ArrayDeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
926
15
  if (!ArrayDeclRef)
927
7
    return nullptr;
928
929
8
  auto *ParamDecl = dyn_cast<ParmVarDecl>(ArrayDeclRef->getDecl());
930
8
  if (!ParamDecl)
931
2
    return nullptr;
932
933
6
  auto *POSAttr = ParamDecl->getAttr<PassObjectSizeAttr>();
934
6
  if (!POSAttr)
935
2
    return nullptr;
936
937
  // Don't load the size if it's a lower bound.
938
4
  int POSType = POSAttr->getType();
939
4
  if (POSType != 0 && POSType != 1)
940
2
    return nullptr;
941
942
  // Find the implicit size parameter.
943
2
  auto PassedSizeIt = SizeArguments.find(ParamDecl);
944
2
  if (PassedSizeIt == SizeArguments.end())
945
0
    return nullptr;
946
947
2
  const ImplicitParamDecl *PassedSizeDecl = PassedSizeIt->second;
948
2
  assert(LocalDeclMap.count(PassedSizeDecl) && "Passed size not loadable");
949
2
  Address AddrOfSize = LocalDeclMap.find(PassedSizeDecl)->second;
950
2
  llvm::Value *SizeInBytes = EmitLoadOfScalar(AddrOfSize, /*Volatile=*/false,
951
2
                                              C.getSizeType(), E->getExprLoc());
952
2
  llvm::Value *SizeOfElement =
953
2
      llvm::ConstantInt::get(SizeInBytes->getType(), EltSize);
954
2
  return Builder.CreateUDiv(SizeInBytes, SizeOfElement);
955
2
}
956
957
/// If Base is known to point to the start of an array, return the length of
958
/// that array. Return 0 if the length cannot be determined.
959
static llvm::Value *getArrayIndexingBound(
960
40
    CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) {
961
  // For the vector indexing extension, the bound is the number of elements.
962
40
  if (const VectorType *VT = Base->getType()->getAs<VectorType>()) {
963
1
    IndexedType = Base->getType();
964
1
    return CGF.Builder.getInt32(VT->getNumElements());
965
1
  }
966
967
39
  Base = Base->IgnoreParens();
968
969
39
  if (const auto *CE = dyn_cast<CastExpr>(Base)) {
970
39
    if (CE->getCastKind() == CK_ArrayToPointerDecay &&
971
31
        !isFlexibleArrayMemberExpr(CE->getSubExpr())) {
972
24
      IndexedType = CE->getSubExpr()->getType();
973
24
      const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
974
24
      if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
975
21
        return CGF.Builder.getInt(CAT->getSize());
976
3
      else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
977
2
        return CGF.getVLASize(VAT).NumElts;
978
      // Ignore pass_object_size here. It's not applicable on decayed pointers.
979
24
    }
980
39
  }
981
982
16
  QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
983
16
  if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
984
2
    IndexedType = Base->getType();
985
2
    return POS;
986
2
  }
987
988
14
  return nullptr;
989
14
}
990
991
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
992
                                      llvm::Value *Index, QualType IndexType,
993
40
                                      bool Accessed) {
994
40
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
995
40
         "should not be called unless adding bounds checks");
996
40
  SanitizerScope SanScope(this);
997
998
40
  QualType IndexedType;
999
40
  llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
1000
40
  if (!Bound)
1001
14
    return;
1002
1003
26
  bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
1004
26
  llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
1005
26
  llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
1006
1007
26
  llvm::Constant *StaticData[] = {
1008
26
    EmitCheckSourceLocation(E->getExprLoc()),
1009
26
    EmitCheckTypeDescriptor(IndexedType),
1010
26
    EmitCheckTypeDescriptor(IndexType)
1011
26
  };
1012
23
  llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
1013
3
                                : Builder.CreateICmpULE(IndexVal, BoundVal);
1014
26
  EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
1015
26
            SanitizerHandler::OutOfBounds, StaticData, Index);
1016
26
}
1017
1018
1019
CodeGenFunction::ComplexPairTy CodeGenFunction::
1020
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1021
8
                         bool isInc, bool isPre) {
1022
8
  ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
1023
1024
8
  llvm::Value *NextVal;
1025
8
  if (isa<llvm::IntegerType>(InVal.first->getType())) {
1026
2
    uint64_t AmountVal = isInc ? 1 : -1;
1027
4
    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
1028
1029
    // Add the inc/dec to the real part.
1030
2
    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1031
4
  } else {
1032
4
    QualType ElemTy = E->getType()->castAs<ComplexType>()->getElementType();
1033
4
    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
1034
4
    if (!isInc)
1035
2
      FVal.changeSign();
1036
4
    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
1037
1038
    // Add the inc/dec to the real part.
1039
2
    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
1040
4
  }
1041
1042
8
  ComplexPairTy IncVal(NextVal, InVal.second);
1043
1044
  // Store the updated result through the lvalue.
1045
8
  EmitStoreOfComplex(IncVal, LV, /*init*/ false);
1046
8
  if (getLangOpts().OpenMP)
1047
0
    CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
1048
0
                                                              E->getSubExpr());
1049
1050
  // If this is a postinc, return the value read from memory, otherwise use the
1051
  // updated value.
1052
4
  return isPre ? IncVal : InVal;
1053
8
}
1054
1055
void CodeGenModule::EmitExplicitCastExprType(const ExplicitCastExpr *E,
1056
126k
                                             CodeGenFunction *CGF) {
1057
  // Bind VLAs in the cast type.
1058
126k
  if (CGF && E->getType()->isVariablyModifiedType())
1059
13
    CGF->EmitVariablyModifiedType(E->getType());
1060
1061
126k
  if (CGDebugInfo *DI = getModuleDebugInfo())
1062
38.6k
    DI->EmitExplicitCastType(E->getType());
1063
126k
}
1064
1065
//===----------------------------------------------------------------------===//
1066
//                         LValue Expression Emission
1067
//===----------------------------------------------------------------------===//
1068
1069
/// EmitPointerWithAlignment - Given an expression of pointer type, try to
1070
/// derive a more accurate bound on the alignment of the pointer.
1071
Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
1072
                                                  LValueBaseInfo *BaseInfo,
1073
205k
                                                  TBAAAccessInfo *TBAAInfo) {
1074
  // We allow this with ObjC object pointers because of fragile ABIs.
1075
205k
  assert(E->getType()->isPointerType() ||
1076
205k
         E->getType()->isObjCObjectPointerType());
1077
205k
  E = E->IgnoreParens();
1078
1079
  // Casts:
1080
205k
  if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
1081
132k
    if (const auto *ECE = dyn_cast<ExplicitCastExpr>(CE))
1082
2.83k
      CGM.EmitExplicitCastExprType(ECE, this);
1083
1084
132k
    switch (CE->getCastKind()) {
1085
    // Non-converting casts (but not C's implicit conversion from void*).
1086
9.45k
    case CK_BitCast:
1087
9.45k
    case CK_NoOp:
1088
9.45k
    case CK_AddressSpaceConversion:
1089
9.45k
      if (auto PtrTy = CE->getSubExpr()->getType()->getAs<PointerType>()) {
1090
9.44k
        if (PtrTy->getPointeeType()->isVoidType())
1091
735
          break;
1092
1093
8.71k
        LValueBaseInfo InnerBaseInfo;
1094
8.71k
        TBAAAccessInfo InnerTBAAInfo;
1095
8.71k
        Address Addr = EmitPointerWithAlignment(CE->getSubExpr(),
1096
8.71k
                                                &InnerBaseInfo,
1097
8.71k
                                                &InnerTBAAInfo);
1098
8.71k
        if (BaseInfo) *BaseInfo = InnerBaseInfo;
1099
8.71k
        if (TBAAInfo) *TBAAInfo = InnerTBAAInfo;
1100
1101
8.71k
        if (isa<ExplicitCastExpr>(CE)) {
1102
2.07k
          LValueBaseInfo TargetTypeBaseInfo;
1103
2.07k
          TBAAAccessInfo TargetTypeTBAAInfo;
1104
2.07k
          CharUnits Align = CGM.getNaturalPointeeTypeAlignment(
1105
2.07k
              E->getType(), &TargetTypeBaseInfo, &TargetTypeTBAAInfo);
1106
2.07k
          if (TBAAInfo)
1107
1.96k
            *TBAAInfo = CGM.mergeTBAAInfoForCast(*TBAAInfo,
1108
1.96k
                                                 TargetTypeTBAAInfo);
1109
          // If the source l-value is opaque, honor the alignment of the
1110
          // casted-to type.
1111
2.07k
          if (InnerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
1112
1.68k
            if (BaseInfo)
1113
1.65k
              BaseInfo->mergeForCast(TargetTypeBaseInfo);
1114
1.68k
            Addr = Address(Addr.getPointer(), Align);
1115
1.68k
          }
1116
2.07k
        }
1117
1118
8.71k
        if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
1119
4
            CE->getCastKind() == CK_BitCast) {
1120
2
          if (auto PT = E->getType()->getAs<PointerType>())
1121
2
            EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
1122
2
                                      /*MayBeNull=*/true,
1123
2
                                      CodeGenFunction::CFITCK_UnrelatedCast,
1124
2
                                      CE->getBeginLoc());
1125
2
        }
1126
8.71k
        return CE->getCastKind() != CK_AddressSpaceConversion
1127
8.70k
                   ? Builder.CreateBitCast(Addr, ConvertType(E->getType()))
1128
5
                   : Builder.CreateAddrSpaceCast(Addr,
1129
5
                                                 ConvertType(E->getType()));
1130
8.71k
      }
1131
4
      break;
1132
1133
    // Array-to-pointer decay.
1134
4.78k
    case CK_ArrayToPointerDecay:
1135
4.78k
      return EmitArrayToPointerDecay(CE->getSubExpr(), BaseInfo, TBAAInfo);
1136
1137
    // Derived-to-base conversions.
1138
10.4k
    case CK_UncheckedDerivedToBase:
1139
10.4k
    case CK_DerivedToBase: {
1140
      // TODO: Support accesses to members of base classes in TBAA. For now, we
1141
      // conservatively pretend that the complete object is of the base class
1142
      // type.
1143
10.4k
      if (TBAAInfo)
1144
9.19k
        *TBAAInfo = CGM.getTBAAAccessInfo(E->getType());
1145
10.4k
      Address Addr = EmitPointerWithAlignment(CE->getSubExpr(), BaseInfo);
1146
10.4k
      auto Derived = CE->getSubExpr()->getType()->getPointeeCXXRecordDecl();
1147
10.4k
      return GetAddressOfBaseClass(Addr, Derived,
1148
10.4k
                                   CE->path_begin(), CE->path_end(),
1149
10.4k
                                   ShouldNullCheckClassCastValue(CE),
1150
10.4k
                                   CE->getExprLoc());
1151
10.4k
    }
1152
1153
    // TODO: Is there any reason to treat base-to-derived conversions
1154
    // specially?
1155
107k
    default:
1156
107k
      break;
1157
181k
    }
1158
181k
  }
1159
1160
  // Unary &.
1161
181k
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
1162
3.29k
    if (UO->getOpcode() == UO_AddrOf) {
1163
3.19k
      LValue LV = EmitLValue(UO->getSubExpr());
1164
3.19k
      if (BaseInfo) *BaseInfo = LV.getBaseInfo();
1165
3.19k
      if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
1166
3.19k
      return LV.getAddress(*this);
1167
3.19k
    }
1168
178k
  }
1169
1170
  // TODO: conditional operators, comma.
1171
1172
  // Otherwise, use the alignment of the type.
1173
178k
  CharUnits Align =
1174
178k
      CGM.getNaturalPointeeTypeAlignment(E->getType(), BaseInfo, TBAAInfo);
1175
178k
  return Address(EmitScalarExpr(E), Align);
1176
178k
}
1177
1178
125k
RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
1179
125k
  if (Ty->isVoidType())
1180
124k
    return RValue::get(nullptr);
1181
1182
403
  switch (getEvaluationKind(Ty)) {
1183
0
  case TEK_Complex: {
1184
0
    llvm::Type *EltTy =
1185
0
      ConvertType(Ty->castAs<ComplexType>()->getElementType());
1186
0
    llvm::Value *U = llvm::UndefValue::get(EltTy);
1187
0
    return RValue::getComplex(std::make_pair(U, U));
1188
0
  }
1189
1190
  // If this is a use of an undefined aggregate type, the aggregate must have an
1191
  // identifiable address.  Just because the contents of the value are undefined
1192
  // doesn't mean that the address can't be taken and compared.
1193
401
  case TEK_Aggregate: {
1194
401
    Address DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
1195
401
    return RValue::getAggregate(DestPtr);
1196
0
  }
1197
1198
2
  case TEK_Scalar:
1199
2
    return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
1200
0
  }
1201
0
  llvm_unreachable("bad evaluation kind");
1202
0
}
1203
1204
RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
1205
0
                                              const char *Name) {
1206
0
  ErrorUnsupported(E, Name);
1207
0
  return GetUndefRValue(E->getType());
1208
0
}
1209
1210
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
1211
0
                                              const char *Name) {
1212
0
  ErrorUnsupported(E, Name);
1213
0
  llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
1214
0
  return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
1215
0
                        E->getType());
1216
0
}
1217
1218
242k
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
1219
242k
  const Expr *Base = Obj;
1220
368k
  while (!isa<CXXThisExpr>(Base)) {
1221
    // The result of a dynamic_cast can be null.
1222
285k
    if (isa<CXXDynamicCastExpr>(Base))
1223
3
      return false;
1224
1225
285k
    if (const auto *CE = dyn_cast<CastExpr>(Base)) {
1226
125k
      Base = CE->getSubExpr();
1227
160k
    } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
1228
852
      Base = PE->getSubExpr();
1229
159k
    } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
1230
4.58k
      if (UO->getOpcode() == UO_Extension)
1231
6
        Base = UO->getSubExpr();
1232
4.57k
      else
1233
4.57k
        return false;
1234
155k
    } else {
1235
155k
      return false;
1236
155k
    }
1237
285k
  }
1238
82.9k
  return true;
1239
242k
}
1240
1241
998k
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
1242
998k
  LValue LV;
1243
998k
  if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
1244
34
    LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
1245
998k
  else
1246
998k
    LV = EmitLValue(E);
1247
998k
  if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
1248
170k
    SanitizerSet SkippedChecks;
1249
170k
    if (const auto *ME = dyn_cast<MemberExpr>(E)) {
1250
91.4k
      bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
1251
91.4k
      if (IsBaseCXXThis)
1252
19.1k
        SkippedChecks.set(SanitizerKind::Alignment, true);
1253
91.4k
      if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
1254
27.7k
        SkippedChecks.set(SanitizerKind::Null, true);
1255
91.4k
    }
1256
170k
    EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(*this), E->getType(),
1257
170k
                  LV.getAlignment(), SkippedChecks);
1258
170k
  }
1259
998k
  return LV;
1260
998k
}
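EmitCheckedLValue skips the Null and Alignment sanitizer checks when the base of a member access is (a possibly parenthesized) `this`, since that pointer has presumably already been checked. A minimal sketch of user code that exercises the different outcomes; the struct and function names are hypothetical and the mapping comments are illustrative only:

struct Node {
  int value;
  int viaThis() { return this->value; }             // base is `this`: Null and Alignment checks skipped
  int viaParens() { return (this)->value; }         // still a wrapped `this`: skipped as well
  static int viaPtr(Node *p) { return p->value; }   // arbitrary pointer: checks are kept
};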
1261
1262
/// EmitLValue - Emit code to compute a designator that specifies the location
1263
/// of the expression.
1264
///
1265
/// This can return one of two things: a simple address or a bitfield reference.
1266
/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
1267
/// an LLVM pointer type.
1268
///
1269
/// If this returns a bitfield reference, nothing about the pointee type of the
1270
/// LLVM value is known: For example, it may not be a pointer to an integer.
1271
///
1272
/// If this returns a normal address, and if the lvalue's C type is fixed size,
1273
/// this method guarantees that the returned pointer type will point to an LLVM
1274
/// type of the same size as the lvalue's type.  If the lvalue has a variable
1275
/// length type, this is not possible.
1276
///
1277
1.63M
LValue CodeGenFunction::EmitLValue(const Expr *E) {
1278
1.63M
  ApplyDebugLocation DL(*this, E);
1279
1.63M
  switch (E->getStmtClass()) {
1280
0
  default: return EmitUnsupportedLValue(E, "l-value expression");
1281
1282
0
  case Expr::ObjCPropertyRefExprClass:
1283
0
    llvm_unreachable("cannot emit a property reference directly");
1284
1285
1
  case Expr::ObjCSelectorExprClass:
1286
1
    return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
1287
6
  case Expr::ObjCIsaExprClass:
1288
6
    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
1289
113k
  case Expr::BinaryOperatorClass:
1290
113k
    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
1291
19.2k
  case Expr::CompoundAssignOperatorClass: {
1292
19.2k
    QualType Ty = E->getType();
1293
19.2k
    if (const AtomicType *AT = Ty->getAs<AtomicType>())
1294
0
      Ty = AT->getValueType();
1295
19.2k
    if (!Ty->isAnyComplexType())
1296
19.1k
      return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1297
56
    return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
1298
56
  }
1299
44.1k
  case Expr::CallExprClass:
1300
44.1k
  case Expr::CXXMemberCallExprClass:
1301
44.1k
  case Expr::CXXOperatorCallExprClass:
1302
44.1k
  case Expr::UserDefinedLiteralClass:
1303
44.1k
    return EmitCallExprLValue(cast<CallExpr>(E));
1304
0
  case Expr::CXXRewrittenBinaryOperatorClass:
1305
0
    return EmitLValue(cast<CXXRewrittenBinaryOperator>(E)->getSemanticForm());
1306
11
  case Expr::VAArgExprClass:
1307
11
    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
1308
1.12M
  case Expr::DeclRefExprClass:
1309
1.12M
    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
1310
4
  case Expr::ConstantExprClass: {
1311
4
    const ConstantExpr *CE = cast<ConstantExpr>(E);
1312
4
    if (llvm::Value *Result = ConstantEmitter(*this).tryEmitConstantExpr(CE)) {
1313
4
      QualType RetType = cast<CallExpr>(CE->getSubExpr()->IgnoreImplicit())
1314
4
                             ->getCallReturnType(getContext());
1315
4
      return MakeNaturalAlignAddrLValue(Result, RetType);
1316
4
    }
1317
0
    return EmitLValue(cast<ConstantExpr>(E)->getSubExpr());
1318
0
  }
1319
2.06k
  case Expr::ParenExprClass:
1320
2.06k
    return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
1321
0
  case Expr::GenericSelectionExprClass:
1322
0
    return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
1323
522
  case Expr::PredefinedExprClass:
1324
522
    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
1325
48.1k
  case Expr::StringLiteralClass:
1326
48.1k
    return EmitStringLiteralLValue(cast<StringLiteral>(E));
1327
15
  case Expr::ObjCEncodeExprClass:
1328
15
    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
1329
15
  case Expr::PseudoObjectExprClass:
1330
15
    return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
1331
6
  case Expr::InitListExprClass:
1332
6
    return EmitInitListLValue(cast<InitListExpr>(E));
1333
1
  case Expr::CXXTemporaryObjectExprClass:
1334
1
  case Expr::CXXConstructExprClass:
1335
1
    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
1336
3
  case Expr::CXXBindTemporaryExprClass:
1337
3
    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
1338
23
  case Expr::CXXUuidofExprClass:
1339
23
    return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E));
1340
4
  case Expr::LambdaExprClass:
1341
4
    return EmitAggExprToLValue(E);
1342
1343
1.08k
  case Expr::ExprWithCleanupsClass: {
1344
1.08k
    const auto *cleanups = cast<ExprWithCleanups>(E);
1345
1.08k
    RunCleanupsScope Scope(*this);
1346
1.08k
    LValue LV = EmitLValue(cleanups->getSubExpr());
1347
1.08k
    if (LV.isSimple()) {
1348
      // Defend against branches out of gnu statement expressions surrounded by
1349
      // cleanups.
1350
1.08k
      llvm::Value *V = LV.getPointer(*this);
1351
1.08k
      Scope.ForceCleanup({&V});
1352
1.08k
      return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
1353
1.08k
                              getContext(), LV.getBaseInfo(), LV.getTBAAInfo());
1354
1.08k
    }
1355
    // FIXME: Is it possible to create an ExprWithCleanups that produces a
1356
    // bitfield lvalue or some other non-simple lvalue?
1357
0
    return LV;
1358
0
  }
1359
1360
85
  case Expr::CXXDefaultArgExprClass: {
1361
85
    auto *DAE = cast<CXXDefaultArgExpr>(E);
1362
85
    CXXDefaultArgExprScope Scope(*this, DAE);
1363
85
    return EmitLValue(DAE->getExpr());
1364
0
  }
1365
14
  case Expr::CXXDefaultInitExprClass: {
1366
14
    auto *DIE = cast<CXXDefaultInitExpr>(E);
1367
14
    CXXDefaultInitExprScope Scope(*this, DIE);
1368
14
    return EmitLValue(DIE->getExpr());
1369
0
  }
1370
344
  case Expr::CXXTypeidExprClass:
1371
344
    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
1372
1373
20
  case Expr::ObjCMessageExprClass:
1374
20
    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
1375
1.53k
  case Expr::ObjCIvarRefExprClass:
1376
1.53k
    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
1377
1
  case Expr::StmtExprClass:
1378
1
    return EmitStmtExprLValue(cast<StmtExpr>(E));
1379
48.4k
  case Expr::UnaryOperatorClass:
1380
48.4k
    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
1381
44.7k
  case Expr::ArraySubscriptExprClass:
1382
44.7k
    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
1383
17
  case Expr::MatrixSubscriptExprClass:
1384
17
    return EmitMatrixSubscriptExpr(cast<MatrixSubscriptExpr>(E));
1385
1.51k
  case Expr::OMPArraySectionExprClass:
1386
1.51k
    return EmitOMPArraySectionExpr(cast<OMPArraySectionExpr>(E));
1387
282
  case Expr::ExtVectorElementExprClass:
1388
282
    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
1389
124k
  case Expr::MemberExprClass:
1390
124k
    return EmitMemberExpr(cast<MemberExpr>(E));
1391
1.83k
  case Expr::CompoundLiteralExprClass:
1392
1.83k
    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
1393
461
  case Expr::ConditionalOperatorClass:
1394
461
    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
1395
6
  case Expr::BinaryConditionalOperatorClass:
1396
6
    return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
1397
2
  case Expr::ChooseExprClass:
1398
2
    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
1399
567
  case Expr::OpaqueValueExprClass:
1400
567
    return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
1401
6
  case Expr::SubstNonTypeTemplateParmExprClass:
1402
6
    return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
1403
39.0k
  case Expr::ImplicitCastExprClass:
1404
39.0k
  case Expr::CStyleCastExprClass:
1405
39.0k
  case Expr::CXXFunctionalCastExprClass:
1406
39.0k
  case Expr::CXXStaticCastExprClass:
1407
39.0k
  case Expr::CXXDynamicCastExprClass:
1408
39.0k
  case Expr::CXXReinterpretCastExprClass:
1409
39.0k
  case Expr::CXXConstCastExprClass:
1410
39.0k
  case Expr::CXXAddrspaceCastExprClass:
1411
39.0k
  case Expr::ObjCBridgedCastExprClass:
1412
39.0k
    return EmitCastLValue(cast<CastExpr>(E));
1413
1414
14.1k
  case Expr::MaterializeTemporaryExprClass:
1415
14.1k
    return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
1416
1417
2
  case Expr::CoawaitExprClass:
1418
2
    return EmitCoawaitLValue(cast<CoawaitExpr>(E));
1419
1
  case Expr::CoyieldExprClass:
1420
1
    return EmitCoyieldLValue(cast<CoyieldExpr>(E));
1421
1.63M
  }
1422
1.63M
}
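EmitLValue is a pure dispatcher over the statement class of the expression. A minimal sketch (hypothetical user code) showing which of the cases above a few common l-value expressions would reach; the mapping comments are illustrative only:

struct S { int f; };
int g;
int arr[4];
int touch(S *p, int i) {
  g = 1;                 // DeclRefExprClass         -> EmitDeclRefLValue
  p->f = 2;              // MemberExprClass          -> EmitMemberExpr
  arr[i] = 3;            // ArraySubscriptExprClass  -> EmitArraySubscriptExpr
  *(&g) = 4;             // UnaryOperatorClass       -> EmitUnaryOpLValue
  (i ? g : arr[0]) = 5;  // ConditionalOperatorClass -> EmitConditionalOperatorLValue
  return g;
}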
1423
1424
/// Given an object of the given canonical type, can we safely copy a
1425
/// value out of it based on its initializer?
1426
407k
static bool isConstantEmittableObjectType(QualType type) {
1427
407k
  assert(type.isCanonical());
1428
407k
  assert(!type->isReferenceType());
1429
1430
  // Must be const-qualified but non-volatile.
1431
407k
  Qualifiers qs = type.getLocalQualifiers();
1432
407k
  if (!qs.hasConst() || qs.hasVolatile()) return false;
1433
1434
  // Otherwise, all object types satisfy this except C++ classes with
1435
  // mutable subobjects or non-trivial copy/destroy behavior.
1436
24.1k
  if (const auto *RT = dyn_cast<RecordType>(type))
1437
0
    if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1438
0
      if (RD->hasMutableFields() || !RD->isTrivial())
1439
0
        return false;
1440
1441
24.1k
  return true;
1442
24.1k
}
1443
1444
/// Can we constant-emit a load of a reference to a variable of the
1445
/// given type?  This is different from predicates like
1446
/// Decl::mightBeUsableInConstantExpressions because we do want it to apply
1447
/// in situations that don't necessarily satisfy the language's rules
1448
/// for this (e.g. C++'s ODR-use rules).  For example, we want to able
1449
/// to do this with const float variables even if those variables
1450
/// aren't marked 'constexpr'.
1451
enum ConstantEmissionKind {
1452
  CEK_None,
1453
  CEK_AsReferenceOnly,
1454
  CEK_AsValueOrReference,
1455
  CEK_AsValueOnly
1456
};
1457
407k
static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
1458
407k
  type = type.getCanonicalType();
1459
407k
  if (const auto *ref = dyn_cast<ReferenceType>(type)) {
1460
2.09k
    if (isConstantEmittableObjectType(ref->getPointeeType()))
1461
9
      return CEK_AsValueOrReference;
1462
2.08k
    return CEK_AsReferenceOnly;
1463
2.08k
  }
1464
405k
  if (isConstantEmittableObjectType(type))
1465
24.1k
    return CEK_AsValueOnly;
1466
381k
  return CEK_None;
1467
381k
}
1468
1469
/// Try to emit a reference to the given value without producing it as
1470
/// an l-value.  This is just an optimization, but it avoids us needing
1471
/// to emit global copies of variables if they're named without triggering
1472
/// a formal use in a context where we can't emit a direct reference to them,
1473
/// for instance if a block or lambda or a member of a local class uses a
1474
/// const int variable or constexpr variable from an enclosing function.
1475
CodeGenFunction::ConstantEmission
1476
693k
CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
1477
693k
  ValueDecl *value = refExpr->getDecl();
1478
1479
  // The value needs to be an enum constant or a constant variable.
1480
693k
  ConstantEmissionKind CEK;
1481
693k
  if (isa<ParmVarDecl>(value)) {
1482
284k
    CEK = CEK_None;
1483
409k
  } else if (auto *var = dyn_cast<VarDecl>(value)) {
1484
407k
    CEK = checkVarTypeForConstantEmission(var->getType());
1485
2.01k
  } else if (isa<EnumConstantDecl>(value)) {
1486
2.01k
    CEK = CEK_AsValueOnly;
1487
7
  } else {
1488
7
    CEK = CEK_None;
1489
7
  }
1490
693k
  if (CEK == CEK_None) return ConstantEmission();
1491
1492
28.2k
  Expr::EvalResult result;
1493
28.2k
  bool resultIsReference;
1494
28.2k
  QualType resultType;
1495
1496
  // It's best to evaluate all the way as an r-value if that's permitted.
1497
28.2k
  if (CEK != CEK_AsReferenceOnly &&
1498
26.1k
      refExpr->EvaluateAsRValue(result, getContext())) {
1499
7.54k
    resultIsReference = false;
1500
7.54k
    resultType = refExpr->getType();
1501
1502
  // Otherwise, try to evaluate as an l-value.
1503
20.7k
  } else if (CEK != CEK_AsValueOnly &&
1504
2.09k
             refExpr->EvaluateAsLValue(result, getContext())) {
1505
50
    resultIsReference = true;
1506
50
    resultType = value->getType();
1507
1508
  // Failure.
1509
20.6k
  } else {
1510
20.6k
    return ConstantEmission();
1511
20.6k
  }
1512
1513
  // In any case, if the initializer has side-effects, abandon ship.
1514
7.59k
  if (result.HasSideEffects)
1515
0
    return ConstantEmission();
1516
1517
  // Emit as a constant.
1518
7.59k
  auto C = ConstantEmitter(*this).emitAbstract(refExpr->getLocation(),
1519
7.59k
                                               result.Val, resultType);
1520
1521
  // Make sure we emit a debug reference to the global variable.
1522
  // This should probably fire even for
1523
7.59k
  if (isa<VarDecl>(value)) {
1524
5.58k
    if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
1525
5.45k
      EmitDeclRefExprDbgValue(refExpr, result.Val);
1526
2.01k
  } else {
1527
2.01k
    assert(isa<EnumConstantDecl>(value));
1528
2.01k
    EmitDeclRefExprDbgValue(refExpr, result.Val);
1529
2.01k
  }
1530
1531
  // If we emitted a reference constant, we need to dereference that.
1532
7.59k
  if (resultIsReference)
1533
50
    return ConstantEmission::forReference(C);
1534
1535
7.54k
  return ConstantEmission::forValue(C);
1536
7.54k
}
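As the comment above notes, this lets a lambda or local class use a const or constexpr variable from an enclosing function without forcing a capture or a global copy to be emitted. A minimal sketch of that situation, using a hypothetical function name; the reference to n inside the lambda can be folded to the constant 42:

int foldedConstant() {
  const int n = 42;             // constant-emittable: const, non-volatile, trivial type
  auto f = [] { return n; };    // no capture needed; the DeclRefExpr evaluates as an r-value constant
  return f();
}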
1537
1538
static DeclRefExpr *tryToConvertMemberExprToDeclRefExpr(CodeGenFunction &CGF,
1539
195k
                                                        const MemberExpr *ME) {
1540
195k
  if (auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
1541
    // Try to emit static variable member expressions as DREs.
1542
51
    return DeclRefExpr::Create(
1543
51
        CGF.getContext(), NestedNameSpecifierLoc(), SourceLocation(), VD,
1544
51
        /*RefersToEnclosingVariableOrCapture=*/false, ME->getExprLoc(),
1545
51
        ME->getType(), ME->getValueKind(), nullptr, nullptr, ME->isNonOdrUse());
1546
51
  }
1547
195k
  return nullptr;
1548
195k
}
1549
1550
CodeGenFunction::ConstantEmission
1551
70.3k
CodeGenFunction::tryEmitAsConstant(const MemberExpr *ME) {
1552
70.3k
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, ME))
1553
23
    return tryEmitAsConstant(DRE);
1554
70.2k
  return ConstantEmission();
1555
70.2k
}
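tryToConvertMemberExprToDeclRefExpr fires when the member named through `.` or `->` is actually a static data member, i.e. a VarDecl. A minimal sketch with hypothetical names; the MemberExpr w.kMax is rewritten into a DeclRefExpr to Widget::kMax, which can then be emitted as the constant 8:

struct Widget { static const int kMax = 8; };
int maxOf(const Widget &w) { return w.kMax; }   // member syntax, but the referenced decl is a VarDecl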
1556
1557
llvm::Value *CodeGenFunction::emitScalarConstant(
1558
7.57k
    const CodeGenFunction::ConstantEmission &Constant, Expr *E) {
1559
7.57k
  assert(Constant && "not a constant");
1560
7.57k
  if (Constant.isReference())
1561
47
    return EmitLoadOfLValue(Constant.getReferenceLValue(*this, E),
1562
47
                            E->getExprLoc())
1563
47
        .getScalarVal();
1564
7.53k
  return Constant.getValue();
1565
7.53k
}
1566
1567
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
1568
904k
                                               SourceLocation Loc) {
1569
904k
  return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
1570
904k
                          lvalue.getType(), Loc, lvalue.getBaseInfo(),
1571
904k
                          lvalue.getTBAAInfo(), lvalue.isNontemporal());
1572
904k
}
1573
1574
1.84M
static bool hasBooleanRepresentation(QualType Ty) {
1575
1.84M
  if (Ty->isBooleanType())
1576
9.92k
    return true;
1577
1578
1.83M
  if (const EnumType *ET = Ty->getAs<EnumType>())
1579
2.02k
    return ET->getDecl()->getIntegerType()->isBooleanType();
1580
1581
1.83M
  if (const AtomicType *AT = Ty->getAs<AtomicType>())
1582
105
    return hasBooleanRepresentation(AT->getValueType());
1583
1584
1.83M
  return false;
1585
1.83M
}
1586
1587
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
1588
                            llvm::APInt &Min, llvm::APInt &End,
1589
80.5k
                            bool StrictEnums, bool IsBool) {
1590
80.5k
  const EnumType *ET = Ty->getAs<EnumType>();
1591
80.5k
  bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
1592
50
                                ET && !ET->getDecl()->isFixed();
1593
80.5k
  if (!IsBool && !IsRegularCPlusPlusEnum)
1594
80.2k
    return false;
1595
1596
273
  if (IsBool) {
1597
252
    Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0);
1598
252
    End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2);
1599
21
  } else {
1600
21
    const EnumDecl *ED = ET->getDecl();
1601
21
    llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType());
1602
21
    unsigned Bitwidth = LTy->getScalarSizeInBits();
1603
21
    unsigned NumNegativeBits = ED->getNumNegativeBits();
1604
21
    unsigned NumPositiveBits = ED->getNumPositiveBits();
1605
1606
21
    if (NumNegativeBits) {
1607
9
      unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
1608
9
      assert(NumBits <= Bitwidth);
1609
9
      End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
1610
9
      Min = -End;
1611
12
    } else {
1612
12
      assert(NumPositiveBits <= Bitwidth);
1613
12
      End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
1614
12
      Min = llvm::APInt(Bitwidth, 0);
1615
12
    }
1616
21
  }
1617
273
  return true;
1618
273
}
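A worked instance of the interval computed above, using illustrative bit counts rather than values from this report. For an enum with no negative enumerators the range is [0, 1 << NumPositiveBits); with negative enumerators it is [-2^(NumBits-1), 2^(NumBits-1)), where NumBits = max(NumNegativeBits, NumPositiveBits + 1):

#include <algorithm>
#include <cassert>
void enumRangeSketch() {
  // enum E { A, B, C };  -> NumPositiveBits == 2, no negative bits.
  unsigned NumPositiveBits = 2;
  unsigned End = 1u << NumPositiveBits;                   // 4, so the range metadata is [0, 4)
  assert(End == 4);

  // enum F { M = -8, N = 7 };  -> NumNegativeBits == 4, NumPositiveBits == 3.
  unsigned NumNegativeBits = 4, Pos = 3;
  unsigned NumBits = std::max(NumNegativeBits, Pos + 1);  // 4
  int SignedEnd = 1 << (NumBits - 1);                     // 8
  int SignedMin = -SignedEnd;                             // -8, so the range metadata is [-8, 8)
  assert(SignedEnd == 8 && SignedMin == -8);
}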
1619
1620
80.5k
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
1621
80.5k
  llvm::APInt Min, End;
1622
80.5k
  if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
1623
80.5k
                       hasBooleanRepresentation(Ty)))
1624
80.2k
    return nullptr;
1625
1626
250
  llvm::MDBuilder MDHelper(getLLVMContext());
1627
250
  return MDHelper.createRange(Min, End);
1628
250
}
1629
1630
bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
1631
909k
                                           SourceLocation Loc) {
1632
909k
  bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
1633
909k
  bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
1634
909k
  if (!HasBoolCheck && !HasEnumCheck)
1635
909k
    return false;
1636
1637
230
  bool IsBool = hasBooleanRepresentation(Ty) ||
1638
221
                NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
1639
230
  bool NeedsBoolCheck = HasBoolCheck && 
IsBool184
;
1640
230
  bool NeedsEnumCheck = HasEnumCheck && 
Ty->getAs<EnumType>()125
;
1641
230
  if (!NeedsBoolCheck && 
!NeedsEnumCheck211
)
1642
204
    return false;
1643
1644
  // Single-bit booleans don't need to be checked. Special-case this to avoid
1645
  // a bit width mismatch when handling bitfield values. This is handled by
1646
  // EmitFromMemory for the non-bitfield case.
1647
26
  if (IsBool &&
1648
19
      cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
1649
3
    return false;
1650
1651
23
  llvm::APInt Min, End;
1652
23
  if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
1653
0
    return true;
1654
1655
23
  auto &Ctx = getLLVMContext();
1656
23
  SanitizerScope SanScope(this);
1657
23
  llvm::Value *Check;
1658
23
  --End;
1659
23
  if (!Min) {
1660
21
    Check = Builder.CreateICmpULE(Value, llvm::ConstantInt::get(Ctx, End));
1661
2
  } else {
1662
2
    llvm::Value *Upper =
1663
2
        Builder.CreateICmpSLE(Value, llvm::ConstantInt::get(Ctx, End));
1664
2
    llvm::Value *Lower =
1665
2
        Builder.CreateICmpSGE(Value, llvm::ConstantInt::get(Ctx, Min));
1666
2
    Check = Builder.CreateAnd(Upper, Lower);
1667
2
  }
1668
23
  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
1669
23
                                  EmitCheckTypeDescriptor(Ty)};
1670
23
  SanitizerMask Kind =
1671
16
      NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
1672
23
  EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
1673
23
            StaticArgs, EmitCheckValue(Value));
1674
23
  return true;
1675
23
}
1676
1677
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
1678
                                               QualType Ty,
1679
                                               SourceLocation Loc,
1680
                                               LValueBaseInfo BaseInfo,
1681
                                               TBAAAccessInfo TBAAInfo,
1682
909k
                                               bool isNontemporal) {
1683
909k
  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1684
    // For better performance, handle vector loads differently.
1685
909k
    if (Ty->isVectorType()) {
1686
128k
      const llvm::Type *EltTy = Addr.getElementType();
1687
1688
128k
      const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
1689
1690
      // Handle vectors of size 3 like size 4 for better performance.
1691
128k
      if (VTy->getNumElements() == 3) {
1692
1693
        // Bitcast to vec4 type.
1694
51
        auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4);
1695
51
        Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
1696
        // Now load value.
1697
51
        llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
1698
1699
        // Shuffle vector to get vec3.
1700
51
        V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
1701
51
                                        ArrayRef<int>{0, 1, 2}, "extractVec");
1702
51
        return EmitFromMemory(V, Ty);
1703
51
      }
1704
909k
    }
1705
909k
  }
1706
1707
  // Atomic operations have to be done on integral types.
1708
909k
  LValue AtomicLValue =
1709
909k
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1710
909k
  if (Ty->isAtomicType() || LValueIsSuitableForInlineAtomic(AtomicLValue)) {
1711
54
    return EmitAtomicLoad(AtomicLValue, Loc).getScalarVal();
1712
54
  }
1713
1714
909k
  llvm::LoadInst *Load = Builder.CreateLoad(Addr, Volatile);
1715
909k
  if (isNontemporal) {
1716
167
    llvm::MDNode *Node = llvm::MDNode::get(
1717
167
        Load->getContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1718
167
    Load->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1719
167
  }
1720
1721
909k
  CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
1722
1723
909k
  if (EmitScalarRangeCheck(Load, Ty, Loc)) {
1724
    // In order to prevent the optimizer from throwing away the check, don't
1725
    // attach range metadata to the load.
1726
909k
  } else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
1727
80.5k
    if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
1728
247
      Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
1729
1730
909k
  return EmitFromMemory(Load, Ty);
1731
909k
}
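Illustrative user code for the vec3 fast path above (the clang ext_vector_type extension, compiled without -fpreserve-vec3-type): the 3-element vector is loaded through a bitcast to a 4-element vector and then shuffled back down, matching the castToVec4/loadVec4/extractVec names in the code:

typedef float float3 __attribute__((ext_vector_type(3)));
float sum3(const float3 *p) {
  float3 v = *p;              // loaded as a <4 x float>, then shuffled with mask {0, 1, 2}
  return v.x + v.y + v.z;
}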
1732
1733
848k
llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
1734
  // Bool has a different representation in memory than in registers.
1735
848k
  if (hasBooleanRepresentation(Ty)) {
1736
    // This should really always be an i1, but sometimes it's already
1737
    // an i8, and it's awkward to track those cases down.
1738
4.73k
    if (Value->getType()->isIntegerTy(1))
1739
4.70k
      return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool");
1740
30
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1741
30
           "wrong value rep of bool");
1742
30
  }
1743
1744
844k
  return Value;
1745
848k
}
1746
1747
918k
llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
1748
  // Bool has a different representation in memory than in registers.
1749
918k
  if (hasBooleanRepresentation(Ty)) {
1750
4.93k
    assert(Value->getType()->isIntegerTy(getContext().getTypeSize(Ty)) &&
1751
4.93k
           "wrong value rep of bool");
1752
4.93k
    return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
1753
4.93k
  }
1754
1755
913k
  return Value;
1756
913k
}
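A minimal sketch of the bool round trip these two helpers implement: a bool is an i8 in memory but an i1 in registers, so a store zero-extends ("frombool") and a load truncates ("tobool"):

bool flag;
void setFlag(bool b) { flag = b; }   // EmitToMemory: zext i1 -> i8 before the store
bool getFlag() { return flag; }      // EmitFromMemory: trunc i8 -> i1 after the load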
1757
1758
// Convert the pointer of \p Addr to a pointer to a vector (the value type of
1759
// MatrixType), if it points to an array (the memory type of MatrixType).
1760
static Address MaybeConvertMatrixAddress(Address Addr, CodeGenFunction &CGF,
1761
367
                                         bool IsVector = true) {
1762
367
  auto *ArrayTy = dyn_cast<llvm::ArrayType>(
1763
367
      cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
1764
367
  if (ArrayTy && IsVector) {
1765
155
    auto *VectorTy = llvm::FixedVectorType::get(ArrayTy->getElementType(),
1766
155
                                                ArrayTy->getNumElements());
1767
1768
155
    return Address(CGF.Builder.CreateElementBitCast(Addr, VectorTy));
1769
155
  }
1770
212
  auto *VectorTy = dyn_cast<llvm::VectorType>(
1771
212
      cast<llvm::PointerType>(Addr.getPointer()->getType())->getElementType());
1772
212
  if (VectorTy && !IsVector) {
1773
0
    auto *ArrayTy = llvm::ArrayType::get(
1774
0
        VectorTy->getElementType(),
1775
0
        cast<llvm::FixedVectorType>(VectorTy)->getNumElements());
1776
1777
0
    return Address(CGF.Builder.CreateElementBitCast(Addr, ArrayTy));
1778
0
  }
1779
1780
212
  return Addr;
1781
212
}
1782
1783
// Emit a store of a matrix LValue. This may require casting the original
1784
// pointer to memory address (ArrayType) to a pointer to the value type
1785
// (VectorType).
1786
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
1787
197
                                    bool isInit, CodeGenFunction &CGF) {
1788
197
  Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
1789
197
                                           value->getType()->isVectorTy());
1790
197
  CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
1791
197
                        lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
1792
197
                        lvalue.isNontemporal());
1793
197
}
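Illustrative user code for the matrix path (the clang matrix extension, enabled with -fenable-matrix; the type alias is an assumption for the example): a matrix is an array in memory but a flat vector as a value, so the store above first rewrites the address through MaybeConvertMatrixAddress:

typedef float m4x4_t __attribute__((matrix_type(4, 4)));
void storeMatrix(m4x4_t *dst, m4x4_t m) {
  *dst = m;   // the <16 x float> value is stored through an address rewritten from the [16 x float] memory type
}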
1794
1795
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
1796
                                        bool Volatile, QualType Ty,
1797
                                        LValueBaseInfo BaseInfo,
1798
                                        TBAAAccessInfo TBAAInfo,
1799
847k
                                        bool isInit, bool isNontemporal) {
1800
847k
  if (!CGM.getCodeGenOpts().PreserveVec3Type) {
1801
    // Handle vectors differently to get better performance.
1802
847k
    if (Ty->isVectorType()) {
1803
107k
      llvm::Type *SrcTy = Value->getType();
1804
107k
      auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
1805
      // Handle vec3 special.
1806
107k
      if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
1807
        // Our source is a vec3, do a shuffle vector to make it a vec4.
1808
166
        Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
1809
166
                                            ArrayRef<int>{0, 1, 2, -1},
1810
166
                                            "extractVec");
1811
166
        SrcTy = llvm::FixedVectorType::get(VecTy->getElementType(), 4);
1812
166
      }
1813
107k
      if (Addr.getElementType() != SrcTy) {
1814
168
        Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
1815
168
      }
1816
107k
    }
1817
847k
  }
1818
1819
847k
  Value = EmitToMemory(Value, Ty);
1820
1821
847k
  LValue AtomicLValue =
1822
847k
      LValue::MakeAddr(Addr, Ty, getContext(), BaseInfo, TBAAInfo);
1823
847k
  if (Ty->isAtomicType() ||
1824
847k
      (!isInit && 
LValueIsSuitableForInlineAtomic(AtomicLValue)226k
)) {
1825
65
    EmitAtomicStore(RValue::get(Value), AtomicLValue, isInit);
1826
65
    return;
1827
65
  }
1828
1829
847k
  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
1830
847k
  if (isNontemporal) {
1831
227
    llvm::MDNode *Node =
1832
227
        llvm::MDNode::get(Store->getContext(),
1833
227
                          llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
1834
227
    Store->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
1835
227
  }
1836
1837
847k
  CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
1838
847k
}
1839
1840
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
1841
847k
                                        bool isInit) {
1842
847k
  if (lvalue.getType()->isConstantMatrixType()) {
1843
197
    EmitStoreOfMatrixScalar(value, lvalue, isInit, *this);
1844
197
    return;
1845
197
  }
1846
1847
846k
  EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
1848
846k
                    lvalue.getType(), lvalue.getBaseInfo(),
1849
846k
                    lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
1850
846k
}
1851
1852
// Emit a load of a LValue of matrix type. This may require casting the pointer
1853
// to memory address (ArrayType) to a pointer to the value type (VectorType).
1854
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
1855
153
                                     CodeGenFunction &CGF) {
1856
153
  assert(LV.getType()->isConstantMatrixType());
1857
153
  Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
1858
153
  LV.setAddress(Addr);
1859
153
  return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
1860
153
}
1861
1862
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
1863
/// method emits the address of the lvalue, then loads the result as an rvalue,
1864
/// returning the rvalue.
1865
836k
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
1866
836k
  if (LV.isObjCWeak()) {
1867
    // load of a __weak object.
1868
39
    Address AddrWeakObj = LV.getAddress(*this);
1869
39
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
1870
39
                                                             AddrWeakObj));
1871
39
  }
1872
836k
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
1873
    // In MRC mode, we do a load+autorelease.
1874
150
    if (!getLangOpts().ObjCAutoRefCount) {
1875
15
      return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
1876
15
    }
1877
1878
    // In ARC mode, we load retained and then consume the value.
1879
135
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
1880
135
    Object = EmitObjCConsumeObject(LV.getType(), Object);
1881
135
    return RValue::get(Object);
1882
135
  }
1883
1884
836k
  if (LV.isSimple()) {
1885
835k
    assert(!LV.getType()->isFunctionType());
1886
1887
835k
    if (LV.getType()->isConstantMatrixType())
1888
153
      return EmitLoadOfMatrixLValue(LV, Loc, *this);
1889
1890
    // Everything needs a load.
1891
835k
    return RValue::get(EmitLoadOfScalar(LV, Loc));
1892
835k
  }
1893
1894
1.07k
  if (LV.isVectorElt()) {
1895
49
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddress(),
1896
49
                                              LV.isVolatileQualified());
1897
49
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
1898
49
                                                    "vecext"));
1899
49
  }
1900
1901
  // If this is a reference to a subset of the elements of a vector, either
1902
  // shuffle the input or extract/insert them as appropriate.
1903
1.02k
  if (LV.isExtVectorElt()) {
1904
242
    return EmitLoadOfExtVectorElementLValue(LV);
1905
242
  }
1906
1907
  // Global Register variables always invoke intrinsics
1908
784
  if (LV.isGlobalReg())
1909
24
    return EmitLoadOfGlobalRegLValue(LV);
1910
1911
760
  if (LV.isMatrixElt()) {
1912
2
    llvm::LoadInst *Load =
1913
2
        Builder.CreateLoad(LV.getMatrixAddress(), LV.isVolatileQualified());
1914
2
    return RValue::get(
1915
2
        Builder.CreateExtractElement(Load, LV.getMatrixIdx(), "matrixext"));
1916
2
  }
1917
1918
758
  assert(LV.isBitField() && "Unknown LValue type!");
1919
758
  return EmitLoadOfBitfieldLValue(LV, Loc);
1920
758
}
1921
1922
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
1923
778
                                                 SourceLocation Loc) {
1924
778
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();
1925
1926
  // Get the output type.
1927
778
  llvm::Type *ResLTy = ConvertType(LV.getType());
1928
1929
778
  Address Ptr = LV.getBitFieldAddress();
1930
778
  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
1931
1932
778
  if (Info.IsSigned) {
1933
460
    assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
1934
460
    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
1935
460
    if (HighBits)
1936
323
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
1937
460
    if (Info.Offset + HighBits)
1938
402
      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
1939
318
  } else {
1940
318
    if (Info.Offset)
1941
223
      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
1942
318
    if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
1943
280
      Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
1944
280
                                                              Info.Size),
1945
280
                              "bf.clear");
1946
318
  }
1947
778
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
1948
778
  EmitScalarRangeCheck(Val, LV.getType(), Loc);
1949
778
  return RValue::get(Val);
1950
778
}
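A worked sketch of the signed extraction above on concrete numbers (the field parameters are illustrative, not taken from this report): for Offset = 3, Size = 5, StorageSize = 32, the value is shifted left by 24 and then arithmetically shifted right by 27, which both isolates the field and sign-extends it:

#include <cstdint>
int32_t loadSignedBitfield(uint32_t storage) {
  const unsigned StorageSize = 32, Offset = 3, Size = 5;
  unsigned HighBits = StorageSize - Offset - Size;   // 24
  int32_t Val = (int32_t)(storage << HighBits);      // bf.shl
  Val >>= (Offset + HighBits);                       // bf.ashr: the arithmetic shift sign-extends
  return Val;                                        // the 5-bit field as a signed int
}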
1951
1952
// If this is a reference to a subset of the elements of a vector, create an
1953
// appropriate shufflevector.
1954
244
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
1955
244
  llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddress(),
1956
244
                                        LV.isVolatileQualified());
1957
1958
244
  const llvm::Constant *Elts = LV.getExtVectorElts();
1959
1960
  // If the result of the expression is a non-vector type, we must be extracting
1961
  // a single element.  Just codegen as an extractelement.
1962
244
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
1963
244
  if (!ExprVT) {
1964
203
    unsigned InIdx = getAccessedFieldNo(0, Elts);
1965
203
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
1966
203
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
1967
203
  }
1968
1969
  // Always use shuffle vector to try to retain the original program structure
1970
41
  unsigned NumResultElts = ExprVT->getNumElements();
1971
1972
41
  SmallVector<int, 4> Mask;
1973
163
  for (unsigned i = 0; i != NumResultElts; ++i)
122
    Mask.push_back(getAccessedFieldNo(i, Elts));
1975
1976
41
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
1977
41
                                    Mask);
1978
41
  return RValue::get(Vec);
1979
41
}
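Illustrative user code for the two branches above (clang ext_vector_type swizzles): a scalar component comes out as a single extractelement, while a multi-element swizzle becomes a shufflevector whose mask is built from the accessed field numbers:

typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float2 __attribute__((ext_vector_type(2)));
float  getX(float4 v)  { return v.x;  }   // non-vector result: extractelement
float2 getZW(float4 v) { return v.zw; }   // vector result: shufflevector with mask {2, 3}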
1980
1981
/// Generates lvalue for partial ext_vector access.
1982
1
Address CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
1983
1
  Address VectorAddress = LV.getExtVectorAddress();
1984
1
  QualType EQT = LV.getType()->castAs<VectorType>()->getElementType();
1985
1
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
1986
1987
1
  Address CastToPointerElement =
1988
1
    Builder.CreateElementBitCast(VectorAddress, VectorElementTy,
1989
1
                                 "conv.ptr.element");
1990
1991
1
  const llvm::Constant *Elts = LV.getExtVectorElts();
1992
1
  unsigned ix = getAccessedFieldNo(0, Elts);
1993
1994
1
  Address VectorBasePtrPlusIx =
1995
1
    Builder.CreateConstInBoundsGEP(CastToPointerElement, ix,
1996
1
                                   "vector.elt");
1997
1998
1
  return VectorBasePtrPlusIx;
1999
1
}
2000
2001
/// Loads of global named registers are always calls to intrinsics.
2002
24
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
2003
24
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
2004
24
         "Bad type for register variable");
2005
24
  llvm::MDNode *RegName = cast<llvm::MDNode>(
2006
24
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
2007
2008
  // We accept integer and pointer types only
2009
24
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
2010
24
  llvm::Type *Ty = OrigTy;
2011
24
  if (OrigTy->isPointerTy())
2012
3
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2013
24
  llvm::Type *Types[] = { Ty };
2014
2015
24
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
2016
24
  llvm::Value *Call = Builder.CreateCall(
2017
24
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
2018
24
  if (OrigTy->isPointerTy())
2019
3
    Call = Builder.CreateIntToPtr(Call, OrigTy);
2020
24
  return RValue::get(Call);
2021
24
}
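Illustrative, target-specific user code for named global register variables; the register name and target are assumptions (e.g. the stack pointer on AArch64). Reads and writes of such variables become calls to the llvm.read_register and llvm.write_register intrinsics rather than memory accesses:

register unsigned long current_sp asm("sp");        // global named register variable
unsigned long readSP() { return current_sp; }       // -> llvm.read_register (handled above)
void writeSP(unsigned long v) { current_sp = v; }   // -> llvm.write_register (handled further below)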
2022
2023
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
2024
/// lvalue, where both are guaranteed to have the same type, and that type
2025
/// is 'Ty'.
2026
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
2027
373k
                                             bool isInit) {
2028
373k
  if (!Dst.isSimple()) {
2029
787
    if (Dst.isVectorElt()) {
2030
      // Read/modify/write the vector, inserting the new element.
2031
414
      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
2032
414
                                            Dst.isVolatileQualified());
2033
414
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2034
414
                                        Dst.getVectorIdx(), "vecins");
2035
414
      Builder.CreateStore(Vec, Dst.getVectorAddress(),
2036
414
                          Dst.isVolatileQualified());
2037
414
      return;
2038
414
    }
2039
2040
    // If this is an update of extended vector elements, insert them as
2041
    // appropriate.
2042
373
    if (Dst.isExtVectorElt())
2043
29
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
2044
2045
344
    if (Dst.isGlobalReg())
2046
18
      return EmitStoreThroughGlobalRegLValue(Src, Dst);
2047
2048
326
    if (Dst.isMatrixElt()) {
2049
17
      llvm::Value *Vec = Builder.CreateLoad(Dst.getMatrixAddress());
2050
17
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
2051
17
                                        Dst.getMatrixIdx(), "matins");
2052
17
      Builder.CreateStore(Vec, Dst.getMatrixAddress(),
2053
17
                          Dst.isVolatileQualified());
2054
17
      return;
2055
17
    }
2056
2057
309
    assert(Dst.isBitField() && "Unknown LValue type");
2058
309
    return EmitStoreThroughBitfieldLValue(Src, Dst);
2059
309
  }
2060
2061
  // There's special magic for assigning into an ARC-qualified l-value.
2062
372k
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
2063
71
    switch (Lifetime) {
2064
0
    case Qualifiers::OCL_None:
2065
0
      llvm_unreachable("present but none");
2066
2067
41
    case Qualifiers::OCL_ExplicitNone:
2068
      // nothing special
2069
41
      break;
2070
2071
19
    case Qualifiers::OCL_Strong:
2072
19
      if (isInit) {
2073
3
        Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
2074
3
        break;
2075
3
      }
2076
16
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
2077
16
      return;
2078
2079
11
    case Qualifiers::OCL_Weak:
2080
11
      if (isInit)
2081
        // Initialize and then skip the primitive store.
2082
3
        EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
2083
8
      else
2084
8
        EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
2085
8
                         /*ignore*/ true);
2086
11
      return;
2087
2088
0
    case Qualifiers::OCL_Autoreleasing:
2089
0
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
2090
0
                                                     Src.getScalarVal()));
2091
      // fall into the normal path
2092
0
      break;
2093
372k
    }
2094
372k
  }
2095
2096
372k
  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
2097
    // store into a __weak object.
2098
26
    Address LvalueDst = Dst.getAddress(*this);
2099
26
    llvm::Value *src = Src.getScalarVal();
2100
26
     CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
2101
26
    return;
2102
26
  }
2103
2104
372k
  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
2105
    // store into a __strong object.
2106
216
    Address LvalueDst = Dst.getAddress(*this);
2107
216
    llvm::Value *src = Src.getScalarVal();
2108
216
    if (Dst.isObjCIvar()) {
2109
54
      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
2110
54
      llvm::Type *ResultType = IntPtrTy;
2111
54
      Address dst = EmitPointerWithAlignment(Dst.getBaseIvarExp());
2112
54
      llvm::Value *RHS = dst.getPointer();
2113
54
      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
2114
54
      llvm::Value *LHS =
2115
54
        Builder.CreatePtrToInt(LvalueDst.getPointer(), ResultType,
2116
54
                               "sub.ptr.lhs.cast");
2117
54
      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
2118
54
      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
2119
54
                                              BytesBetween);
2120
162
    } else if (Dst.isGlobalObjCRef()) {
2121
81
      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
2122
81
                                                Dst.isThreadLocalRef());
2123
81
    }
2124
81
    else
2125
81
      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
2126
216
    return;
2127
216
  }
2128
2129
372k
  assert(Src.isScalar() && "Can't emit an agg store with this method");
2130
372k
  EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
2131
372k
}
2132
2133
void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2134
783
                                                     llvm::Value **Result) {
2135
783
  const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
2136
783
  llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
2137
783
  Address Ptr = Dst.getBitFieldAddress();
2138
2139
  // Get the source value, truncated to the width of the bit-field.
2140
783
  llvm::Value *SrcVal = Src.getScalarVal();
2141
2142
  // Cast the source to the storage type and shift it into place.
2143
783
  SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
2144
783
                                 /*isSigned=*/false);
2145
783
  llvm::Value *MaskedVal = SrcVal;
2146
2147
  // See if there are other bits in the bitfield's storage we'll need to load
2148
  // and mask together with source before storing.
2149
783
  if (Info.StorageSize != Info.Size) {
2150
709
    assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
2151
709
    llvm::Value *Val =
2152
709
      Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
2153
2154
    // Mask the source value as needed.
2155
709
    if (!hasBooleanRepresentation(Dst.getType()))
2156
695
      SrcVal = Builder.CreateAnd(SrcVal,
2157
695
                                 llvm::APInt::getLowBitsSet(Info.StorageSize,
2158
695
                                                            Info.Size),
2159
695
                                 "bf.value");
2160
709
    MaskedVal = SrcVal;
2161
709
    if (Info.Offset)
2162
387
      SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl");
2163
2164
    // Mask out the original value.
2165
709
    Val = Builder.CreateAnd(Val,
2166
709
                            ~llvm::APInt::getBitsSet(Info.StorageSize,
2167
709
                                                     Info.Offset,
2168
709
                                                     Info.Offset + Info.Size),
2169
709
                            "bf.clear");
2170
2171
    // Or together the unchanged values and the source value.
2172
709
    SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
2173
74
  } else {
2174
74
    assert(Info.Offset == 0);
2175
    // According to the AACPS:
2176
    // When a volatile bit-field is written, and its container does not overlap
2177
    // with any non-bit-field member, its container must be read exactly once and
2178
    // written exactly once using the access width appropriate to the type of the
2179
    // container. The two accesses are not atomic.
2180
74
    if (Dst.isVolatileQualified() && isAAPCS(CGM.getTarget()) &&
2181
24
        CGM.getCodeGenOpts().ForceAAPCSBitfieldLoad)
2182
12
      Builder.CreateLoad(Ptr, true, "bf.load");
2183
74
  }
2184
2185
  // Write the new value back out.
2186
783
  Builder.CreateStore(SrcVal, Ptr, Dst.isVolatileQualified());
2187
2188
  // Return the new value of the bit-field, if requested.
2189
783
  if (Result) {
2190
473
    llvm::Value *ResultVal = MaskedVal;
2191
2192
    // Sign extend the value if needed.
2193
473
    if (Info.IsSigned) {
2194
280
      assert(Info.Size <= Info.StorageSize);
2195
280
      unsigned HighBits = Info.StorageSize - Info.Size;
2196
280
      if (HighBits) {
2197
242
        ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl");
2198
242
        ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr");
2199
242
      }
2200
280
    }
2201
2202
473
    ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned,
2203
473
                                      "bf.result.cast");
2204
473
    *Result = EmitFromMemory(ResultVal, Dst.getType());
2205
473
  }
2206
783
}
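A worked sketch of the read-modify-write above, again with illustrative field parameters (Offset = 3, Size = 5, StorageSize = 32): the source is masked to Size bits, shifted into place, and OR'ed into the storage after the old field bits are cleared:

#include <cstdint>
uint32_t storeBitfield(uint32_t storage, uint32_t src) {
  const unsigned Offset = 3, Size = 5;
  uint32_t lowMask = (1u << Size) - 1u;               // APInt::getLowBitsSet(StorageSize, Size)
  uint32_t srcVal = (src & lowMask) << Offset;        // bf.value, bf.shl
  uint32_t cleared = storage & ~(lowMask << Offset);  // bf.clear: ~getBitsSet(32, 3, 8)
  return cleared | srcVal;                            // bf.set
}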
2207
2208
void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
2209
29
                                                               LValue Dst) {
2210
  // This access turns into a read/modify/write of the vector.  Load the input
2211
  // value now.
2212
29
  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddress(),
2213
29
                                        Dst.isVolatileQualified());
2214
29
  const llvm::Constant *Elts = Dst.getExtVectorElts();
2215
2216
29
  llvm::Value *SrcVal = Src.getScalarVal();
2217
2218
29
  if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
2219
8
    unsigned NumSrcElts = VTy->getNumElements();
2220
8
    unsigned NumDstElts =
2221
8
        cast<llvm::FixedVectorType>(Vec->getType())->getNumElements();
2222
8
    if (NumDstElts == NumSrcElts) {
2223
      // Use shuffle vector if the src and destination have the same number of
2224
      // elements, and restore the vector mask since it is on the side it will be
2225
      // stored.
2226
2
      SmallVector<int, 4> Mask(NumDstElts);
2227
6
      for (unsigned i = 0; i != NumSrcElts; ++i)
4
        Mask[getAccessedFieldNo(i, Elts)] = i;
2229
2230
2
      Vec = Builder.CreateShuffleVector(
2231
2
          SrcVal, llvm::UndefValue::get(Vec->getType()), Mask);
2232
6
    } else if (NumDstElts > NumSrcElts) {
2233
      // Extended the source vector to the same length and then shuffle it
2234
      // into the destination.
2235
      // FIXME: since we're shuffling with undef, can we just use the indices
2236
      //        into that?  This could be simpler.
2237
6
      SmallVector<int, 4> ExtMask;
2238
32
      for (unsigned i = 0; i != NumSrcElts; ++i)
26
        ExtMask.push_back(i);
2240
6
      ExtMask.resize(NumDstElts, -1);
2241
6
      llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(
2242
6
          SrcVal, llvm::UndefValue::get(SrcVal->getType()), ExtMask);
2243
      // build identity
2244
6
      SmallVector<int, 4> Mask;
2245
52
      for (unsigned i = 0; i != NumDstElts; ++i)
46
        Mask.push_back(i);
2247
2248
      // When the vector size is odd and .odd or .hi is used, the last element
2249
      // of the Elts constant array will be one past the size of the vector.
2250
      // Ignore the last element here, if it is greater than the mask size.
2251
6
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
2252
2
        NumSrcElts--;
2253
2254
      // Modify the mask so the source elements get shuffled in.
2255
30
      for (unsigned i = 0; i != NumSrcElts; ++i)
24
        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
2257
6
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, Mask);
2258
0
    } else {
2259
      // We should never shorten the vector
2260
0
      llvm_unreachable("unexpected shorten vector length");
2261
0
    }
2262
21
  } else {
2263
    // If the Src is a scalar (not a vector) it must be updating one element.
2264
21
    unsigned InIdx = getAccessedFieldNo(0, Elts);
2265
21
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
2266
21
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
2267
21
  }
2268
2269
29
  Builder.CreateStore(Vec, Dst.getExtVectorAddress(),
2270
29
                      Dst.isVolatileQualified());
2271
29
}
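Illustrative user code for the store-side swizzles handled above: assigning to a multi-element swizzle loads the whole vector, shuffles the new lanes in, and stores it back, while assigning to a single component is an insertelement:

typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float2 __attribute__((ext_vector_type(2)));
void setXY(float4 &v, float2 lo) { v.xy = lo; }   // read/modify/write with a shufflevector
void setY(float4 &v, float f)    { v.y = f;  }    // scalar update: insertelement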
2272
2273
/// Stores of global named registers are always calls to intrinsics.
2274
18
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
2275
18
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
2276
18
         "Bad type for register variable");
2277
18
  llvm::MDNode *RegName = cast<llvm::MDNode>(
2278
18
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
2279
18
  assert(RegName && "Register LValue is not metadata");
2280
2281
  // We accept integer and pointer types only
2282
18
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
2283
18
  llvm::Type *Ty = OrigTy;
2284
18
  if (OrigTy->isPointerTy())
2285
3
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
2286
18
  llvm::Type *Types[] = { Ty };
2287
2288
18
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
2289
18
  llvm::Value *Value = Src.getScalarVal();
2290
18
  if (OrigTy->isPointerTy())
2291
3
    Value = Builder.CreatePtrToInt(Value, Ty);
2292
18
  Builder.CreateCall(
2293
18
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
2294
18
}
2295
2296
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
2297
// generating write-barrier APIs. It is currently a global, ivar,
2298
// or neither.
2299
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
2300
                                 LValue &LV,
2301
1.14M
                                 bool IsMemberAccess=false) {
2302
1.14M
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
2303
1.14M
    return;
2304
2305
1.71k
  if (isa<ObjCIvarRefExpr>(E)) {
2306
252
    QualType ExpTy = E->getType();
2307
252
    if (IsMemberAccess && ExpTy->isPointerType()) {
2308
      // If ivar is a structure pointer, assigning to field of
2309
      // this struct follows gcc's behavior and makes it a non-ivar
2310
      // write-barrier conservatively.
2311
14
      ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2312
14
      if (ExpTy->isRecordType()) {
2313
6
        LV.setObjCIvar(false);
2314
6
        return;
2315
6
      }
2316
246
    }
2317
246
    LV.setObjCIvar(true);
2318
246
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
2319
246
    LV.setBaseIvarExp(Exp->getBase());
2320
246
    LV.setObjCArray(E->getType()->isArrayType());
2321
246
    return;
2322
246
  }
2323
2324
1.46k
  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
2325
892
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
2326
892
      if (VD->hasGlobalStorage()) {
2327
284
        LV.setGlobalObjCRef(true);
2328
284
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
2329
284
      }
2330
892
    }
2331
892
    LV.setObjCArray(E->getType()->isArrayType());
2332
892
    return;
2333
892
  }
2334
2335
573
  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
2336
20
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2337
20
    return;
2338
20
  }
2339
2340
553
  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
2341
26
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2342
26
    if (LV.isObjCIvar()) {
2343
      // If cast is to a structure pointer, follow gcc's behavior and make it
2344
      // a non-ivar write-barrier.
2345
12
      QualType ExpTy = E->getType();
2346
12
      if (ExpTy->isPointerType())
2347
10
        ExpTy = ExpTy->castAs<PointerType>()->getPointeeType();
2348
12
      if (ExpTy->isRecordType())
2349
8
        LV.setObjCIvar(false);
2350
12
    }
2351
26
    return;
2352
26
  }
2353
2354
527
  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
2355
0
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
2356
0
    return;
2357
0
  }
2358
2359
527
  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
2360
173
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2361
173
    return;
2362
173
  }
2363
2364
354
  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
2365
14
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2366
14
    return;
2367
14
  }
2368
2369
340
  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
2370
0
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
2371
0
    return;
2372
0
  }
2373
2374
340
  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
2375
139
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
2376
139
    if (LV.isObjCIvar() && !LV.isObjCArray())
2377
      // Using array syntax to assign to what an ivar points to is not the
2378
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
2379
38
      LV.setObjCIvar(false);
2380
101
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
2381
      // Using array syntax to assign to what a global points to is not the
2382
      // same as assigning to the global itself. {id *G;} G[i] = 0;
2383
4
      LV.setGlobalObjCRef(false);
2384
139
    return;
2385
139
  }
2386
2387
201
  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
2388
188
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
2389
    // We don't know if member is an 'ivar', but this flag is looked at
2390
    // only in the context of LV.isObjCIvar().
2391
188
    LV.setObjCArray(E->getType()->isArrayType());
2392
188
    return;
2393
188
  }
2394
201
}
2395
2396
static llvm::Value *
2397
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
2398
                                llvm::Value *V, llvm::Type *IRType,
2399
90.0k
                                StringRef Name = StringRef()) {
2400
90.0k
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
2401
90.0k
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
2402
90.0k
}
2403
2404
static LValue EmitThreadPrivateVarDeclLValue(
2405
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, Address Addr,
2406
248
    llvm::Type *RealVarTy, SourceLocation Loc) {
2407
248
  if (CGF.CGM.getLangOpts().OpenMPIRBuilder)
2408
0
    Addr = CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
2409
0
        CGF, VD, Addr, Loc);
2410
248
  else
2411
248
    Addr =
2412
248
        CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, Addr, Loc);
2413
2414
248
  Addr = CGF.Builder.CreateElementBitCast(Addr, RealVarTy);
2415
248
  return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2416
248
}
2417
2418
static Address emitDeclTargetVarDeclLValue(CodeGenFunction &CGF,
2419
104
                                           const VarDecl *VD, QualType T) {
2420
104
  llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
2421
104
      OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
2422
  // Return an invalid address if variable is MT_To and unified
2423
  // memory is not enabled. For all other cases: MT_Link and
2424
  // MT_To with unified memory, return a valid address.
2425
104
  if (!Res || (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2426
77
               !CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory()))
2427
94
    return Address::invalid();
2428
10
  assert(((*Res == OMPDeclareTargetDeclAttr::MT_Link) ||
2429
10
          (*Res == OMPDeclareTargetDeclAttr::MT_To &&
2430
10
           CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory())) &&
2431
10
         "Expected link clause OR to clause with unified memory enabled.");
2432
10
  QualType PtrTy = CGF.getContext().getPointerType(VD->getType());
2433
10
  Address Addr = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetVar(VD);
2434
10
  return CGF.EmitLoadOfPointer(Addr, PtrTy->castAs<PointerType>());
2435
10
}
2436
2437
Address
2438
CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
2439
                                     LValueBaseInfo *PointeeBaseInfo,
2440
80.9k
                                     TBAAAccessInfo *PointeeTBAAInfo) {
2441
80.9k
  llvm::LoadInst *Load =
2442
80.9k
      Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
2443
80.9k
  CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
2444
2445
80.9k
  CharUnits Align = CGM.getNaturalTypeAlignment(
2446
80.9k
      RefLVal.getType()->getPointeeType(), PointeeBaseInfo, PointeeTBAAInfo,
2447
80.9k
      /* forPointeeType= */ true);
2448
80.9k
  return Address(Load, Align);
2449
80.9k
}
2450
2451
59.9k
LValue CodeGenFunction::EmitLoadOfReferenceLValue(LValue RefLVal) {
2452
59.9k
  LValueBaseInfo PointeeBaseInfo;
2453
59.9k
  TBAAAccessInfo PointeeTBAAInfo;
2454
59.9k
  Address PointeeAddr = EmitLoadOfReference(RefLVal, &PointeeBaseInfo,
2455
59.9k
                                            &PointeeTBAAInfo);
2456
59.9k
  return MakeAddrLValue(PointeeAddr, RefLVal.getType()->getPointeeType(),
2457
59.9k
                        PointeeBaseInfo, PointeeTBAAInfo);
2458
59.9k
}
2459
2460
Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
2461
                                           const PointerType *PtrTy,
2462
                                           LValueBaseInfo *BaseInfo,
2463
16.9k
                                           TBAAAccessInfo *TBAAInfo) {
2464
16.9k
  llvm::Value *Addr = Builder.CreateLoad(Ptr);
2465
16.9k
  return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(),
2466
16.9k
                                                   BaseInfo, TBAAInfo,
2467
16.9k
                                                   /*forPointeeType=*/true));
2468
16.9k
}
2469
2470
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
2471
15.5k
                                                const PointerType *PtrTy) {
2472
15.5k
  LValueBaseInfo BaseInfo;
2473
15.5k
  TBAAAccessInfo TBAAInfo;
2474
15.5k
  Address Addr = EmitLoadOfPointer(PtrAddr, PtrTy, &BaseInfo, &TBAAInfo);
2475
15.5k
  return MakeAddrLValue(Addr, PtrTy->getPointeeType(), BaseInfo, TBAAInfo);
2476
15.5k
}
2477
2478
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
2479
90.2k
                                      const Expr *E, const VarDecl *VD) {
2480
90.2k
  QualType T = E->getType();
2481
2482
  // If it's thread_local, emit a call to its wrapper function instead.
2483
90.2k
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
2484
226
      CGF.CGM.getCXXABI().usesThreadWrapperFunction(VD))
2485
157
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
2486
  // Check if the variable is marked as declare target with link clause in
2487
  // device codegen.
2488
90.0k
  if (CGF.getLangOpts().OpenMPIsDevice) {
2489
104
    Address Addr = emitDeclTargetVarDeclLValue(CGF, VD, T);
2490
104
    if (Addr.isValid())
2491
10
      return CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2492
90.0k
  }
2493
2494
90.0k
  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
2495
90.0k
  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
2496
90.0k
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
2497
90.0k
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
2498
90.0k
  Address Addr(V, Alignment);
2499
  // Emit reference to the private copy of the variable if it is an OpenMP
2500
  // threadprivate variable.
2501
90.0k
  if (CGF.getLangOpts().OpenMP && !CGF.getLangOpts().OpenMPSimd &&
2502
11.5k
      VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2503
138
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, Addr, RealVarTy,
2504
138
                                          E->getExprLoc());
2505
138
  }
2506
89.9k
  LValue LV = VD->getType()->isReferenceType() ?
2507
6.63k
      CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
2508
6.63k
                                    AlignmentSource::Decl) :
2509
83.2k
      CGF.MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2510
89.9k
  setObjCGCLValueClass(CGF.getContext(), E, LV);
2511
89.9k
  return LV;
2512
89.9k
}
2513
2514
static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
2515
154k
                                               GlobalDecl GD) {
2516
154k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2517
154k
  if (FD->hasAttr<WeakRefAttr>()) {
2518
11
    ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
2519
11
    return aliasee.getPointer();
2520
11
  }
2521
2522
154k
  llvm::Constant *V = CGM.GetAddrOfFunction(GD);
2523
154k
  if (!FD->hasPrototype()) {
2524
823
    if (const FunctionProtoType *Proto =
2525
10
            FD->getType()->getAs<FunctionProtoType>()) {
2526
      // Ugly case: for a K&R-style definition, the type of the definition
2527
      // isn't the same as the type of a use.  Correct for this with a
2528
      // bitcast.
2529
10
      QualType NoProtoType =
2530
10
          CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
2531
10
      NoProtoType = CGM.getContext().getPointerType(NoProtoType);
2532
10
      V = llvm::ConstantExpr::getBitCast(V,
2533
10
                                      CGM.getTypes().ConvertType(NoProtoType));
2534
10
    }
2535
823
  }
2536
154k
  return V;
2537
154k
}
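A hedged illustration of the K&R special case handled above (editorial example, not part of CGExpr.cpp): the use site only sees an unprototyped declaration, while the definition is written K&R-style, so the two function types differ and the emitted function pointer is bitcast to the no-prototype type.

    int f();            /* use site: no prototype visible           */

    int f(a)            /* definition: K&R-style parameter list     */
        int a;
    { return a + 1; }

    int main(void) { return f(41) == 42 ? 0 : 1; }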
2538
2539
static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF, const Expr *E,
2540
3.06k
                                     GlobalDecl GD) {
2541
3.06k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
2542
3.06k
  llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, GD);
2543
3.06k
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
2544
3.06k
  return CGF.MakeAddrLValue(V, E->getType(), Alignment,
2545
3.06k
                            AlignmentSource::Decl);
2546
3.06k
}
2547
2548
static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
2549
4.58k
                                      llvm::Value *ThisValue) {
2550
4.58k
  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
2551
4.58k
  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
2552
4.58k
  return CGF.EmitLValueForField(LV, FD);
2553
4.58k
}
2554
2555
/// Named Registers are named metadata pointing to the register name
2556
/// which will be read from/written to as an argument to the intrinsic
2557
/// @llvm.read/write_register.
2558
/// So far, only the name is being passed down, but other options such as
2559
/// register type, allocation type or even optimization options could be
2560
/// passed down via the metadata node.
2561
38
static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM) {
2562
38
  SmallString<64> Name("llvm.named.register.");
2563
38
  AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>();
2564
38
  assert(Asm->getLabel().size() < 64-Name.size() &&
2565
38
      "Register name too big");
2566
38
  Name.append(Asm->getLabel());
2567
38
  llvm::NamedMDNode *M =
2568
38
    CGM.getModule().getOrInsertNamedMetadata(Name);
2569
38
  if (M->getNumOperands() == 0) {
2570
19
    llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
2571
19
                                              Asm->getLabel());
2572
19
    llvm::Metadata *Ops[] = {Str};
2573
19
    M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
2574
19
  }
2575
2576
38
  CharUnits Alignment = CGM.getContext().getDeclAlign(VD);
2577
2578
38
  llvm::Value *Ptr =
2579
38
    llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0));
2580
38
  return LValue::MakeGlobalReg(Address(Ptr, Alignment), VD->getType());
2581
38
}
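For context, a hedged sketch of the construct the helper above serves (an assumed example; whether a given register name is accepted depends on the target, and the exact IR is illustrative): a global register variable declared with an asm label is accessed through @llvm.read_register / @llvm.write_register, with the register name carried by the "llvm.named.register.<name>" metadata node created above.

    register unsigned long current_sp asm("sp");

    unsigned long read_sp(void) {
      return current_sp;   /* lowered to a read of the named register "sp" */
    }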
2582
2583
/// Determine whether we can emit a reference to \p VD from the current
2584
/// context, despite not necessarily having seen an odr-use of the variable in
2585
/// this context.
2586
static bool canEmitSpuriousReferenceToVariable(CodeGenFunction &CGF,
2587
                                               const DeclRefExpr *E,
2588
                                               const VarDecl *VD,
2589
12
                                               bool IsConstant) {
2590
  // For a variable declared in an enclosing scope, do not emit a spurious
2591
  // reference even if we have a capture, as that will emit an unwarranted
2592
  // reference to our capture state, and will likely generate worse code than
2593
  // emitting a local copy.
2594
12
  if (E->refersToEnclosingVariableOrCapture())
2595
0
    return false;
2596
2597
  // For a local declaration declared in this function, we can always reference
2598
  // it even if we don't have an odr-use.
2599
12
  if (VD->hasLocalStorage()) {
2600
8
    return VD->getDeclContext() ==
2601
8
           dyn_cast_or_null<DeclContext>(CGF.CurCodeDecl);
2602
8
  }
2603
2604
  // For a global declaration, we can emit a reference to it if we know
2605
  // for sure that we are able to emit a definition of it.
2606
4
  VD = VD->getDefinition(CGF.getContext());
2607
4
  if (!VD)
2608
1
    return false;
2609
2610
  // Don't emit a spurious reference if it might be to a variable that only
2611
  // exists on a different device / target.
2612
  // FIXME: This is unnecessarily broad. Check whether this would actually be a
2613
  // cross-target reference.
2614
3
  if (CGF.getLangOpts().OpenMP || CGF.getLangOpts().CUDA ||
2615
2
      CGF.getLangOpts().OpenCL) {
2616
1
    return false;
2617
1
  }
2618
2619
  // We can emit a spurious reference only if the linkage implies that we'll
2620
  // be emitting a non-interposable symbol that will be retained until link
2621
  // time.
2622
2
  switch (CGF.CGM.getLLVMLinkageVarDefinition(VD, IsConstant)) {
2623
2
  case llvm::GlobalValue::ExternalLinkage:
2624
2
  case llvm::GlobalValue::LinkOnceODRLinkage:
2625
2
  case llvm::GlobalValue::WeakODRLinkage:
2626
2
  case llvm::GlobalValue::InternalLinkage:
2627
2
  case llvm::GlobalValue::PrivateLinkage:
2628
2
    return true;
2629
0
  default:
2630
0
    return false;
2631
2
  }
2632
2
}
2633
2634
1.12M
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
2635
1.12M
  const NamedDecl *ND = E->getDecl();
2636
1.12M
  QualType T = E->getType();
2637
2638
1.12M
  assert(E->isNonOdrUse() != NOUR_Unevaluated &&
2639
1.12M
         "should not emit an unevaluated operand");
2640
2641
1.12M
  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2642
    // Global Named registers access via intrinsics only
2643
1.12M
    if (VD->getStorageClass() == SC_Register &&
2644
701
        VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl())
2645
38
      return EmitGlobalNamedRegister(VD, CGM);
2646
2647
    // If this DeclRefExpr does not constitute an odr-use of the variable,
2648
    // we're not permitted to emit a reference to it in general, and it might
2649
    // not be captured if capture would be necessary for a use. Emit the
2650
    // constant value directly instead.
2651
1.12M
    if (E->isNonOdrUse() == NOUR_Constant &&
2652
95
        (VD->getType()->isReferenceType() ||
2653
91
         
         !canEmitSpuriousReferenceToVariable(*this, E, VD, true))) {
2654
91
      VD->getAnyInitializer(VD);
2655
91
      llvm::Constant *Val = ConstantEmitter(*this).emitAbstract(
2656
91
          E->getLocation(), *VD->evaluateValue(), VD->getType());
2657
91
      assert(Val && "failed to emit constant expression");
2658
2659
91
      Address Addr = Address::invalid();
2660
91
      if (!VD->getType()->isReferenceType()) {
2661
        // Spill the constant value to a global.
2662
8
        Addr = CGM.createUnnamedGlobalFrom(*VD, Val,
2663
8
                                           getContext().getDeclAlign(VD));
2664
8
        llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
2665
8
        auto *PTy = llvm::PointerType::get(
2666
8
            VarTy, getContext().getTargetAddressSpace(VD->getType()));
2667
8
        if (PTy != Addr.getType())
2668
1
          Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy);
2669
83
      } else {
2670
        // Should we be using the alignment of the constant pointer we emitted?
2671
83
        CharUnits Alignment =
2672
83
            CGM.getNaturalTypeAlignment(E->getType(),
2673
83
                                        /* BaseInfo= */ nullptr,
2674
83
                                        /* TBAAInfo= */ nullptr,
2675
83
                                        /* forPointeeType= */ true);
2676
83
        Addr = Address(Val, Alignment);
2677
83
      }
2678
91
      return MakeAddrLValue(Addr, T, AlignmentSource::Decl);
2679
91
    }
2680
2681
    // FIXME: Handle other kinds of non-odr-use DeclRefExprs.
2682
2683
    // Check for captured variables.
2684
1.12M
    if (E->refersToEnclosingVariableOrCapture()) {
2685
104k
      VD = VD->getCanonicalDecl();
2686
104k
      if (auto *FD = LambdaCaptureFields.lookup(VD))
2687
3.75k
        return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
2688
100k
      if (CapturedStmtInfo) {
2689
95.5k
        auto I = LocalDeclMap.find(VD);
2690
95.5k
        if (I != LocalDeclMap.end()) {
2691
94.7k
          LValue CapLVal;
2692
94.7k
          if (VD->getType()->isReferenceType())
2693
4.22k
            CapLVal = EmitLoadOfReferenceLValue(I->second, VD->getType(),
2694
4.22k
                                                AlignmentSource::Decl);
2695
90.5k
          else
2696
90.5k
            CapLVal = MakeAddrLValue(I->second, T);
2697
          // Mark lvalue as nontemporal if the variable is marked as nontemporal
2698
          // in simd context.
2699
94.7k
          if (getLangOpts().OpenMP &&
2700
94.7k
              CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2701
176
            CapLVal.setNontemporal(/*Value=*/true);
2702
94.7k
          return CapLVal;
2703
94.7k
        }
2704
827
        LValue CapLVal =
2705
827
            EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
2706
827
                                    CapturedStmtInfo->getContextValue());
2707
827
        CapLVal = MakeAddrLValue(
2708
827
            Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)),
2709
827
            CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
2710
827
            CapLVal.getTBAAInfo());
2711
        // Mark lvalue as nontemporal if the variable is marked as nontemporal
2712
        // in simd context.
2713
827
        if (getLangOpts().OpenMP &&
2714
776
            CGM.getOpenMPRuntime().isNontemporalDecl(VD))
2715
0
          CapLVal.setNontemporal(/*Value=*/true);
2716
827
        return CapLVal;
2717
827
      }
2718
2719
5.14k
      assert(isa<BlockDecl>(CurCodeDecl));
2720
5.14k
      Address addr = GetAddrOfBlockDecl(VD);
2721
5.14k
      return MakeAddrLValue(addr, T, AlignmentSource::Decl);
2722
5.14k
    }
2723
1.12M
  }
2724
2725
  // FIXME: We should be able to assert this for FunctionDecls as well!
2726
  // FIXME: We should be able to assert this for all DeclRefExprs, not just
2727
  // those with a valid source location.
2728
1.02M
  assert((ND->isUsed(false) || !isa<VarDecl>(ND) || E->isNonOdrUse() ||
2729
1.02M
          !E->getLocation().isValid()) &&
2730
1.02M
         "Should not use decl without marking it used!");
2731
2732
1.02M
  if (ND->hasAttr<WeakRefAttr>()) {
2733
20
    const auto *VD = cast<ValueDecl>(ND);
2734
20
    ConstantAddress Aliasee = CGM.GetWeakRefReference(VD);
2735
20
    return MakeAddrLValue(Aliasee, T, AlignmentSource::Decl);
2736
20
  }
2737
2738
1.02M
  if (const auto *VD = dyn_cast<VarDecl>(ND)) {
2739
    // Check if this is a global variable.
2740
1.02M
    if (VD->hasLinkage() || VD->isStaticDataMember())
2741
90.2k
      return EmitGlobalVarDeclLValue(*this, E, VD);
2742
2743
930k
    Address addr = Address::invalid();
2744
2745
    // The variable should generally be present in the local decl map.
2746
930k
    auto iter = LocalDeclMap.find(VD);
2747
930k
    if (iter != LocalDeclMap.end()) {
2748
929k
      addr = iter->second;
2749
2750
    // Otherwise, it might be a static local we haven't emitted yet for
2751
    // some reason; most likely, because it's in an outer function.
2752
612
    } else if (VD->isStaticLocal()) {
2753
612
      addr = Address(CGM.getOrCreateStaticVarDecl(
2754
612
          *VD, CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false)),
2755
612
                     getContext().getDeclAlign(VD));
2756
2757
    // No other cases for now.
2758
0
    } else {
2759
0
      llvm_unreachable("DeclRefExpr for Decl not entered in LocalDeclMap?");
2760
0
    }
2761
2762
2763
    // Check for OpenMP threadprivate variables.
2764
930k
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
2765
228k
        VD->hasAttr<OMPThreadPrivateDeclAttr>()) {
2766
110
      return EmitThreadPrivateVarDeclLValue(
2767
110
          *this, VD, T, addr, getTypes().ConvertTypeForMem(VD->getType()),
2768
110
          E->getExprLoc());
2769
110
    }
2770
2771
    // Drill into block byref variables.
2772
930k
    bool isBlockByref = VD->isEscapingByref();
2773
930k
    if (isBlockByref) {
2774
40
      addr = emitBlockByrefAddress(addr, VD);
2775
40
    }
2776
2777
    // Drill into reference types.
2778
930k
    LValue LV = VD->getType()->isReferenceType() ?
2779
49.0k
        EmitLoadOfReferenceLValue(addr, VD->getType(), AlignmentSource::Decl) :
2780
881k
        MakeAddrLValue(addr, T, AlignmentSource::Decl);
2781
2782
930k
    bool isLocalStorage = VD->hasLocalStorage();
2783
2784
930k
    bool NonGCable = isLocalStorage &&
2785
925k
                     !VD->getType()->isReferenceType() &&
2786
876k
                     !isBlockByref;
2787
930k
    if (NonGCable) {
2788
876k
      LV.getQuals().removeObjCGCAttr();
2789
876k
      LV.setNonGC(true);
2790
876k
    }
2791
2792
930k
    bool isImpreciseLifetime =
2793
930k
      (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>());
2794
930k
    if (isImpreciseLifetime)
2795
925k
      LV.setARCPreciseLifetime(ARCImpreciseLifetime);
2796
930k
    setObjCGCLValueClass(getContext(), E, LV);
2797
930k
    return LV;
2798
930k
  }
2799
2800
3.07k
  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
2801
3.06k
    return EmitFunctionDeclLValue(*this, E, FD);
2802
2803
  // FIXME: While we're emitting a binding from an enclosing scope, all other
2804
  // DeclRefExprs we see should be implicitly treated as if they also refer to
2805
  // an enclosing scope.
2806
13
  if (const auto *BD = dyn_cast<BindingDecl>(ND))
2807
8
    return EmitLValue(BD->getBinding());
2808
2809
  // We can form DeclRefExprs naming GUID declarations when reconstituting
2810
  // non-type template parameters into expressions.
2811
5
  if (const auto *GD = dyn_cast<MSGuidDecl>(ND))
2812
5
    return MakeAddrLValue(CGM.GetAddrOfMSGuidDecl(GD), T,
2813
5
                          AlignmentSource::Decl);
2814
2815
0
  llvm_unreachable("Unhandled DeclRefExpr");
2816
0
}
2817
2818
48.4k
LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
2819
  // __extension__ doesn't affect lvalue-ness.
2820
48.4k
  if (E->getOpcode() == UO_Extension)
2821
0
    return EmitLValue(E->getSubExpr());
2822
2823
48.4k
  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
2824
48.4k
  switch (E->getOpcode()) {
2825
0
  default: llvm_unreachable("Unknown unary operator lvalue!");
2826
36.7k
  case UO_Deref: {
2827
36.7k
    QualType T = E->getSubExpr()->getType()->getPointeeType();
2828
36.7k
    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
2829
2830
36.7k
    LValueBaseInfo BaseInfo;
2831
36.7k
    TBAAAccessInfo TBAAInfo;
2832
36.7k
    Address Addr = EmitPointerWithAlignment(E->getSubExpr(), &BaseInfo,
2833
36.7k
                                            &TBAAInfo);
2834
36.7k
    LValue LV = MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
2835
36.7k
    LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
2836
2837
    // We should not generate __weak write barrier on indirect reference
2838
    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
2839
    // But, we continue to generate __strong write barrier on indirect write
2840
    // into a pointer to object.
2841
36.7k
    if (getLangOpts().ObjC &&
2842
9.23k
        getLangOpts().getGC() != LangOptions::NonGC &&
2843
44
        LV.isObjCWeak())
2844
10
      LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
2845
36.7k
    return LV;
2846
0
  }
2847
215
  case UO_Real:
2848
215
  case UO_Imag: {
2849
215
    LValue LV = EmitLValue(E->getSubExpr());
2850
215
    assert(LV.isSimple() && "real/imag on non-ordinary l-value");
2851
2852
    // __real is valid on scalars.  This is a faster way of testing that.
2853
    // __imag can only produce an rvalue on scalars.
2854
215
    if (E->getOpcode() == UO_Real &&
2855
109
        !LV.getAddress(*this).getElementType()->isStructTy()) {
2856
4
      assert(E->getSubExpr()->getType()->isArithmeticType());
2857
4
      return LV;
2858
4
    }
2859
2860
211
    QualType T = ExprTy->castAs<ComplexType>()->getElementType();
2861
2862
211
    Address Component =
2863
211
        (E->getOpcode() == UO_Real
2864
105
             ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
2865
106
             : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
2866
211
    LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
2867
211
                                   CGM.getTBAAInfoForSubobject(LV, T));
2868
211
    ElemLV.getQuals().addQualifiers(LV.getQuals());
2869
211
    return ElemLV;
2870
211
  }
2871
11.4k
  case UO_PreInc:
2872
11.4k
  case UO_PreDec: {
2873
11.4k
    LValue LV = EmitLValue(E->getSubExpr());
2874
11.4k
    bool isInc = E->getOpcode() == UO_PreInc;
2875
2876
11.4k
    if (E->getType()->isAnyComplexType())
2877
0
      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
2878
11.4k
    else
2879
11.4k
      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
2880
11.4k
    return LV;
2881
11.4k
  }
2882
48.4k
  }
2883
48.4k
}
2884
2885
48.1k
LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
2886
48.1k
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
2887
48.1k
                        E->getType(), AlignmentSource::Decl);
2888
48.1k
}
2889
2890
15
LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
2891
15
  return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
2892
15
                        E->getType(), AlignmentSource::Decl);
2893
15
}
2894
2895
522
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
2896
522
  auto SL = E->getFunctionName();
2897
522
  assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
2898
522
  StringRef FnName = CurFn->getName();
2899
522
  if (FnName.startswith("\01"))
2900
84
    FnName = FnName.substr(1);
2901
522
  StringRef NameItems[] = {
2902
522
      PredefinedExpr::getIdentKindName(E->getIdentKind()), FnName};
2903
522
  std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
2904
522
  if (auto *BD = dyn_cast_or_null<BlockDecl>(CurCodeDecl)) {
2905
28
    std::string Name = std::string(SL->getString());
2906
28
    if (!Name.empty()) {
2907
26
      unsigned Discriminator =
2908
26
          CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
2909
26
      if (Discriminator)
2910
13
        Name += "_" + Twine(Discriminator + 1).str();
2911
26
      auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
2912
26
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2913
2
    } else {
2914
2
      auto C =
2915
2
          CGM.GetAddrOfConstantCString(std::string(FnName), GVName.c_str());
2916
2
      return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2917
2
    }
2918
494
  }
2919
494
  auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
2920
494
  return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
2921
494
}
2922
2923
/// Emit a type description suitable for use by a runtime sanitizer library. The
2924
/// format of a type descriptor is
2925
///
2926
/// \code
2927
///   { i16 TypeKind, i16 TypeInfo }
2928
/// \endcode
2929
///
2930
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
2931
/// integer, 1 for a floating point value, and -1 for anything else.
2932
2.66k
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
2933
  // Only emit each type's descriptor once.
2934
2.66k
  if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T))
2935
2.11k
    return C;
2936
2937
542
  uint16_t TypeKind = -1;
2938
542
  uint16_t TypeInfo = 0;
2939
2940
542
  if (T->isIntegerType()) {
2941
267
    TypeKind = 0;
2942
267
    TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) |
2943
137
               (T->isSignedIntegerType() ? 1 : 0);
2944
275
  } else if (T->isFloatingType()) {
2945
10
    TypeKind = 1;
2946
10
    TypeInfo = getContext().getTypeSize(T);
2947
10
  }
2948
2949
  // Format the type name as if for a diagnostic, including quotes and
2950
  // optionally an 'aka'.
2951
542
  SmallString<32> Buffer;
2952
542
  CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
2953
542
                                    (intptr_t)T.getAsOpaquePtr(),
2954
542
                                    StringRef(), StringRef(), None, Buffer,
2955
542
                                    None);
2956
2957
542
  llvm::Constant *Components[] = {
2958
542
    Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
2959
542
    llvm::ConstantDataArray::getString(getLLVMContext(), Buffer)
2960
542
  };
2961
542
  llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components);
2962
2963
542
  auto *GV = new llvm::GlobalVariable(
2964
542
      CGM.getModule(), Descriptor->getType(),
2965
542
      /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
2966
542
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
2967
542
  CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
2968
2969
  // Remember the descriptor for this type.
2970
542
  CGM.setTypeDescriptorInMap(T, GV);
2971
2972
542
  return GV;
2973
542
}
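A minimal editorial sketch of the { i16 TypeKind, i16 TypeInfo } encoding described in the comment above, worked for a 32-bit signed int (the concrete values are assumptions derived from that scheme, not read back from a real descriptor):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint16_t TypeKind = 0;                      // 0 = integer
      unsigned Log2Bits = 5;                      // llvm::Log2_32(32)
      bool IsSigned = true;
      uint16_t TypeInfo = (Log2Bits << 1) | (IsSigned ? 1 : 0);
      assert(TypeInfo == 11);                     // (5 << 1) | 1
      (void)TypeKind;                             // the type name (e.g. "'int'") follows in the descriptor
      return 0;
    }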
2974
2975
2.66k
llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) {
2976
2.66k
  llvm::Type *TargetTy = IntPtrTy;
2977
2978
2.66k
  if (V->getType() == TargetTy)
2979
635
    return V;
2980
2981
  // Floating-point types which fit into intptr_t are bitcast to integers
2982
  // and then passed directly (after zero-extension, if necessary).
2983
2.02k
  if (V->getType()->isFloatingPointTy()) {
2984
12
    unsigned Bits = V->getType()->getPrimitiveSizeInBits();
2985
12
    if (Bits <= TargetTy->getIntegerBitWidth())
2986
11
      V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(),
2987
11
                                                         Bits));
2988
12
  }
2989
2990
  // Integers which fit in intptr_t are zero-extended and passed directly.
2991
2.02k
  if (V->getType()->isIntegerTy() &&
2992
1.65k
      V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth())
2993
1.65k
    return Builder.CreateZExt(V, TargetTy);
2994
2995
  // Pointers are passed directly, everything else is passed by address.
2996
379
  if (!V->getType()->isPointerTy()) {
2997
3
    Address Ptr = CreateDefaultAlignTempAlloca(V->getType());
2998
3
    Builder.CreateStore(V, Ptr);
2999
3
    V = Ptr.getPointer();
3000
3
  }
3001
379
  return Builder.CreatePtrToInt(V, TargetTy);
3002
379
}
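A hedged illustration of the floating-point case above (editorial, assuming a 64-bit intptr_t): the value is bitcast to a same-width integer and then zero-extended when it is passed as an intptr_t handler argument.

    #include <cstdint>
    #include <cstring>

    uint64_t lowerFloatForHandler(float F) {
      uint32_t Bits;
      std::memcpy(&Bits, &F, sizeof Bits);   // the "bitcast" step
      return Bits;                           // zero-extension to intptr_t width
    }

    int main() { return lowerFloatForHandler(1.0f) == 0x3f800000u ? 0 : 1; }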
3003
3004
/// Emit a representation of a SourceLocation for passing to a handler
3005
/// in a sanitizer runtime library. The format for this data is:
3006
/// \code
3007
///   struct SourceLocation {
3008
///     const char *Filename;
3009
///     int32_t Line, Column;
3010
///   };
3011
/// \endcode
3012
/// For an invalid SourceLocation, the Filename pointer is null.
3013
1.98k
llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
3014
1.98k
  llvm::Constant *Filename;
3015
1.98k
  int Line, Column;
3016
3017
1.98k
  PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
3018
1.98k
  if (PLoc.isValid()) {
3019
1.95k
    StringRef FilenameString = PLoc.getFilename();
3020
3021
1.95k
    int PathComponentsToStrip =
3022
1.95k
        CGM.getCodeGenOpts().EmitCheckPathComponentsToStrip;
3023
1.95k
    if (PathComponentsToStrip < 0) {
3024
3
      assert(PathComponentsToStrip != INT_MIN);
3025
3
      int PathComponentsToKeep = -PathComponentsToStrip;
3026
3
      auto I = llvm::sys::path::rbegin(FilenameString);
3027
3
      auto E = llvm::sys::path::rend(FilenameString);
3028
15
      while (I != E && --PathComponentsToKeep)
3029
12
        ++I;
3030
3031
3
      FilenameString = FilenameString.substr(I - E);
3032
1.95k
    } else if (PathComponentsToStrip > 0) {
3033
2
      auto I = llvm::sys::path::begin(FilenameString);
3034
2
      auto E = llvm::sys::path::end(FilenameString);
3035
15
      while (I != E && PathComponentsToStrip--)
3036
13
        ++I;
3037
3038
2
      if (I != E)
3039
1
        FilenameString =
3040
1
            FilenameString.substr(I - llvm::sys::path::begin(FilenameString));
3041
1
      else
3042
1
        FilenameString = llvm::sys::path::filename(FilenameString);
3043
2
    }
3044
3045
1.95k
    auto FilenameGV =
3046
1.95k
        CGM.GetAddrOfConstantCString(std::string(FilenameString), ".src");
3047
1.95k
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(
3048
1.95k
                          cast<llvm::GlobalVariable>(FilenameGV.getPointer()));
3049
1.95k
    Filename = FilenameGV.getPointer();
3050
1.95k
    Line = PLoc.getLine();
3051
1.95k
    Column = PLoc.getColumn();
3052
24
  } else {
3053
24
    Filename = llvm::Constant::getNullValue(Int8PtrTy);
3054
24
    Line = Column = 0;
3055
24
  }
3056
3057
1.98k
  llvm::Constant *Data[] = {Filename, Builder.getInt32(Line),
3058
1.98k
                            Builder.getInt32(Column)};
3059
3060
1.98k
  return llvm::ConstantStruct::getAnon(Data);
3061
1.98k
}
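For reference, a hedged C view of the record emitted above, as a handler in a sanitizer runtime would receive it (the struct and field names mirror the comment; this is an editorial sketch, not the compiler-rt declaration):

    #include <stdint.h>

    struct EmittedSourceLocation {
      const char *Filename;   /* null for an invalid SourceLocation */
      int32_t Line;
      int32_t Column;
    };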
3062
3063
namespace {
3064
/// Specify under what conditions this check can be recovered
3065
enum class CheckRecoverableKind {
3066
  /// Always terminate program execution if this check fails.
3067
  Unrecoverable,
3068
  /// Check supports recovering, runtime has both fatal (noreturn) and
3069
  /// non-fatal handlers for this check.
3070
  Recoverable,
3071
  /// Runtime conditionally aborts, always need to support recovery.
3072
  AlwaysRecoverable
3073
};
3074
}
3075
3076
1.77k
static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
3077
1.77k
  assert(Kind.countPopulation() == 1);
3078
1.77k
  if (Kind == SanitizerKind::Function || Kind == SanitizerKind::Vptr)
3079
57
    return CheckRecoverableKind::AlwaysRecoverable;
3080
1.71k
  else if (Kind == SanitizerKind::Return || Kind == SanitizerKind::Unreachable)
3081
19
    return CheckRecoverableKind::Unrecoverable;
3082
1.69k
  else
3083
1.69k
    return CheckRecoverableKind::Recoverable;
3084
1.77k
}
3085
3086
namespace {
3087
struct SanitizerHandlerInfo {
3088
  char const *const Name;
3089
  unsigned Version;
3090
};
3091
}
3092
3093
const SanitizerHandlerInfo SanitizerHandlers[] = {
3094
#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
3095
    LIST_SANITIZER_CHECKS
3096
#undef SANITIZER_CHECK
3097
};
3098
3099
static void emitCheckHandlerCall(CodeGenFunction &CGF,
3100
                                 llvm::FunctionType *FnType,
3101
                                 ArrayRef<llvm::Value *> FnArgs,
3102
                                 SanitizerHandler CheckHandler,
3103
                                 CheckRecoverableKind RecoverKind, bool IsFatal,
3104
1.51k
                                 llvm::BasicBlock *ContBB) {
3105
1.51k
  assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
3106
1.51k
  Optional<ApplyDebugLocation> DL;
3107
1.51k
  if (!CGF.Builder.getCurrentDebugLocation()) {
3108
    // Ensure that the call has at least an artificial debug location.
3109
1.50k
    DL.emplace(CGF, SourceLocation());
3110
1.50k
  }
3111
1.51k
  bool NeedsAbortSuffix =
3112
1.51k
      IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
3113
1.51k
  bool MinimalRuntime = CGF.CGM.getCodeGenOpts().SanitizeMinimalRuntime;
3114
1.51k
  const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
3115
1.51k
  const StringRef CheckName = CheckInfo.Name;
3116
1.51k
  std::string FnName = "__ubsan_handle_" + CheckName.str();
3117
1.51k
  if (CheckInfo.Version && 
!MinimalRuntime369
)
3118
369
    FnName += "_v" + llvm::utostr(CheckInfo.Version);
3119
1.51k
  if (MinimalRuntime)
3120
3
    FnName += "_minimal";
3121
1.51k
  if (NeedsAbortSuffix)
3122
799
    FnName += "_abort";
3123
1.51k
  bool MayReturn =
3124
1.51k
      !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
3125
3126
1.51k
  llvm::AttrBuilder B;
3127
1.51k
  if (!MayReturn) {
3128
794
    B.addAttribute(llvm::Attribute::NoReturn)
3129
794
        .addAttribute(llvm::Attribute::NoUnwind);
3130
794
  }
3131
1.51k
  B.addAttribute(llvm::Attribute::UWTable);
3132
3133
1.51k
  llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
3134
1.51k
      FnType, FnName,
3135
1.51k
      llvm::AttributeList::get(CGF.getLLVMContext(),
3136
1.51k
                               llvm::AttributeList::FunctionIndex, B),
3137
1.51k
      /*Local=*/true);
3138
1.51k
  llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
3139
1.51k
  if (!MayReturn) {
3140
794
    HandlerCall->setDoesNotReturn();
3141
794
    CGF.Builder.CreateUnreachable();
3142
718
  } else {
3143
718
    CGF.Builder.CreateBr(ContBB);
3144
718
  }
3145
1.51k
}
3146
3147
void CodeGenFunction::EmitCheck(
3148
    ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
3149
    SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
3150
1.89k
    ArrayRef<llvm::Value *> DynamicArgs) {
3151
1.89k
  assert(IsSanitizerScope);
3152
1.89k
  assert(Checked.size() > 0);
3153
1.89k
  assert(CheckHandler >= 0 &&
3154
1.89k
         size_t(CheckHandler) < llvm::array_lengthof(SanitizerHandlers));
3155
1.89k
  const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
3156
3157
1.89k
  llvm::Value *FatalCond = nullptr;
3158
1.89k
  llvm::Value *RecoverableCond = nullptr;
3159
1.89k
  llvm::Value *TrapCond = nullptr;
3160
4.11k
  for (int i = 0, n = Checked.size(); i < n; ++i) {
3161
2.21k
    llvm::Value *Check = Checked[i].first;
3162
    // -fsanitize-trap= overrides -fsanitize-recover=.
3163
2.21k
    llvm::Value *&Cond =
3164
2.21k
        CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second)
3165
444
            ? TrapCond
3166
1.77k
            : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
3167
836
                  ? RecoverableCond
3168
935
                  : FatalCond;
3169
1.89k
    Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
3170
2.21k
  }
3171
3172
1.89k
  if (TrapCond)
3173
387
    EmitTrapCheck(TrapCond);
3174
1.89k
  if (!FatalCond && !RecoverableCond)
3175
386
    return;
3176
3177
1.51k
  llvm::Value *JointCond;
3178
1.51k
  if (FatalCond && RecoverableCond)
3179
0
    JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
3180
1.51k
  else
3181
1.51k
    JointCond = FatalCond ? FatalCond : RecoverableCond;
3182
1.51k
  assert(JointCond);
3183
3184
1.51k
  CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
3185
1.51k
  assert(SanOpts.has(Checked[0].second));
3186
1.51k
#ifndef NDEBUG
3187
1.77k
  for (int i = 1, n = Checked.size(); i < n; ++i) {
3188
260
    assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
3189
260
           "All recoverable kinds in a single check must be same!");
3190
260
    assert(SanOpts.has(Checked[i].second));
3191
260
  }
3192
1.51k
#endif
3193
3194
1.51k
  llvm::BasicBlock *Cont = createBasicBlock("cont");
3195
1.51k
  llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
3196
1.51k
  llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
3197
  // Give hint that we very much don't expect to execute the handler
3198
  // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
3199
1.51k
  llvm::MDBuilder MDHelper(getLLVMContext());
3200
1.51k
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3201
1.51k
  Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
3202
1.51k
  EmitBlock(Handlers);
3203
3204
  // Handler functions take an i8* pointing to the (handler-specific) static
3205
  // information block, followed by a sequence of intptr_t arguments
3206
  // representing operand values.
3207
1.51k
  SmallVector<llvm::Value *, 4> Args;
3208
1.51k
  SmallVector<llvm::Type *, 4> ArgTypes;
3209
1.51k
  if (!CGM.getCodeGenOpts().SanitizeMinimalRuntime) {
3210
1.50k
    Args.reserve(DynamicArgs.size() + 1);
3211
1.50k
    ArgTypes.reserve(DynamicArgs.size() + 1);
3212
3213
    // Emit handler arguments and create handler function type.
3214
1.50k
    if (!StaticArgs.empty()) {
3215
1.49k
      llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3216
1.49k
      auto *InfoPtr =
3217
1.49k
          new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3218
1.49k
                                   llvm::GlobalVariable::PrivateLinkage, Info);
3219
1.49k
      InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3220
1.49k
      CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3221
1.49k
      Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy));
3222
1.49k
      ArgTypes.push_back(Int8PtrTy);
3223
1.49k
    }
3224
3225
4.05k
    for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) {
3226
2.54k
      Args.push_back(EmitCheckValue(DynamicArgs[i]));
3227
2.54k
      ArgTypes.push_back(IntPtrTy);
3228
2.54k
    }
3229
1.50k
  }
3230
3231
1.51k
  llvm::FunctionType *FnType =
3232
1.51k
    llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
3233
3234
1.51k
  if (!FatalCond || !RecoverableCond) {
3235
    // Simple case: we need to generate a single handler call, either
3236
    // fatal, or non-fatal.
3237
1.51k
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
3238
1.51k
                         (FatalCond != nullptr), Cont);
3239
0
  } else {
3240
    // Emit two handler calls: first one for set of unrecoverable checks,
3241
    // another one for recoverable.
3242
0
    llvm::BasicBlock *NonFatalHandlerBB =
3243
0
        createBasicBlock("non_fatal." + CheckName);
3244
0
    llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
3245
0
    Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
3246
0
    EmitBlock(FatalHandlerBB);
3247
0
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
3248
0
                         NonFatalHandlerBB);
3249
0
    EmitBlock(NonFatalHandlerBB);
3250
0
    emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
3251
0
                         Cont);
3252
0
  }
3253
3254
1.51k
  EmitBlock(Cont);
3255
1.51k
}
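A hedged sketch of the handler signature implied by the comment above ("an i8* pointing to the static information block, followed by intptr_t operand values"). The handler name and data type here are illustrative placeholders rather than real compiler-rt declarations:

    #include <stdint.h>

    struct ExampleCheckData;   /* handler-specific static information block */

    void __ubsan_handle_example_check(struct ExampleCheckData *Data,
                                      uintptr_t Operand1, uintptr_t Operand2);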
3256
3257
void CodeGenFunction::EmitCfiSlowPathCheck(
3258
    SanitizerMask Kind, llvm::Value *Cond, llvm::ConstantInt *TypeId,
3259
7
    llvm::Value *Ptr, ArrayRef<llvm::Constant *> StaticArgs) {
3260
7
  llvm::BasicBlock *Cont = createBasicBlock("cfi.cont");
3261
3262
7
  llvm::BasicBlock *CheckBB = createBasicBlock("cfi.slowpath");
3263
7
  llvm::BranchInst *BI = Builder.CreateCondBr(Cond, Cont, CheckBB);
3264
3265
7
  llvm::MDBuilder MDHelper(getLLVMContext());
3266
7
  llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
3267
7
  BI->setMetadata(llvm::LLVMContext::MD_prof, Node);
3268
3269
7
  EmitBlock(CheckBB);
3270
3271
7
  bool WithDiag = !CGM.getCodeGenOpts().SanitizeTrap.has(Kind);
3272
3273
7
  llvm::CallInst *CheckCall;
3274
7
  llvm::FunctionCallee SlowPathFn;
3275
7
  if (WithDiag) {
3276
4
    llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
3277
4
    auto *InfoPtr =
3278
4
        new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
3279
4
                                 llvm::GlobalVariable::PrivateLinkage, Info);
3280
4
    InfoPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
3281
4
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
3282
3283
4
    SlowPathFn = CGM.getModule().getOrInsertFunction(
3284
4
        "__cfi_slowpath_diag",
3285
4
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy},
3286
4
                                false));
3287
4
    CheckCall = Builder.CreateCall(
3288
4
        SlowPathFn, {TypeId, Ptr, Builder.CreateBitCast(InfoPtr, Int8PtrTy)});
3289
3
  } else {
3290
3
    SlowPathFn = CGM.getModule().getOrInsertFunction(
3291
3
        "__cfi_slowpath",
3292
3
        llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy}, false));
3293
3
    CheckCall = Builder.CreateCall(SlowPathFn, {TypeId, Ptr});
3294
3
  }
3295
3296
7
  CGM.setDSOLocal(
3297
7
      cast<llvm::GlobalValue>(SlowPathFn.getCallee()->stripPointerCasts()));
3298
7
  CheckCall->setDoesNotThrow();
3299
3300
7
  EmitBlock(Cont);
3301
7
}
3302
3303
// Emit a stub for __cfi_check function so that the linker knows about this
3304
// symbol in LTO mode.
3305
14
void CodeGenFunction::EmitCfiCheckStub() {
3306
14
  llvm::Module *M = &CGM.getModule();
3307
14
  auto &Ctx = M->getContext();
3308
14
  llvm::Function *F = llvm::Function::Create(
3309
14
      llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
3310
14
      llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
3311
14
  CGM.setDSOLocal(F);
3312
14
  llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
3313
  // FIXME: consider emitting an intrinsic call like
3314
  // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
3315
  // which can be lowered in CrossDSOCFI pass to the actual contents of
3316
  // __cfi_check. This would allow inlining of __cfi_check calls.
3317
14
  llvm::CallInst::Create(
3318
14
      llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
3319
14
  llvm::ReturnInst::Create(Ctx, nullptr, BB);
3320
14
}
3321
3322
// This function is basically a switch over the CFI failure kind, which is
3323
// extracted from CFICheckFailData (1st function argument). Each case is either
3324
// llvm.trap or a call to one of the two runtime handlers, based on
3325
// -fsanitize-trap and -fsanitize-recover settings.  Default case (invalid
3326
// failure kind) traps, but this should really never happen.  CFICheckFailData
3327
// can be nullptr if the calling module has -fsanitize-trap behavior for this
3328
// check kind; in this case __cfi_check_fail traps as well.
3329
14
void CodeGenFunction::EmitCfiCheckFail() {
3330
14
  SanitizerScope SanScope(this);
3331
14
  FunctionArgList Args;
3332
14
  ImplicitParamDecl ArgData(getContext(), getContext().VoidPtrTy,
3333
14
                            ImplicitParamDecl::Other);
3334
14
  ImplicitParamDecl ArgAddr(getContext(), getContext().VoidPtrTy,
3335
14
                            ImplicitParamDecl::Other);
3336
14
  Args.push_back(&ArgData);
3337
14
  Args.push_back(&ArgAddr);
3338
3339
14
  const CGFunctionInfo &FI =
3340
14
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, Args);
3341
3342
14
  llvm::Function *F = llvm::Function::Create(
3343
14
      llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
3344
14
      llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
3345
3346
14
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F);
3347
14
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
3348
14
  F->setVisibility(llvm::GlobalValue::HiddenVisibility);
3349
3350
14
  StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
3351
14
                SourceLocation());
3352
3353
  // This function should not be affected by blacklist. This function does
3354
  // not have a source location, but "src:*" would still apply. Revert any
3355
  // changes to SanOpts made in StartFunction.
3356
14
  SanOpts = CGM.getLangOpts().Sanitize;
3357
3358
14
  llvm::Value *Data =
3359
14
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgData), /*Volatile=*/false,
3360
14
                       CGM.getContext().VoidPtrTy, ArgData.getLocation());
3361
14
  llvm::Value *Addr =
3362
14
      EmitLoadOfScalar(GetAddrOfLocalVar(&ArgAddr), /*Volatile=*/false,
3363
14
                       CGM.getContext().VoidPtrTy, ArgAddr.getLocation());
3364
3365
  // Data == nullptr means the calling module has trap behaviour for this check.
3366
14
  llvm::Value *DataIsNotNullPtr =
3367
14
      Builder.CreateICmpNE(Data, llvm::ConstantPointerNull::get(Int8PtrTy));
3368
14
  EmitTrapCheck(DataIsNotNullPtr);
3369
3370
14
  llvm::StructType *SourceLocationTy =
3371
14
      llvm::StructType::get(VoidPtrTy, Int32Ty, Int32Ty);
3372
14
  llvm::StructType *CfiCheckFailDataTy =
3373
14
      llvm::StructType::get(Int8Ty, SourceLocationTy, VoidPtrTy);
3374
3375
14
  llvm::Value *V = Builder.CreateConstGEP2_32(
3376
14
      CfiCheckFailDataTy,
3377
14
      Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
3378
14
      0);
3379
14
  Address CheckKindAddr(V, getIntAlign());
3380
14
  llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
3381
3382
14
  llvm::Value *AllVtables = llvm::MetadataAsValue::get(
3383
14
      CGM.getLLVMContext(),
3384
14
      llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
3385
14
  llvm::Value *ValidVtable = Builder.CreateZExt(
3386
14
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::type_test),
3387
14
                         {Addr, AllVtables}),
3388
14
      IntPtrTy);
3389
3390
14
  const std::pair<int, SanitizerMask> CheckKinds[] = {
3391
14
      {CFITCK_VCall, SanitizerKind::CFIVCall},
3392
14
      {CFITCK_NVCall, SanitizerKind::CFINVCall},
3393
14
      {CFITCK_DerivedCast, SanitizerKind::CFIDerivedCast},
3394
14
      {CFITCK_UnrelatedCast, SanitizerKind::CFIUnrelatedCast},
3395
14
      {CFITCK_ICall, SanitizerKind::CFIICall}};
3396
3397
14
  SmallVector<std::pair<llvm::Value *, SanitizerMask>, 5> Checks;
3398
70
  for (auto CheckKindMaskPair : CheckKinds) {
3399
70
    int Kind = CheckKindMaskPair.first;
3400
70
    SanitizerMask Mask = CheckKindMaskPair.second;
3401
70
    llvm::Value *Cond =
3402
70
        Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
3403
70
    if (CGM.getLangOpts().Sanitize.has(Mask))
3404
17
      EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
3405
17
                {Data, Addr, ValidVtable});
3406
53
    else
3407
53
      EmitTrapCheck(Cond);
3408
70
  }
3409
3410
14
  FinishFunction();
3411
  // The only reference to this function will be created during LTO link.
3412
  // Make sure it survives until then.
3413
14
  CGM.addUsedGlobal(F);
3414
14
}
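A hedged C rendering of CfiCheckFailDataTy as assembled above ({ i8, SourceLocation, i8* }); only the layout comes from the code, the field names are editorial guesses:

    #include <stdint.h>

    struct CfiCheckFailData {
      uint8_t CheckKind;    /* one of the CFITCK_* kinds switched on above */
      struct {
        const char *Filename;
        int32_t Line, Column;
      } Loc;
      void *Extra;          /* the trailing i8* member */
    };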
3415
3416
1.82k
void CodeGenFunction::EmitUnreachable(SourceLocation Loc) {
3417
1.82k
  if (SanOpts.has(SanitizerKind::Unreachable)) {
3418
18
    SanitizerScope SanScope(this);
3419
18
    EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
3420
18
                             SanitizerKind::Unreachable),
3421
18
              SanitizerHandler::BuiltinUnreachable,
3422
18
              EmitCheckSourceLocation(Loc), None);
3423
18
  }
3424
1.82k
  Builder.CreateUnreachable();
3425
1.82k
}
3426
3427
497
void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
3428
497
  llvm::BasicBlock *Cont = createBasicBlock("cont");
3429
3430
  // If we're optimizing, collapse all calls to trap down to just one per
3431
  // function to save on code size.
3432
497
  if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) {
3433
470
    TrapBB = createBasicBlock("trap");
3434
470
    Builder.CreateCondBr(Checked, Cont, TrapBB);
3435
470
    EmitBlock(TrapBB);
3436
470
    llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
3437
470
    TrapCall->setDoesNotReturn();
3438
470
    TrapCall->setDoesNotThrow();
3439
470
    Builder.CreateUnreachable();
3440
27
  } else {
3441
27
    Builder.CreateCondBr(Checked, Cont, TrapBB);
3442
27
  }
3443
3444
497
  EmitBlock(Cont);
3445
497
}
3446
3447
704
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
3448
704
  llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3449
3450
704
  if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
3451
3
    auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
3452
3
                                  CGM.getCodeGenOpts().TrapFuncName);
3453
3
    TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
3454
3
  }
3455
3456
704
  return TrapCall;
3457
704
}
3458
3459
Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
3460
                                                 LValueBaseInfo *BaseInfo,
3461
55.4k
                                                 TBAAAccessInfo *TBAAInfo) {
3462
55.4k
  assert(E->getType()->isArrayType() &&
3463
55.4k
         "Array to pointer decay must have array source type!");
3464
3465
  // Expressions of array type can't be bitfields or vector elements.
3466
55.4k
  LValue LV = EmitLValue(E);
3467
55.4k
  Address Addr = LV.getAddress(*this);
3468
3469
  // If the array type was an incomplete type, we need to make sure
3470
  // the decay ends up being the right type.
3471
55.4k
  llvm::Type *NewTy = ConvertType(E->getType());
3472
55.4k
  Addr = Builder.CreateElementBitCast(Addr, NewTy);
3473
3474
  // Note that VLA pointers are always decayed, so we don't need to do
3475
  // anything here.
3476
55.4k
  if (!E->getType()->isVariableArrayType()) {
3477
51.0k
    assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3478
51.0k
           "Expected pointer to array");
3479
51.0k
    Addr = Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3480
51.0k
  }
3481
3482
  // The result of this decay conversion points to an array element within the
3483
  // base lvalue. However, since TBAA currently does not support representing
3484
  // accesses to elements of member arrays, we conservatively represent accesses
3485
  // to the pointee object as if it had no base lvalue specified.
3486
  // TODO: Support TBAA for member arrays.
3487
55.4k
  QualType EltType = E->getType()->castAsArrayTypeUnsafe()->getElementType();
3488
55.4k
  if (BaseInfo) *BaseInfo = LV.getBaseInfo();
3489
55.4k
  if (TBAAInfo) *TBAAInfo = CGM.getTBAAAccessInfo(EltType);
3490
3491
55.4k
  return Builder.CreateElementBitCast(Addr, ConvertTypeForMem(EltType));
3492
55.4k
}
3493
3494
/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
3495
/// array to pointer, return the array subexpression.
3496
46.2k
static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
3497
  // If this isn't just an array->pointer decay, bail out.
3498
46.2k
  const auto *CE = dyn_cast<CastExpr>(E);
3499
46.2k
  if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay)
3500
25.3k
    return nullptr;
3501
3502
  // If this is a decay from variable width array, bail out.
3503
20.8k
  const Expr *SubExpr = CE->getSubExpr();
3504
20.8k
  if (SubExpr->getType()->isVariableArrayType())
3505
2.74k
    return nullptr;
3506
3507
18.1k
  return SubExpr;
3508
18.1k
}
3509
3510
static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
3511
                                          llvm::Value *ptr,
3512
                                          ArrayRef<llvm::Value*> indices,
3513
                                          bool inbounds,
3514
                                          bool signedIndices,
3515
                                          SourceLocation loc,
3516
47.8k
                                    const llvm::Twine &name = "arrayidx") {
3517
47.8k
  if (inbounds) {
3518
47.8k
    return CGF.EmitCheckedInBoundsGEP(ptr, indices, signedIndices,
3519
47.8k
                                      CodeGenFunction::NotSubtraction, loc,
3520
47.8k
                                      name);
3521
5
  } else {
3522
5
    return CGF.Builder.CreateGEP(ptr, indices, name);
3523
5
  }
3524
47.8k
}
3525
3526
static CharUnits getArrayElementAlign(CharUnits arrayAlign,
3527
                                      llvm::Value *idx,
3528
47.8k
                                      CharUnits eltSize) {
3529
  // If we have a constant index, we can use the exact offset of the
3530
  // element we're accessing.
3531
47.8k
  if (auto constantIdx = dyn_cast<llvm::ConstantInt>(idx)) {
3532
19.5k
    CharUnits offset = constantIdx->getZExtValue() * eltSize;
3533
19.5k
    return arrayAlign.alignmentAtOffset(offset);
3534
3535
  // Otherwise, use the worst-case alignment for any element.
3536
28.3k
  } else {
3537
28.3k
    return arrayAlign.alignmentOfArrayElement(eltSize);
3538
28.3k
  }
3539
47.8k
}
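Worked example for the two branches above (numbers assumed for illustration): with an array alignment of 16 bytes and an element size of 12 bytes, a constant index of 2 yields a known offset of 24 bytes, so the element is 8-byte aligned; with a runtime index, the worst case over all multiples of 12 applies, which is 4-byte alignment.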
3540
3541
static QualType getFixedSizeElementType(const ASTContext &ctx,
3542
56
                                        const VariableArrayType *vla) {
3543
56
  QualType eltType;
3544
56
  do {
3545
56
    eltType = vla->getElementType();
3546
56
  } while ((vla = ctx.getAsVariableArrayType(eltType)));
3547
56
  return eltType;
3548
56
}
3549
3550
/// Given an array base, check whether its member access belongs to a record
3551
/// with preserve_access_index attribute or not.
3552
19.5k
static bool IsPreserveAIArrayBase(CodeGenFunction &CGF, const Expr *ArrayBase) {
3553
19.5k
  if (!ArrayBase || !CGF.getDebugInfo())
3554
14.2k
    return false;
3555
3556
  // Only support base as either a MemberExpr or DeclRefExpr.
3557
  // DeclRefExpr to cover cases like:
3558
  //    struct s { int a; int b[10]; };
3559
  //    struct s *p;
3560
  //    p[1].a
3561
  // p[1] will generate a DeclRefExpr and p[1].a is a MemberExpr.
3562
  // p->b[5] is a MemberExpr example.
3563
5.28k
  const Expr *E = ArrayBase->IgnoreImpCasts();
3564
5.28k
  if (const auto *ME = dyn_cast<MemberExpr>(E))
3565
180
    return ME->getMemberDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3566
3567
5.10k
  if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
3568
4.89k
    const auto *VarDef = dyn_cast<VarDecl>(DRE->getDecl());
3569
4.89k
    if (!VarDef)
3570
0
      return false;
3571
3572
4.89k
    const auto *PtrT = VarDef->getType()->getAs<PointerType>();
3573
4.89k
    if (!PtrT)
3574
3.14k
      return false;
3575
3576
1.74k
    const auto *PointeeT = PtrT->getPointeeType()
3577
1.74k
                             ->getUnqualifiedDesugaredType();
3578
1.74k
    if (const auto *RecT = dyn_cast<RecordType>(PointeeT))
3579
20
      return RecT->getDecl()->hasAttr<BPFPreserveAccessIndexAttr>();
3580
1.72k
    return false;
3581
1.72k
  }
3582
3583
211
  return false;
3584
211
}
3585
3586
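A sketch of the kind of BPF CO-RE input the check above looks for (assuming the bpf target with debug info enabled; the struct and function names are hypothetical): the record carries __attribute__((preserve_access_index)), and the array base is either a MemberExpr (p->b[5]) or a DeclRefExpr of pointer-to-record type (p[1].a).

struct __attribute__((preserve_access_index)) s {
  int a;
  int b[10];
};

int read_member_base(struct s *p) {
  return p->b[5];     // subscript base is a MemberExpr on an attributed record
}

int read_declref_base(struct s *p) {
  return p[1].a;      // subscript base is a DeclRefExpr whose pointee record carries the attribute
}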
static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
3587
                                     ArrayRef<llvm::Value *> indices,
3588
                                     QualType eltType, bool inbounds,
3589
                                     bool signedIndices, SourceLocation loc,
3590
                                     QualType *arrayType = nullptr,
3591
                                     const Expr *Base = nullptr,
3592
47.8k
                                     const llvm::Twine &name = "arrayidx") {
3593
  // All the indices except that last must be zero.
3594
47.8k
#ifndef NDEBUG
3595
47.8k
  for (auto idx : indices.drop_back())
3596
47.8k
    assert(isa<llvm::ConstantInt>(idx) &&
3597
47.8k
           cast<llvm::ConstantInt>(idx)->isZero());
3598
47.8k
#endif
3599
3600
  // Determine the element size of the statically-sized base.  This is
3601
  // the thing that the indices are expressed in terms of.
3602
47.8k
  if (auto vla = CGF.getContext().getAsVariableArrayType(eltType)) {
3603
56
    eltType = getFixedSizeElementType(CGF.getContext(), vla);
3604
56
  }
3605
3606
  // We can use that to compute the best alignment of the element.
3607
47.8k
  CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
3608
47.8k
  CharUnits eltAlign =
3609
47.8k
    getArrayElementAlign(addr.getAlignment(), indices.back(), eltSize);
3610
3611
47.8k
  llvm::Value *eltPtr;
3612
47.8k
  auto LastIndex = dyn_cast<llvm::ConstantInt>(indices.back());
3613
47.8k
  if (!LastIndex ||
3614
47.8k
      
(19.5k
!CGF.IsInPreservedAIRegion19.5k
&&
!IsPreserveAIArrayBase(CGF, Base)19.5k
)) {
3615
47.8k
    eltPtr = emitArraySubscriptGEP(
3616
47.8k
        CGF, addr.getPointer(), indices, inbounds, signedIndices,
3617
47.8k
        loc, name);
3618
20
  } else {
3619
    // Remember the original array subscript for bpf target
3620
20
    unsigned idx = LastIndex->getZExtValue();
3621
20
    llvm::DIType *DbgInfo = nullptr;
3622
20
    if (arrayType)
3623
20
      DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(*arrayType, loc);
3624
20
    eltPtr = CGF.Builder.CreatePreserveArrayAccessIndex(addr.getElementType(),
3625
20
                                                        addr.getPointer(),
3626
20
                                                        indices.size() - 1,
3627
20
                                                        idx, DbgInfo);
3628
20
  }
3629
3630
47.8k
  return Address(eltPtr, eltAlign);
3631
47.8k
}
3632
3633
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
3634
46.1k
                                               bool Accessed) {
3635
  // The index must always be an integer, which is not an aggregate.  Emit it
3636
  // in lexical order (this complexity is, sadly, required by C++17).
3637
46.1k
  llvm::Value *IdxPre =
3638
46.1k
      (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
3639
46.1k
  bool SignedIndices = false;
3640
46.1k
  auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
3641
46.1k
    auto *Idx = IdxPre;
3642
46.1k
    if (E->getLHS() != E->getIdx()) {
3643
46.1k
      assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
3644
46.1k
      Idx = EmitScalarExpr(E->getIdx());
3645
46.1k
    }
3646
3647
46.1k
    QualType IdxTy = E->getIdx()->getType();
3648
46.1k
    bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
3649
46.1k
    SignedIndices |= IdxSigned;
3650
3651
46.1k
    if (SanOpts.has(SanitizerKind::ArrayBounds))
3652
37
      EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
3653
3654
    // Extend or truncate the index type to 32 or 64-bits.
3655
46.1k
    if (Promote && Idx->getType() != IntPtrTy)
3656
36.7k
      Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
3657
3658
46.1k
    return Idx;
3659
46.1k
  };
3660
46.1k
  IdxPre = nullptr;
3661
3662
  // If the base is a vector type, then we are forming a vector element lvalue
3663
  // with this subscript.
3664
46.1k
  if (E->getBase()->getType()->isVectorType() &&
3665
418
      !isa<ExtVectorElementExpr>(E->getBase())) {
3666
    // Emit the vector as an lvalue to get its address.
3667
417
    LValue LHS = EmitLValue(E->getBase());
3668
417
    auto *Idx = EmitIdxAfterBase(/*Promote*/false);
3669
417
    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
3670
417
    return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
3671
417
                                 E->getBase()->getType(), LHS.getBaseInfo(),
3672
417
                                 TBAAAccessInfo());
3673
417
  }
3674
3675
  // All the other cases basically behave like simple offsetting.
3676
3677
  // Handle the extvector case we ignored above.
3678
45.7k
  if (isa<ExtVectorElementExpr>(E->getBase())) {
3679
1
    LValue LV = EmitLValue(E->getBase());
3680
1
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3681
1
    Address Addr = EmitExtVectorElementLValue(LV);
3682
3683
1
    QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
3684
1
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, EltType, /*inbounds*/ true,
3685
1
                                 SignedIndices, E->getExprLoc());
3686
1
    return MakeAddrLValue(Addr, EltType, LV.getBaseInfo(),
3687
1
                          CGM.getTBAAInfoForSubobject(LV, EltType));
3688
1
  }
3689
3690
45.7k
  LValueBaseInfo EltBaseInfo;
3691
45.7k
  TBAAAccessInfo EltTBAAInfo;
3692
45.7k
  Address Addr = Address::invalid();
3693
45.7k
  if (const VariableArrayType *vla =
3694
1.56k
           getContext().getAsVariableArrayType(E->getType())) {
3695
    // The base must be a pointer, which is not an aggregate.  Emit
3696
    // it.  It needs to be emitted first in case it's what captures
3697
    // the VLA bounds.
3698
1.56k
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3699
1.56k
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3700
3701
    // The element count here is the total number of non-VLA elements.
3702
1.56k
    llvm::Value *numElements = getVLASize(vla).NumElts;
3703
3704
    // Effectively, the multiply by the VLA size is part of the GEP.
3705
    // GEP indexes are signed, and scaling an index isn't permitted to
3706
    // signed-overflow, so we use the same semantics for our explicit
3707
    // multiply.  We suppress this if overflow is not undefined behavior.
3708
1.56k
    if (getLangOpts().isSignedOverflowDefined()) {
3709
0
      Idx = Builder.CreateMul(Idx, numElements);
3710
1.56k
    } else {
3711
1.56k
      Idx = Builder.CreateNSWMul(Idx, numElements);
3712
1.56k
    }
3713
3714
1.56k
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, vla->getElementType(),
3715
1.56k
                                 !getLangOpts().isSignedOverflowDefined(),
3716
1.56k
                                 SignedIndices, E->getExprLoc());
3717
3718
44.2k
  } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
3719
    // Indexing over an interface, as in "NSString *P; P[4];"
3720
3721
    // Emit the base pointer.
3722
5
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3723
5
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3724
3725
5
    CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
3726
5
    llvm::Value *InterfaceSizeVal =
3727
5
        llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
3728
3729
5
    llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
3730
3731
    // We don't necessarily build correct LLVM struct types for ObjC
3732
    // interfaces, so we can't rely on GEP to do this scaling
3733
    // correctly, so we need to cast to i8*.  FIXME: is this actually
3734
    // true?  A lot of other things in the fragile ABI would break...
3735
5
    llvm::Type *OrigBaseTy = Addr.getType();
3736
5
    Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
3737
3738
    // Do the GEP.
3739
5
    CharUnits EltAlign =
3740
5
      getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
3741
5
    llvm::Value *EltPtr =
3742
5
        emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false,
3743
5
                              SignedIndices, E->getExprLoc());
3744
5
    Addr = Address(EltPtr, EltAlign);
3745
3746
    // Cast back.
3747
5
    Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
3748
44.2k
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3749
    // If this is A[i] where A is an array, the frontend will have decayed the
3750
    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
3751
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3752
    // "gep x, i" here.  Emit one "gep A, 0, i".
3753
17.4k
    assert(Array->getType()->isArrayType() &&
3754
17.4k
           "Array to pointer decay must have array source type!");
3755
17.4k
    LValue ArrayLV;
3756
    // For simple multidimensional array indexing, set the 'accessed' flag for
3757
    // better bounds-checking of the base expression.
3758
17.4k
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3759
1.34k
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3760
16.0k
    else
3761
16.0k
      ArrayLV = EmitLValue(Array);
3762
17.4k
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3763
3764
    // Propagate the alignment from the array itself to the result.
3765
17.4k
    QualType arrayType = Array->getType();
3766
17.4k
    Addr = emitArraySubscriptGEP(
3767
17.4k
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
3768
17.4k
        E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
3769
17.4k
        E->getExprLoc(), &arrayType, E->getBase());
3770
17.4k
    EltBaseInfo = ArrayLV.getBaseInfo();
3771
17.4k
    EltTBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, E->getType());
3772
26.7k
  } else {
3773
    // The base must be a pointer; emit it with an estimate of its alignment.
3774
26.7k
    Addr = EmitPointerWithAlignment(E->getBase(), &EltBaseInfo, &EltTBAAInfo);
3775
26.7k
    auto *Idx = EmitIdxAfterBase(/*Promote*/true);
3776
26.7k
    QualType ptrType = E->getBase()->getType();
3777
26.7k
    Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
3778
26.7k
                                 !getLangOpts().isSignedOverflowDefined(),
3779
26.7k
                                 SignedIndices, E->getExprLoc(), &ptrType,
3780
26.7k
                                 E->getBase());
3781
26.7k
  }
3782
3783
45.7k
  LValue LV = MakeAddrLValue(Addr, E->getType(), EltBaseInfo, EltTBAAInfo);
3784
3785
45.7k
  if (getLangOpts().ObjC &&
3786
17.8k
      getLangOpts().getGC() != LangOptions::NonGC) {
3787
127
    LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
3788
127
    setObjCGCLValueClass(getContext(), E, LV);
3789
127
  }
3790
45.7k
  return LV;
3791
45.7k
}
3792
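A sketch of the common shape handled by the array-decay branch above (hypothetical input): a nested constant-size subscript takes the isSimpleArrayDecayOperand() path at each level, so the "gep A, 0, 0" plus "gep x, i" pair collapses into one GEP per dimension.

int grid[4][8];

int cell(int i, int j) {
  // Each level is a simple array-decay operand, so clang emits one
  // "gep grid, 0, i" and then one "gep row, 0, j" rather than
  // materializing the decayed pointers separately.
  return grid[i][j];
}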
3793
17
LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
3794
17
  assert(
3795
17
      !E->isIncomplete() &&
3796
17
      "incomplete matrix subscript expressions should be rejected during Sema");
3797
17
  LValue Base = EmitLValue(E->getBase());
3798
17
  llvm::Value *RowIdx = EmitScalarExpr(E->getRowIdx());
3799
17
  llvm::Value *ColIdx = EmitScalarExpr(E->getColumnIdx());
3800
17
  llvm::Value *NumRows = Builder.getIntN(
3801
17
      RowIdx->getType()->getScalarSizeInBits(),
3802
17
      E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
3803
17
  llvm::Value *FinalIdx =
3804
17
      Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
3805
17
  return LValue::MakeMatrixElt(
3806
17
      MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
3807
17
      E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
3808
17
}
3809
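A sketch of a matrix subscript that reaches the routine above (requires Clang's -fenable-matrix; the typedef name is hypothetical): the element index is linearized column-major, which is exactly the FinalIdx = ColIdx * NumRows + RowIdx computation.

typedef float m4x4 __attribute__((matrix_type(4, 4)));

float at(m4x4 m, unsigned r, unsigned c) {
  return m[r][c];     // lowered with FinalIdx = c * 4 + r
}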
3810
static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
3811
                                       LValueBaseInfo &BaseInfo,
3812
                                       TBAAAccessInfo &TBAAInfo,
3813
                                       QualType BaseTy, QualType ElTy,
3814
1.39k
                                       bool IsLowerBound) {
3815
1.39k
  LValue BaseLVal;
3816
1.39k
  if (auto *ASE = dyn_cast<OMPArraySectionExpr>(Base->IgnoreParenImpCasts())) {
3817
316
    BaseLVal = CGF.EmitOMPArraySectionExpr(ASE, IsLowerBound);
3818
316
    if (BaseTy->isArrayType()) {
3819
80
      Address Addr = BaseLVal.getAddress(CGF);
3820
80
      BaseInfo = BaseLVal.getBaseInfo();
3821
3822
      // If the array type was an incomplete type, we need to make sure
3823
      // the decay ends up being the right type.
3824
80
      llvm::Type *NewTy = CGF.ConvertType(BaseTy);
3825
80
      Addr = CGF.Builder.CreateElementBitCast(Addr, NewTy);
3826
3827
      // Note that VLA pointers are always decayed, so we don't need to do
3828
      // anything here.
3829
80
      if (!BaseTy->isVariableArrayType()) {
3830
32
        assert(isa<llvm::ArrayType>(Addr.getElementType()) &&
3831
32
               "Expected pointer to array");
3832
32
        Addr = CGF.Builder.CreateConstArrayGEP(Addr, 0, "arraydecay");
3833
32
      }
3834
3835
80
      return CGF.Builder.CreateElementBitCast(Addr,
3836
80
                                              CGF.ConvertTypeForMem(ElTy));
3837
80
    }
3838
236
    LValueBaseInfo TypeBaseInfo;
3839
236
    TBAAAccessInfo TypeTBAAInfo;
3840
236
    CharUnits Align =
3841
236
        CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
3842
236
    BaseInfo.mergeForCast(TypeBaseInfo);
3843
236
    TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
3844
236
    return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align);
3845
236
  }
3846
1.07k
  return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
3847
1.07k
}
3848
3849
LValue CodeGenFunction::EmitOMPArraySectionExpr(const OMPArraySectionExpr *E,
3850
2.11k
                                                bool IsLowerBound) {
3851
2.11k
  QualType BaseTy = OMPArraySectionExpr::getBaseOriginalType(E->getBase());
3852
2.11k
  QualType ResultExprTy;
3853
2.11k
  if (auto *AT = getContext().getAsArrayType(BaseTy))
3854
1.11k
    ResultExprTy = AT->getElementType();
3855
998
  else
3856
998
    ResultExprTy = BaseTy->getPointeeType();
3857
2.11k
  llvm::Value *Idx = nullptr;
3858
2.11k
  if (IsLowerBound || E->getColonLocFirst().isInvalid()) {
3859
    // Requesting lower bound or upper bound, but without provided length and
3860
    // without ':' symbol for the default length -> length = 1.
3861
    // Idx = LowerBound ?: 0;
3862
1.74k
    if (auto *LowerBound = E->getLowerBound()) {
3863
786
      Idx = Builder.CreateIntCast(
3864
786
          EmitScalarExpr(LowerBound), IntPtrTy,
3865
786
          LowerBound->getType()->hasSignedIntegerRepresentation());
3866
786
    } else
3867
955
      Idx = llvm::ConstantInt::getNullValue(IntPtrTy);
3868
371
  } else {
3869
    // Try to emit length or lower bound as constant. If this is possible, 1
3870
    // is subtracted from constant length or lower bound. Otherwise, emit LLVM
3871
    // IR (LB + Len) - 1.
3872
371
    auto &C = CGM.getContext();
3873
371
    auto *Length = E->getLength();
3874
371
    llvm::APSInt ConstLength;
3875
371
    if (Length) {
3876
      // Idx = LowerBound + Length - 1;
3877
349
      if (Optional<llvm::APSInt> CL = Length->getIntegerConstantExpr(C)) {
3878
171
        ConstLength = CL->zextOrTrunc(PointerWidthInBits);
3879
171
        Length = nullptr;
3880
171
      }
3881
349
      auto *LowerBound = E->getLowerBound();
3882
349
      llvm::APSInt ConstLowerBound(PointerWidthInBits, /*isUnsigned=*/false);
3883
349
      if (LowerBound) {
3884
228
        if (Optional<llvm::APSInt> LB = LowerBound->getIntegerConstantExpr(C)) {
3885
228
          ConstLowerBound = LB->zextOrTrunc(PointerWidthInBits);
3886
228
          LowerBound = nullptr;
3887
228
        }
3888
228
      }
3889
349
      if (!Length)
3890
171
        --ConstLength;
3891
178
      else if (!LowerBound)
3892
178
        --ConstLowerBound;
3893
3894
349
      if (Length || LowerBound) {
3895
178
        auto *LowerBoundVal =
3896
178
            LowerBound
3897
0
                ? Builder.CreateIntCast(
3898
0
                      EmitScalarExpr(LowerBound), IntPtrTy,
3899
0
                      LowerBound->getType()->hasSignedIntegerRepresentation())
3900
178
                : llvm::ConstantInt::get(IntPtrTy, ConstLowerBound);
3901
178
        auto *LengthVal =
3902
178
            Length
3903
178
                ? Builder.CreateIntCast(
3904
178
                      EmitScalarExpr(Length), IntPtrTy,
3905
178
                      Length->getType()->hasSignedIntegerRepresentation())
3906
0
                : llvm::ConstantInt::get(IntPtrTy, ConstLength);
3907
178
        Idx = Builder.CreateAdd(LowerBoundVal, LengthVal, "lb_add_len",
3908
178
                                /*HasNUW=*/false,
3909
178
                                !getLangOpts().isSignedOverflowDefined());
3910
178
        if (Length && LowerBound) {
3911
0
          Idx = Builder.CreateSub(
3912
0
              Idx, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "idx_sub_1",
3913
0
              /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
3914
0
        }
3915
178
      } else
3916
171
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength + ConstLowerBound);
3917
22
    } else {
3918
      // Idx = ArraySize - 1;
3919
22
      QualType ArrayTy = BaseTy->isPointerType()
3920
0
                             ? E->getBase()->IgnoreParenImpCasts()->getType()
3921
22
                             : BaseTy;
3922
22
      if (auto *VAT = C.getAsVariableArrayType(ArrayTy)) {
3923
16
        Length = VAT->getSizeExpr();
3924
16
        if (Optional<llvm::APSInt> L = Length->getIntegerConstantExpr(C)) {
3925
12
          ConstLength = *L;
3926
12
          Length = nullptr;
3927
12
        }
3928
6
      } else {
3929
6
        auto *CAT = C.getAsConstantArrayType(ArrayTy);
3930
6
        ConstLength = CAT->getSize();
3931
6
      }
3932
22
      if (Length) {
3933
4
        auto *LengthVal = Builder.CreateIntCast(
3934
4
            EmitScalarExpr(Length), IntPtrTy,
3935
4
            Length->getType()->hasSignedIntegerRepresentation());
3936
4
        Idx = Builder.CreateSub(
3937
4
            LengthVal, llvm::ConstantInt::get(IntPtrTy, /*V=*/1), "len_sub_1",
3938
4
            /*HasNUW=*/false, !getLangOpts().isSignedOverflowDefined());
3939
18
      } else {
3940
18
        ConstLength = ConstLength.zextOrTrunc(PointerWidthInBits);
3941
18
        --ConstLength;
3942
18
        Idx = llvm::ConstantInt::get(IntPtrTy, ConstLength);
3943
18
      }
3944
22
    }
3945
371
  }
3946
2.11k
  assert(Idx);
3947
3948
2.11k
  Address EltPtr = Address::invalid();
3949
2.11k
  LValueBaseInfo BaseInfo;
3950
2.11k
  TBAAAccessInfo TBAAInfo;
3951
2.11k
  if (auto *VLA = getContext().getAsVariableArrayType(ResultExprTy)) {
3952
    // The base must be a pointer, which is not an aggregate.  Emit
3953
    // it.  It needs to be emitted first in case it's what captures
3954
    // the VLA bounds.
3955
104
    Address Base =
3956
104
        emitOMPArraySectionBase(*this, E->getBase(), BaseInfo, TBAAInfo,
3957
104
                                BaseTy, VLA->getElementType(), IsLowerBound);
3958
    // The element count here is the total number of non-VLA elements.
3959
104
    llvm::Value *NumElements = getVLASize(VLA).NumElts;
3960
3961
    // Effectively, the multiply by the VLA size is part of the GEP.
3962
    // GEP indexes are signed, and scaling an index isn't permitted to
3963
    // signed-overflow, so we use the same semantics for our explicit
3964
    // multiply.  We suppress this if overflow is not undefined behavior.
3965
104
    if (getLangOpts().isSignedOverflowDefined())
3966
0
      Idx = Builder.CreateMul(Idx, NumElements);
3967
104
    else
3968
104
      Idx = Builder.CreateNSWMul(Idx, NumElements);
3969
104
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, VLA->getElementType(),
3970
104
                                   !getLangOpts().isSignedOverflowDefined(),
3971
104
                                   /*signedIndices=*/false, E->getExprLoc());
3972
2.00k
  } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
3973
    // If this is A[i] where A is an array, the frontend will have decayed the
3974
    // base to be an ArrayToPointerDecay implicit cast.  While correct, it is
3975
    // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
3976
    // "gep x, i" here.  Emit one "gep A, 0, i".
3977
718
    assert(Array->getType()->isArrayType() &&
3978
718
           "Array to pointer decay must have array source type!");
3979
718
    LValue ArrayLV;
3980
    // For simple multidimensional array indexing, set the 'accessed' flag for
3981
    // better bounds-checking of the base expression.
3982
718
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array))
3983
96
      ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
3984
622
    else
3985
622
      ArrayLV = EmitLValue(Array);
3986
3987
    // Propagate the alignment from the array itself to the result.
3988
718
    EltPtr = emitArraySubscriptGEP(
3989
718
        *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
3990
718
        ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
3991
718
        /*signedIndices=*/false, E->getExprLoc());
3992
718
    BaseInfo = ArrayLV.getBaseInfo();
3993
718
    TBAAInfo = CGM.getTBAAInfoForSubobject(ArrayLV, ResultExprTy);
3994
1.29k
  } else {
3995
1.29k
    Address Base = emitOMPArraySectionBase(*this, E->getBase(), BaseInfo,
3996
1.29k
                                           TBAAInfo, BaseTy, ResultExprTy,
3997
1.29k
                                           IsLowerBound);
3998
1.29k
    EltPtr = emitArraySubscriptGEP(*this, Base, Idx, ResultExprTy,
3999
1.29k
                                   !getLangOpts().isSignedOverflowDefined(),
4000
1.29k
                                   /*signedIndices=*/false, E->getExprLoc());
4001
1.29k
  }
4002
4003
2.11k
  return MakeAddrLValue(EltPtr, ResultExprTy, BaseInfo, TBAAInfo);
4004
2.11k
}
4005
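A sketch of the OpenMP array-section form the routine above lowers (assuming -fopenmp; names are hypothetical): for the lower bound the index is simply LB (or 0), while for the upper end it is LB + Len - 1, folding whichever of the two parts are integer constants.

void scale(float *x, int n) {
  // Section x[2 : n-2]: lower bound 2, length n-2; the mapped range ends
  // at index 2 + (n - 2) - 1.
  #pragma omp target map(tofrom: x[2:n-2])
  {
    for (int i = 2; i < n; ++i)
      x[i] *= 2.0f;
  }
}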
4006
LValue CodeGenFunction::
4007
282
EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
4008
  // Emit the base vector as an l-value.
4009
282
  LValue Base;
4010
4011
  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
4012
282
  if (E->isArrow()) {
4013
    // If it is a pointer to a vector, emit the address and form an lvalue with
4014
    // it.
4015
1
    LValueBaseInfo BaseInfo;
4016
1
    TBAAAccessInfo TBAAInfo;
4017
1
    Address Ptr = EmitPointerWithAlignment(E->getBase(), &BaseInfo, &TBAAInfo);
4018
1
    const auto *PT = E->getBase()->getType()->castAs<PointerType>();
4019
1
    Base = MakeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo, TBAAInfo);
4020
1
    Base.getQuals().removeObjCGCAttr();
4021
281
  } else if (E->getBase()->isGLValue()) {
4022
    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
4023
    // emit the base as an lvalue.
4024
273
    assert(E->getBase()->getType()->isVectorType());
4025
273
    Base = EmitLValue(E->getBase());
4026
8
  } else {
4027
    // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
4028
8
    assert(E->getBase()->getType()->isVectorType() &&
4029
8
           "Result must be a vector");
4030
8
    llvm::Value *Vec = EmitScalarExpr(E->getBase());
4031
4032
    // Store the vector to memory (because LValue wants an address).
4033
8
    Address VecMem = CreateMemTemp(E->getBase()->getType());
4034
8
    Builder.CreateStore(Vec, VecMem);
4035
8
    Base = MakeAddrLValue(VecMem, E->getBase()->getType(),
4036
8
                          AlignmentSource::Decl);
4037
8
  }
4038
4039
282
  QualType type =
4040
282
    E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
4041
4042
  // Encode the element access list into a vector of unsigned indices.
4043
282
  SmallVector<uint32_t, 4> Indices;
4044
282
  E->getEncodedElementAccess(Indices);
4045
4046
282
  if (Base.isSimple()) {
4047
270
    llvm::Constant *CV =
4048
270
        llvm::ConstantDataVector::get(getLLVMContext(), Indices);
4049
270
    return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
4050
270
                                    Base.getBaseInfo(), TBAAAccessInfo());
4051
270
  }
4052
12
  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
4053
4054
12
  llvm::Constant *BaseElts = Base.getExtVectorElts();
4055
12
  SmallVector<llvm::Constant *, 4> CElts;
4056
4057
24
  for (unsigned i = 0, e = Indices.size(); i != e; ++i)
4058
12
    CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
4059
12
  llvm::Constant *CV = llvm::ConstantVector::get(CElts);
4060
12
  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), CV, type,
4061
12
                                  Base.getBaseInfo(), TBAAAccessInfo());
4062
12
}
4063
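A sketch of ext_vector_type element accesses that exercise the paths above (the typedef is hypothetical): a swizzle of an lvalue vector yields an ext-vector-element l-value, while a swizzle of an rvalue vector first stores the vector to a temporary.

typedef float float4 __attribute__((ext_vector_type(4)));

float4 shuffle(float4 v) {
  v.xy = v.yx;        // ExtVectorElementExpr written as an l-value
  return v;
}

float sum_x(float4 a, float4 b) {
  return (a + b).x;   // base is an rvalue vector: stored to a temporary first
}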
4064
124k
LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
4065
124k
  if (DeclRefExpr *DRE = tryToConvertMemberExprToDeclRefExpr(*this, E)) {
4066
28
    EmitIgnoredExpr(E->getBase());
4067
28
    return EmitDeclRefLValue(DRE);
4068
28
  }
4069
4070
124k
  Expr *BaseExpr = E->getBase();
4071
  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
4072
124k
  LValue BaseLV;
4073
124k
  if (E->isArrow()) {
4074
85.8k
    LValueBaseInfo BaseInfo;
4075
85.8k
    TBAAAccessInfo TBAAInfo;
4076
85.8k
    Address Addr = EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
4077
85.8k
    QualType PtrTy = BaseExpr->getType()->getPointeeType();
4078
85.8k
    SanitizerSet SkippedChecks;
4079
85.8k
    bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
4080
85.8k
    if (IsBaseCXXThis)
4081
38.8k
      SkippedChecks.set(SanitizerKind::Alignment, true);
4082
85.8k
    if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
4083
38.8k
      SkippedChecks.set(SanitizerKind::Null, true);
4084
85.8k
    EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
4085
85.8k
                  /*Alignment=*/CharUnits::Zero(), SkippedChecks);
4086
85.8k
    BaseLV = MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
4087
85.8k
  } else
4088
39.0k
    BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
4089
4090
124k
  NamedDecl *ND = E->getMemberDecl();
4091
124k
  if (auto *Field = dyn_cast<FieldDecl>(ND)) {
4092
124k
    LValue LV = EmitLValueForField(BaseLV, Field);
4093
124k
    setObjCGCLValueClass(getContext(), E, LV);
4094
124k
    if (getLangOpts().OpenMP) {
4095
      // If the member was explicitly marked as nontemporal, mark it as
4096
      // nontemporal. If the base lvalue is marked as nontemporal, mark access
4097
      // to children as nontemporal too.
4098
8.23k
      if ((IsWrappedCXXThis(BaseExpr) &&
4099
4.49k
           CGM.getOpenMPRuntime().isNontemporalDecl(Field)) ||
4100
8.17k
          BaseLV.isNontemporal())
4101
64
        LV.setNontemporal(/*Value=*/true);
4102
8.23k
    }
4103
124k
    return LV;
4104
124k
  }
4105
4106
0
  if (const auto *FD = dyn_cast<FunctionDecl>(ND))
4107
0
    return EmitFunctionDeclLValue(*this, E, FD);
4108
4109
0
  llvm_unreachable("Unhandled member declaration!");
4110
0
}
4111
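A sketch of the two base forms handled above (hypothetical types): a dot access emits the base as an l-value, while an arrow access emits it as a pointer and runs a member-access type check, with the null and alignment checks skipped when the base is 'this' or a plain DeclRefExpr.

struct Point { int x; };

int read(Point &s, Point *p) {
  return s.x          // dot: EmitCheckedLValue on the base
       + p->x;        // arrow: EmitPointerWithAlignment + TCK_MemberAccess check
}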
4112
/// Given that we are currently emitting a lambda, emit an l-value for
4113
/// one of its members.
4114
106
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
4115
106
  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
4116
106
  assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
4117
106
  QualType LambdaTagType =
4118
106
    getContext().getTagDeclType(Field->getParent());
4119
106
  LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
4120
106
  return EmitLValueForField(LambdaLV, Field);
4121
106
}
4122
4123
/// Get the field index in the debug info. The debug info structure/union
4124
/// will ignore the unnamed bitfields.
4125
unsigned CodeGenFunction::getDebugInfoFIndex(const RecordDecl *Rec,
4126
57
                                             unsigned FieldIndex) {
4127
57
  unsigned I = 0, Skipped = 0;
4128
4129
84
  for (auto F : Rec->getDefinition()->fields()) {
4130
84
    if (I == FieldIndex)
4131
57
      break;
4132
27
    if (F->isUnnamedBitfield())
4133
2
      Skipped++;
4134
27
    I++;
4135
27
  }
4136
4137
57
  return FieldIndex - Skipped;
4138
57
}
4139
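A sketch of the adjustment above (hypothetical struct): the unnamed bit-field occupies an AST field index but is not described in the debug info, so for 'b' the AST index 2 maps to debug-info index 1.

struct Padded {
  int a;     // AST index 0, debug index 0
  int : 4;   // unnamed bit-field: present in the AST, skipped in the debug layout
  int b;     // AST index 2, debug index 1
};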
4140
/// Get the address of a zero-sized field within a record. The resulting
4141
/// address doesn't necessarily have the right type.
4142
static Address emitAddrOfZeroSizeField(CodeGenFunction &CGF, Address Base,
4143
6
                                       const FieldDecl *Field) {
4144
6
  CharUnits Offset = CGF.getContext().toCharUnitsFromBits(
4145
6
      CGF.getContext().getFieldOffset(Field));
4146
6
  if (Offset.isZero())
4147
4
    return Base;
4148
2
  Base = CGF.Builder.CreateElementBitCast(Base, CGF.Int8Ty);
4149
2
  return CGF.Builder.CreateConstInBoundsByteGEP(Base, Offset);
4150
2
}
4151
4152
/// Drill down to the storage of a field without walking into
4153
/// reference types.
4154
///
4155
/// The resulting address doesn't necessarily have the right type.
4156
static Address emitAddrOfFieldStorage(CodeGenFunction &CGF, Address base,
4157
160k
                                      const FieldDecl *field) {
4158
160k
  if (field->isZeroSize(CGF.getContext()))
4159
6
    return emitAddrOfZeroSizeField(CGF, base, field);
4160
4161
160k
  const RecordDecl *rec = field->getParent();
4162
4163
160k
  unsigned idx =
4164
160k
    CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4165
4166
160k
  return CGF.Builder.CreateStructGEP(base, idx, field->getName());
4167
160k
}
4168
4169
static Address emitPreserveStructAccess(CodeGenFunction &CGF, LValue base,
4170
35
                                        Address addr, const FieldDecl *field) {
4171
35
  const RecordDecl *rec = field->getParent();
4172
35
  llvm::DIType *DbgInfo = CGF.getDebugInfo()->getOrCreateStandaloneType(
4173
35
      base.getType(), rec->getLocation());
4174
4175
35
  unsigned idx =
4176
35
      CGF.CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
4177
4178
35
  return CGF.Builder.CreatePreserveStructAccessIndex(
4179
35
      addr, idx, CGF.getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo);
4180
35
}
4181
4182
16
static bool hasAnyVptr(const QualType Type, const ASTContext &Context) {
4183
16
  const auto *RD = Type.getTypePtr()->getAsCXXRecordDecl();
4184
16
  if (!RD)
4185
3
    return false;
4186
4187
13
  if (RD->isDynamicClass())
4188
6
    return true;
4189
4190
7
  for (const auto &Base : RD->bases())
4191
2
    if (hasAnyVptr(Base.getType(), Context))
4192
2
      return true;
4193
4194
5
  for (const FieldDecl *Field : RD->fields())
4195
4
    if (hasAnyVptr(Field->getType(), Context))
4196
3
      return true;
4197
4198
2
  return false;
4199
5
}
4200
4201
LValue CodeGenFunction::EmitLValueForField(LValue base,
4202
162k
                                           const FieldDecl *field) {
4203
162k
  LValueBaseInfo BaseInfo = base.getBaseInfo();
4204
4205
162k
  if (field->isBitField()) {
4206
1.26k
    const CGRecordLayout &RL =
4207
1.26k
      CGM.getTypes().getCGRecordLayout(field->getParent());
4208
1.26k
    const CGBitFieldInfo &Info = RL.getBitFieldInfo(field);
4209
1.26k
    Address Addr = base.getAddress(*this);
4210
1.26k
    unsigned Idx = RL.getLLVMFieldNo(field);
4211
1.26k
    const RecordDecl *rec = field->getParent();
4212
1.26k
    if (!IsInPreservedAIRegion &&
4213
1.26k
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4214
1.26k
      if (Idx != 0)
4215
        // For structs, we GEP to the field that the record layout suggests.
4216
392
        Addr = Builder.CreateStructGEP(Addr, Idx, field->getName());
4217
3
    } else {
4218
3
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateRecordType(
4219
3
          getContext().getRecordType(rec), rec->getLocation());
4220
3
      Addr = Builder.CreatePreserveStructAccessIndex(Addr, Idx,
4221
3
          getDebugInfoFIndex(rec, field->getFieldIndex()),
4222
3
          DbgInfo);
4223
3
    }
4224
4225
    // Get the access type.
4226
1.26k
    llvm::Type *FieldIntTy =
4227
1.26k
      llvm::Type::getIntNTy(getLLVMContext(), Info.StorageSize);
4228
1.26k
    if (Addr.getElementType() != FieldIntTy)
4229
967
      Addr = Builder.CreateElementBitCast(Addr, FieldIntTy);
4230
4231
1.26k
    QualType fieldType =
4232
1.26k
      field->getType().withCVRQualifiers(base.getVRQualifiers());
4233
    // TODO: Support TBAA for bit fields.
4234
1.26k
    LValueBaseInfo FieldBaseInfo(BaseInfo.getAlignmentSource());
4235
1.26k
    return LValue::MakeBitfield(Addr, Info, fieldType, FieldBaseInfo,
4236
1.26k
                                TBAAAccessInfo());
4237
1.26k
  }
4238
4239
  // Fields of may-alias structures are may-alias themselves.
4240
  // FIXME: this should get propagated down through anonymous structs
4241
  // and unions.
4242
161k
  QualType FieldType = field->getType();
4243
161k
  const RecordDecl *rec = field->getParent();
4244
161k
  AlignmentSource BaseAlignSource = BaseInfo.getAlignmentSource();
4245
161k
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(BaseAlignSource));
4246
161k
  TBAAAccessInfo FieldTBAAInfo;
4247
161k
  if (base.getTBAAInfo().isMayAlias() ||
4248
160k
          rec->hasAttr<MayAliasAttr>() || FieldType->isVectorType()) {
4249
1.06k
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4250
160k
  } else if (rec->isUnion()) {
4251
    // TODO: Support TBAA for unions.
4252
4.95k
    FieldTBAAInfo = TBAAAccessInfo::getMayAliasInfo();
4253
155k
  } else {
4254
    // If no base type has been assigned for the base access, then try to generate
4255
    // one for this base lvalue.
4256
155k
    FieldTBAAInfo = base.getTBAAInfo();
4257
155k
    if (!FieldTBAAInfo.BaseType) {
4258
155k
        FieldTBAAInfo.BaseType = CGM.getTBAABaseTypeInfo(base.getType());
4259
155k
        assert(!FieldTBAAInfo.Offset &&
4260
155k
               "Nonzero offset for an access with no base type!");
4261
155k
    }
4262
4263
    // Adjust offset to be relative to the base type.
4264
155k
    const ASTRecordLayout &Layout =
4265
155k
        getContext().getASTRecordLayout(field->getParent());
4266
155k
    unsigned CharWidth = getContext().getCharWidth();
4267
155k
    if (FieldTBAAInfo.BaseType)
4268
2.24k
      FieldTBAAInfo.Offset +=
4269
2.24k
          Layout.getFieldOffset(field->getFieldIndex()) / CharWidth;
4270
4271
    // Update the final access type and size.
4272
155k
    FieldTBAAInfo.AccessType = CGM.getTBAATypeInfo(FieldType);
4273
155k
    FieldTBAAInfo.Size =
4274
155k
        getContext().getTypeSizeInChars(FieldType).getQuantity();
4275
155k
  }
4276
4277
161k
  Address addr = base.getAddress(*this);
4278
161k
  if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
4279
155k
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
4280
18
        ClassDef->isDynamicClass()) {
4281
      // Getting to any field of dynamic object requires stripping dynamic
4282
      // information provided by invariant.group.  This is because accessing
4283
      // fields may leak the real address of dynamic object, which could result
4284
      // in miscompilation when leaked pointer would be compared.
4285
5
      auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
4286
5
      addr = Address(stripped, addr.getAlignment());
4287
5
    }
4288
155k
  }
4289
4290
161k
  unsigned RecordCVR = base.getVRQualifiers();
4291
161k
  if (rec->isUnion()) {
4292
    // For unions, there is no pointer adjustment.
4293
5.16k
    if (CGM.getCodeGenOpts().StrictVTablePointers &&
4294
10
        hasAnyVptr(FieldType, getContext()))
4295
      // Because unions can easily skip invariant.barriers, we need to add
4296
      // a barrier every time CXXRecord field with vptr is referenced.
4297
6
      addr = Address(Builder.CreateLaunderInvariantGroup(addr.getPointer()),
4298
6
                     addr.getAlignment());
4299
4300
5.16k
    if (IsInPreservedAIRegion ||
4301
5.15k
        (getDebugInfo() && rec->hasAttr<BPFPreserveAccessIndexAttr>())) {
4302
      // Remember the original union field index
4303
19
      llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(base.getType(),
4304
19
          rec->getLocation());
4305
19
      addr = Address(
4306
19
          Builder.CreatePreserveUnionAccessIndex(
4307
19
              addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
4308
19
          addr.getAlignment());
4309
19
    }
4310
4311
5.16k
    if (FieldType->isReferenceType())
4312
3
      addr = Builder.CreateElementBitCast(
4313
3
          addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
4314
156k
  } else {
4315
156k
    if (!IsInPreservedAIRegion &&
4316
156k
        (!getDebugInfo() || !rec->hasAttr<BPFPreserveAccessIndexAttr>()))
4317
      // For structs, we GEP to the field that the record layout suggests.
4318
156k
      addr = emitAddrOfFieldStorage(*this, addr, field);
4319
35
    else
4320
      // Remember the original struct field index
4321
35
      addr = emitPreserveStructAccess(*this, base, addr, field);
4322
156k
  }
4323
4324
  // If this is a reference field, load the reference right now.
4325
161k
  if (FieldType->isReferenceType()) {
4326
5.71k
    LValue RefLVal =
4327
5.71k
        MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4328
5.71k
    if (RecordCVR & Qualifiers::Volatile)
4329
0
      RefLVal.getQuals().addVolatile();
4330
5.71k
    addr = EmitLoadOfReference(RefLVal, &FieldBaseInfo, &FieldTBAAInfo);
4331
4332
    // Qualifiers on the struct don't apply to the referencee.
4333
5.71k
    RecordCVR = 0;
4334
5.71k
    FieldType = FieldType->getPointeeType();
4335
5.71k
  }
4336
4337
  // Make sure that the address is pointing to the right type.  This is critical
4338
  // for both unions and structs.  A union needs a bitcast, a struct element
4339
  // will need a bitcast if the LLVM type laid out doesn't match the desired
4340
  // type.
4341
161k
  addr = Builder.CreateElementBitCast(
4342
161k
      addr, CGM.getTypes().ConvertTypeForMem(FieldType), field->getName());
4343
4344
161k
  if (field->hasAttr<AnnotateAttr>())
4345
2
    addr = EmitFieldAnnotations(field, addr);
4346
4347
161k
  LValue LV = MakeAddrLValue(addr, FieldType, FieldBaseInfo, FieldTBAAInfo);
4348
161k
  LV.getQuals().addCVRQualifiers(RecordCVR);
4349
4350
  // __weak attribute on a field is ignored.
4351
161k
  if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
4352
0
    LV.getQuals().removeObjCGCAttr();
4353
4354
161k
  return LV;
4355
161k
}
4356
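A sketch of field kinds that take the different paths above (hypothetical types): bit-fields go through MakeBitfield, fields of may_alias records and union members get may-alias TBAA, and reference fields are loaded immediately so the record's qualifiers do not apply to the referencee.

struct __attribute__((may_alias)) Aliased { int i; };   // field access gets may-alias TBAA

union Raw { int i; float f; };                          // union member: no pointer adjustment, may-alias TBAA

struct Mixed {
  int bits : 3;   // bit-field: lowered via MakeBitfield on its storage unit
  int &ref;       // reference field: EmitLoadOfReference before forming the result l-value
};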
4357
LValue
4358
CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
4359
22.2k
                                                  const FieldDecl *Field) {
4360
22.2k
  QualType FieldType = Field->getType();
4361
4362
22.2k
  if (!FieldType->isReferenceType())
4363
18.2k
    return EmitLValueForField(Base, Field);
4364
4365
3.98k
  Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
4366
4367
  // Make sure that the address is pointing to the right type.
4368
3.98k
  llvm::Type *llvmType = ConvertTypeForMem(FieldType);
4369
3.98k
  V = Builder.CreateElementBitCast(V, llvmType, Field->getName());
4370
4371
  // TODO: Generate TBAA information that describes this access as a structure
4372
  // member access and not just an access to an object of the field's type. This
4373
  // should be similar to what we do in EmitLValueForField().
4374
3.98k
  LValueBaseInfo BaseInfo = Base.getBaseInfo();
4375
3.98k
  AlignmentSource FieldAlignSource = BaseInfo.getAlignmentSource();
4376
3.98k
  LValueBaseInfo FieldBaseInfo(getFieldAlignmentSource(FieldAlignSource));
4377
3.98k
  return MakeAddrLValue(V, FieldType, FieldBaseInfo,
4378
3.98k
                        CGM.getTBAAInfoForSubobject(Base, FieldType));
4379
3.98k
}
4380
4381
1.83k
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
4382
1.83k
  if (E->isFileScope()) {
4383
3
    ConstantAddress GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
4384
3
    return MakeAddrLValue(GlobalPtr, E->getType(), AlignmentSource::Decl);
4385
3
  }
4386
1.83k
  if (E->getType()->isVariablyModifiedType())
4387
    // make sure to emit the VLA size.
4388
4
    EmitVariablyModifiedType(E->getType());
4389
4390
1.83k
  Address DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
4391
1.83k
  const Expr *InitExpr = E->getInitializer();
4392
1.83k
  LValue Result = MakeAddrLValue(DeclPtr, E->getType(), AlignmentSource::Decl);
4393
4394
1.83k
  EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
4395
1.83k
                   /*Init*/ true);
4396
4397
  // Block-scope compound literals are destroyed at the end of the enclosing
4398
  // scope in C.
4399
1.83k
  if (!getLangOpts().CPlusPlus)
4400
1.74k
    if (QualType::DestructionKind DtorKind = E->getType().isDestructedType())
4401
12
      pushLifetimeExtendedDestroy(getCleanupKind(DtorKind), DeclPtr,
4402
12
                                  E->getType(), getDestroyer(DtorKind),
4403
12
                                  DtorKind & EHCleanup);
4404
4405
1.83k
  return Result;
4406
1.83k
}
4407
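A sketch of the two compound-literal cases above (C mode; names hypothetical): a file-scope literal becomes a global emitted through GetAddrOfConstantCompoundLiteral, while a block-scope literal gets a ".compoundliteral" stack temporary whose destruction, in C, is tied to the enclosing scope.

int *file_scope = (int[]){1, 2, 3};   // file scope: address of a global

int first(void) {
  int *p = (int[]){4, 5, 6};          // block scope: CreateMemTemp + EmitAnyExprToMem
  return p[0];
}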
4408
6
LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
4409
6
  if (!E->isGLValue())
4410
    // Initializing an aggregate temporary in C++11: T{...}.
4411
0
    return EmitAggExprToLValue(E);
4412
4413
  // An lvalue initializer list must be initializing a reference.
4414
6
  assert(E->isTransparent() && "non-transparent glvalue init list");
4415
6
  return EmitLValue(E->getInit(0));
4416
6
}
4417
4418
/// Emit the operand of a glvalue conditional operator. This is either a glvalue
4419
/// or a (possibly-parenthesized) throw-expression. If this is a throw, no
4420
/// LValue is returned and the current block has been terminated.
4421
static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
4422
912
                                                    const Expr *Operand) {
4423
912
  if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) {
4424
2
    CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false);
4425
2
    return None;
4426
2
  }
4427
4428
910
  return CGF.EmitLValue(Operand);
4429
910
}
4430
4431
LValue CodeGenFunction::
4432
467
EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
4433
467
  if (!expr->isGLValue()) {
4434
    // ?: here should be an aggregate.
4435
7
    assert(hasAggregateEvaluationKind(expr->getType()) &&
4436
7
           "Unexpected conditional operator!");
4437
7
    return EmitAggExprToLValue(expr);
4438
7
  }
4439
4440
460
  OpaqueValueMapping binding(*this, expr);
4441
4442
460
  const Expr *condExpr = expr->getCond();
4443
460
  bool CondExprBool;
4444
460
  if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
4445
4
    const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
4446
4
    if (!CondExprBool) std::swap(live, dead);
4447
4448
4
    if (!ContainsLabel(dead)) {
4449
      // If the true case is live, we need to track its region.
4450
4
      if (CondExprBool)
4451
3
        incrementProfileCounter(expr);
4452
      // If a throw expression we emit it and return an undefined lvalue
4453
      // because it can't be used.
4454
4
      if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
4455
1
        EmitCXXThrowExpr(ThrowExpr);
4456
1
        llvm::Type *Ty =
4457
1
            llvm::PointerType::getUnqual(ConvertType(dead->getType()));
4458
1
        return MakeAddrLValue(
4459
1
            Address(llvm::UndefValue::get(Ty), CharUnits::One()),
4460
1
            dead->getType());
4461
1
      }
4462
3
      return EmitLValue(live);
4463
3
    }
4464
4
  }
4465
4466
456
  llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
4467
456
  llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
4468
456
  llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
4469
4470
456
  ConditionalEvaluation eval(*this);
4471
456
  EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));
4472
4473
  // Any temporaries created here are conditional.
4474
456
  EmitBlock(lhsBlock);
4475
456
  incrementProfileCounter(expr);
4476
456
  eval.begin(*this);
4477
456
  Optional<LValue> lhs =
4478
456
      EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
4479
456
  eval.end(*this);
4480
4481
456
  if (lhs && !lhs->isSimple())
4482
0
    return EmitUnsupportedLValue(expr, "conditional operator");
4483
4484
456
  lhsBlock = Builder.GetInsertBlock();
4485
456
  if (lhs)
4486
455
    Builder.CreateBr(contBlock);
4487
4488
  // Any temporaries created here are conditional.
4489
456
  EmitBlock(rhsBlock);
4490
456
  eval.begin(*this);
4491
456
  Optional<LValue> rhs =
4492
456
      EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
4493
456
  eval.end(*this);
4494
456
  if (rhs && !rhs->isSimple())
4495
0
    return EmitUnsupportedLValue(expr, "conditional operator");
4496
456
  rhsBlock = Builder.GetInsertBlock();
4497
4498
456
  EmitBlock(contBlock);
4499
4500
456
  if (lhs && rhs) {
4501
454
    llvm::PHINode *phi =
4502
454
        Builder.CreatePHI(lhs->getPointer(*this)->getType(), 2, "cond-lvalue");
4503
454
    phi->addIncoming(lhs->getPointer(*this), lhsBlock);
4504
454
    phi->addIncoming(rhs->getPointer(*this), rhsBlock);
4505
454
    Address result(phi, std::min(lhs->getAlignment(), rhs->getAlignment()));
4506
454
    AlignmentSource alignSource =
4507
454
      std::max(lhs->getBaseInfo().getAlignmentSource(),
4508
454
               rhs->getBaseInfo().getAlignmentSource());
4509
454
    TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
4510
454
        lhs->getTBAAInfo(), rhs->getTBAAInfo());
4511
454
    return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
4512
454
                          TBAAInfo);
4513
2
  } else {
4514
2
    assert((lhs || rhs) &&
4515
2
           "both operands of glvalue conditional are throw-expressions?");
4516
1
    return lhs ? *lhs : *rhs;
4517
2
  }
4518
456
}
4519
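A sketch of glvalue conditionals handled above (C++; names hypothetical): when both arms are glvalues the result is the PHI of their addresses, and a throw arm is emitted and simply contributes no incoming edge.

void assign(bool c, int &a, int &b) {
  (c ? a : b) = 42;                       // both arms are l-values -> "cond-lvalue" PHI
  (c ? a : throw "unreachable") = 7;      // throw arm: EmitCXXThrowExpr, branch terminated
}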
4520
/// EmitCastLValue - Casts are never lvalues unless that cast is to a reference
4521
/// type. If the cast is to a reference, we can have the usual lvalue result,
4522
/// otherwise if a cast is needed by the code generator in an lvalue context,
4523
/// then it must mean that we need the address of an aggregate in order to
4524
/// access one of its members.  This can happen for all the reasons that casts
4525
/// are permitted with aggregate result, including noop aggregate casts, and
4526
/// cast from scalar to union.
4527
39.0k
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
4528
39.0k
  switch (E->getCastKind()) {
4529
0
  case CK_ToVoid:
4530
0
  case CK_BitCast:
4531
0
  case CK_LValueToRValueBitCast:
4532
0
  case CK_ArrayToPointerDecay:
4533
0
  case CK_FunctionToPointerDecay:
4534
0
  case CK_NullToMemberPointer:
4535
0
  case CK_NullToPointer:
4536
0
  case CK_IntegralToPointer:
4537
0
  case CK_PointerToIntegral:
4538
0
  case CK_PointerToBoolean:
4539
0
  case CK_VectorSplat:
4540
0
  case CK_IntegralCast:
4541
0
  case CK_BooleanToSignedIntegral:
4542
0
  case CK_IntegralToBoolean:
4543
0
  case CK_IntegralToFloating:
4544
0
  case CK_FloatingToIntegral:
4545
0
  case CK_FloatingToBoolean:
4546
0
  case CK_FloatingCast:
4547
0
  case CK_FloatingRealToComplex:
4548
0
  case CK_FloatingComplexToReal:
4549
0
  case CK_FloatingComplexToBoolean:
4550
0
  case CK_FloatingComplexCast:
4551
0
  case CK_FloatingComplexToIntegralComplex:
4552
0
  case CK_IntegralRealToComplex:
4553
0
  case CK_IntegralComplexToReal:
4554
0
  case CK_IntegralComplexToBoolean:
4555
0
  case CK_IntegralComplexCast:
4556
0
  case CK_IntegralComplexToFloatingComplex:
4557
0
  case CK_DerivedToBaseMemberPointer:
4558
0
  case CK_BaseToDerivedMemberPointer:
4559
0
  case CK_MemberPointerToBoolean:
4560
0
  case CK_ReinterpretMemberPointer:
4561
0
  case CK_AnyPointerToBlockPointerCast:
4562
0
  case CK_ARCProduceObject:
4563
0
  case CK_ARCConsumeObject:
4564
0
  case CK_ARCReclaimReturnedObject:
4565
0
  case CK_ARCExtendBlockObject:
4566
0
  case CK_CopyAndAutoreleaseBlockObject:
4567
0
  case CK_IntToOCLSampler:
4568
0
  case CK_FixedPointCast:
4569
0
  case CK_FixedPointToBoolean:
4570
0
  case CK_FixedPointToIntegral:
4571
0
  case CK_IntegralToFixedPoint:
4572
0
    return EmitUnsupportedLValue(E, "unexpected cast lvalue");
4573
4574
0
  case CK_Dependent:
4575
0
    llvm_unreachable("dependent cast kind in IR gen!");
4576
4577
0
  case CK_BuiltinFnToFnPtr:
4578
0
    llvm_unreachable("builtin functions are handled elsewhere");
4579
4580
  // These are never l-values; just use the aggregate emission code.
4581
1
  case CK_NonAtomicToAtomic:
4582
1
  case CK_AtomicToNonAtomic:
4583
1
    return EmitAggExprToLValue(E);
4584
4585
13
  case CK_Dynamic: {
4586
13
    LValue LV = EmitLValue(E->getSubExpr());
4587
13
    Address V = LV.getAddress(*this);
4588
13
    const auto *DCE = cast<CXXDynamicCastExpr>(E);
4589
13
    return MakeNaturalAlignAddrLValue(EmitDynamicCast(V, DCE), E->getType());
4590
1
  }
4591
4592
28.4k
  case CK_ConstructorConversion:
4593
28.4k
  case CK_UserDefinedConversion:
4594
28.4k
  case CK_CPointerToObjCPointerCast:
4595
28.4k
  case CK_BlockPointerToObjCPointerCast:
4596
28.4k
  case CK_NoOp:
4597
28.4k
  case CK_LValueToRValue:
4598
28.4k
    return EmitLValue(E->getSubExpr());
4599
4600
10.4k
  case CK_UncheckedDerivedToBase:
4601
10.4k
  case CK_DerivedToBase: {
4602
10.4k
    const auto *DerivedClassTy =
4603
10.4k
        E->getSubExpr()->getType()->castAs<RecordType>();
4604
10.4k
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
4605
4606
10.4k
    LValue LV = EmitLValue(E->getSubExpr());
4607
10.4k
    Address This = LV.getAddress(*this);
4608
4609
    // Perform the derived-to-base conversion
4610
10.4k
    Address Base = GetAddressOfBaseClass(
4611
10.4k
        This, DerivedClassDecl, E->path_begin(), E->path_end(),
4612
10.4k
        /*NullCheckValue=*/false, E->getExprLoc());
4613
4614
    // TODO: Support accesses to members of base classes in TBAA. For now, we
4615
    // conservatively pretend that the complete object is of the base class
4616
    // type.
4617
10.4k
    return MakeAddrLValue(Base, E->getType(), LV.getBaseInfo(),
4618
10.4k
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
4619
10.4k
  }
4620
3
  case CK_ToUnion:
4621
3
    return EmitAggExprToLValue(E);
4622
79
  case CK_BaseToDerived: {
4623
79
    const auto *DerivedClassTy = E->getType()->castAs<RecordType>();
4624
79
    auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
4625
4626
79
    LValue LV = EmitLValue(E->getSubExpr());
4627
4628
    // Perform the base-to-derived conversion
4629
79
    Address Derived = GetAddressOfDerivedClass(
4630
79
        LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
4631
79
        /*NullCheckValue=*/false);
4632
4633
    // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
4634
    // performed and the object is not of the derived type.
4635
79
    if (sanitizePerformTypeCheck())
4636
7
      EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
4637
7
                    Derived.getPointer(), E->getType());
4638
4639
79
    if (SanOpts.has(SanitizerKind::CFIDerivedCast))
4640
3
      EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
4641
3
                                /*MayBeNull=*/false, CFITCK_DerivedCast,
4642
3
                                E->getBeginLoc());
4643
4644
79
    return MakeAddrLValue(Derived, E->getType(), LV.getBaseInfo(),
4645
79
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
4646
10.4k
  }
4647
23
  case CK_LValueBitCast: {
4648
    // This must be a reinterpret_cast (or c-style equivalent).
4649
23
    const auto *CE = cast<ExplicitCastExpr>(E);
4650
4651
23
    CGM.EmitExplicitCastExprType(CE, this);
4652
23
    LValue LV = EmitLValue(E->getSubExpr());
4653
23
    Address V = Builder.CreateBitCast(LV.getAddress(*this),
4654
23
                                      ConvertType(CE->getTypeAsWritten()));
4655
4656
23
    if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
4657
4
      EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
4658
4
                                /*MayBeNull=*/false, CFITCK_UnrelatedCast,
4659
4
                                E->getBeginLoc());
4660
4661
23
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
4662
23
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
4663
10.4k
  }
4664
66
  case CK_AddressSpaceConversion: {
4665
66
    LValue LV = EmitLValue(E->getSubExpr());
4666
66
    QualType DestTy = getContext().getPointerType(E->getType());
4667
66
    llvm::Value *V = getTargetHooks().performAddrSpaceCast(
4668
66
        *this, LV.getPointer(*this),
4669
66
        E->getSubExpr()->getType().getAddressSpace(),
4670
66
        E->getType().getAddressSpace(), ConvertType(DestTy));
4671
66
    return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()),
4672
66
                          E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
4673
10.4k
  }
4674
5
  case CK_ObjCObjectLValueCast: {
4675
5
    LValue LV = EmitLValue(E->getSubExpr());
4676
5
    Address V = Builder.CreateElementBitCast(LV.getAddress(*this),
4677
5
                                             ConvertType(E->getType()));
4678
5
    return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
4679
5
                          CGM.getTBAAInfoForSubobject(LV, E->getType()));
4680
10.4k
  }
4681
0
  case CK_ZeroToOCLOpaqueType:
4682
0
    llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
4683
0
  }
4684
4685
0
  llvm_unreachable("Unhandled lvalue cast kind?");
4686
0
}
4687
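A sketch of casts that still yield l-values and so reach the switch above (C++; types hypothetical); the cast kinds noted in the comments are roughly what Sema produces for these forms.

struct Base { int b; };
struct Derived : Base { int d; };

void touch(Derived &d) {
  static_cast<Base &>(d).b = 1;              // derived-to-base conversion on an l-value
  reinterpret_cast<unsigned &>(d.d) = 2u;    // reinterpret of an l-value (an l-value bit cast)
  const_cast<Derived &>(d).d = 3;            // no-op cast: just EmitLValue on the sub-expression
}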
4688
570
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
4689
570
  assert(OpaqueValueMappingData::shouldBindAsLValue(e));
4690
570
  return getOrCreateOpaqueLValueMapping(e);
4691
570
}
4692
4693
LValue
4694
827
CodeGenFunction::getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e) {
4695
827
  assert(OpaqueValueMapping::shouldBindAsLValue(e));
4696
4697
827
  llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
4698
827
      it = OpaqueLValues.find(e);
4699
4700
827
  if (it != OpaqueLValues.end())
4701
721
    return it->second;
4702
4703
106
  assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
4704
106
  return EmitLValue(e->getSourceExpr());
4705
106
}
4706
4707
RValue
4708
2.20k
CodeGenFunction::getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e) {
4709
2.20k
  assert(!OpaqueValueMapping::shouldBindAsLValue(e));
4710
4711
2.20k
  llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
4712
2.20k
      it = OpaqueRValues.find(e);
4713
4714
2.20k
  if (it != OpaqueRValues.end())
4715
1.64k
    return it->second;
4716
4717
558
  assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
4718
558
  return EmitAnyExpr(e->getSourceExpr());
4719
558
}
4720
4721
RValue CodeGenFunction::EmitRValueForField(LValue LV,
4722
                                           const FieldDecl *FD,
4723
44
                                           SourceLocation Loc) {
4724
44
  QualType FT = FD->getType();
4725
44
  LValue FieldLV = EmitLValueForField(LV, FD);
4726
44
  switch (getEvaluationKind(FT)) {
4727
1
  case TEK_Complex:
4728
1
    return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
4729
0
  case TEK_Aggregate:
4730
0
    return FieldLV.asAggregateRValue(*this);
4731
43
  case TEK_Scalar:
4732
    // This routine is used to load fields one-by-one to perform a copy, so
4733
    // don't load reference fields.
4734
43
    if (FD->getType()->isReferenceType())
4735
1
      return RValue::get(FieldLV.getPointer(*this));
4736
    // Call EmitLoadOfScalar except when the lvalue is a bitfield to emit a
4737
    // primitive load.
4738
42
    if (FieldLV.isBitField())
4739
0
      return EmitLoadOfLValue(FieldLV, Loc);
4740
42
    return RValue::get(EmitLoadOfScalar(FieldLV, Loc));
4741
0
  }
4742
0
  llvm_unreachable("bad evaluation kind");
4743
0
}
4744
4745
//===--------------------------------------------------------------------===//
4746
//                             Expression Emission
4747
//===--------------------------------------------------------------------===//
4748
4749
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
4750
285k
                                     ReturnValueSlot ReturnValue) {
4751
  // Builtins never have block type.
4752
285k
  if (E->getCallee()->getType()->isBlockPointerType())
4753
569
    return EmitBlockCallExpr(E, ReturnValue);
4754
4755
284k
  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E))
4756
57.6k
    return EmitCXXMemberCallExpr(CE, ReturnValue);
4757
4758
226k
  if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
4759
28
    return EmitCUDAKernelCallExpr(CE, ReturnValue);
4760
4761
226k
  if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
4762
12.2k
    if (const CXXMethodDecl *MD =
4763
10.6k
          dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
4764
10.6k
      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
4765
4766
216k
  CGCallee callee = EmitCallee(E->getCallee());
4767
4768
216k
  if (callee.isBuiltin()) {
4769
60.5k
    return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
4770
60.5k
                           E, ReturnValue);
4771
60.5k
  }
4772
4773
155k
  if (callee.isPseudoDestructor()) {
4774
164
    return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
4775
164
  }
4776
4777
155k
  return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
4778
155k
}
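As a rough guide to the dispatch above, each call in the sketch below should take a different branch of EmitCallExpr. The snippet is illustrative only; the type and function names are invented, and the AST classes named in the comments are inferred from the checks in the code:

    struct S {
      void method();                    // non-static member function
      S operator+(const S &) const;     // overloaded member operator
    };
    typedef int I;

    void demo(S s, int (*fp)(int), int *p) {
      s.method();                  // CXXMemberCallExpr   -> EmitCXXMemberCallExpr
      s + s;                       // CXXOperatorCallExpr -> EmitCXXOperatorMemberCallExpr
      (void)__builtin_popcount(0); // builtin callee      -> EmitBuiltinExpr
      p->~I();                     // pseudo-destructor   -> EmitCXXPseudoDestructorExpr
      fp(0);                       // indirect call       -> EmitCall via EmitCallee
    }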
4779
4780
/// Emit a CallExpr without considering whether it might be a subclass.
4781
RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
4782
28
                                           ReturnValueSlot ReturnValue) {
4783
28
  CGCallee Callee = EmitCallee(E->getCallee());
4784
28
  return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
4785
28
}
4786
4787
211k
static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
4788
211k
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
4789
4790
211k
  if (auto builtinID = FD->getBuiltinID()) {
4791
    // Replaceable builtins provide their own implementation of a builtin. Unless
4792
    // we are in the builtin implementation itself, don't call the actual
4793
    // builtin. If we are in the builtin implementation, avoid trivial infinite
4794
    // recursion.
4795
60.5k
    if (!FD->isInlineBuiltinDeclaration() ||
4796
18
        CGF.CurFn->getName() == FD->getName())
4797
60.5k
      return CGCallee::forBuiltin(builtinID, FD);
4798
151k
  }
4799
4800
151k
  llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
4801
151k
  return CGCallee::forDirect(calleePtr, GD);
4802
151k
}
4803
4804
427k
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
4805
427k
  E = E->IgnoreParens();
4806
4807
  // Look through function-to-pointer decay.
4808
427k
  if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
4809
215k
    if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
4810
211k
        ICE->getCastKind() == CK_BuiltinFnToFnPtr) {   (region count: 54.9k)
4811
211k
      return EmitCallee(ICE->getSubExpr());
4812
211k
    }
4813
4814
  // Resolve direct calls.
4815
212k
  } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
4816
211k
    if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
4817
211k
      return EmitDirectCallee(*this, FD);
4818
211k
    }
4819
595
  } else if (auto ME = dyn_cast<MemberExpr>(E)) {
4820
139
    if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
4821
139
      EmitIgnoredExpr(ME->getBase());
4822
139
      return EmitDirectCallee(*this, FD);
4823
139
    }
4824
4825
  // Look through template substitutions.
4826
456
  } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
4827
14
    return EmitCallee(NTTP->getReplacement());
4828
4829
  // Treat pseudo-destructor calls differently.
4830
442
  } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
4831
164
    return CGCallee::forPseudoDestructor(PDE);
4832
164
  }
4833
4834
  // Otherwise, we have an indirect reference.
4835
4.53k
  llvm::Value *calleePtr;
4836
4.53k
  QualType functionType;
4837
4.53k
  if (auto ptrType = E->getType()->getAs<PointerType>()) {
4838
4.51k
    calleePtr = EmitScalarExpr(E);
4839
4.51k
    functionType = ptrType->getPointeeType();
4840
21
  } else {
4841
21
    functionType = E->getType();
4842
21
    calleePtr = EmitLValue(E).getPointer(*this);
4843
21
  }
4844
4.53k
  assert(functionType->isFunctionType());
4845
4846
4.53k
  GlobalDecl GD;
4847
4.53k
  if (const auto *VD =
4848
2.55k
          dyn_cast_or_null<VarDecl>(E->getReferencedDeclOfCallee()))
4849
2.55k
    GD = GlobalDecl(VD);
4850
4851
4.53k
  CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), GD);
4852
4.53k
  CGCallee callee(calleeInfo, calleePtr);
4853
4.53k
  return callee;
4854
4.53k
}
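A hedged sketch of source forms that should exercise the main branches of EmitCallee; the identifiers are invented and the branch assignments in the comments are inferred from the checks above rather than observed:

    int global_fn();
    struct T { static int static_fn(); };

    void demo(T t, int (*fp)(), int (&fr)()) {
      global_fn();    // DeclRefExpr naming a FunctionDecl -> EmitDirectCallee
      t.static_fn();  // MemberExpr naming a FunctionDecl: the base 't' is emitted
                      // for side effects only, then the call is still direct
      fp();           // pointer to function -> the indirect (PointerType) path
      fr();           // reference to function: likely the function-typed branch,
                      // where the callee address comes from EmitLValue
    }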
4855
4856
113k
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
4857
  // Comma expressions just emit their LHS then their RHS as an l-value.
4858
113k
  if (E->getOpcode() == BO_Comma) {
4859
527
    EmitIgnoredExpr(E->getLHS());
4860
527
    EnsureInsertPoint();
4861
527
    return EmitLValue(E->getRHS());
4862
527
  }
4863
4864
112k
  if (E->getOpcode() == BO_PtrMemD ||
4865
112k
      E->getOpcode() == BO_PtrMemI)
4866
81
    return EmitPointerToDataMemberBinaryExpr(E);
4867
4868
112k
  assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
4869
4870
  // Note that in all of these cases, __block variables need the RHS
4871
  // evaluated first just in case the variable gets moved by the RHS.
4872
4873
112k
  switch (getEvaluationKind(E->getType())) {
4874
112k
  case TEK_Scalar: {
4875
112k
    switch (E->getLHS()->getType().getObjCLifetime()) {
4876
29
    case Qualifiers::OCL_Strong:
4877
29
      return EmitARCStoreStrong(E, /*ignored*/ false).first;
4878
4879
0
    case Qualifiers::OCL_Autoreleasing:
4880
0
      return EmitARCStoreAutoreleasing(E).first;
4881
4882
    // No reason to do any of these differently.
4883
112k
    case Qualifiers::OCL_None:
4884
112k
    case Qualifiers::OCL_ExplicitNone:
4885
112k
    case Qualifiers::OCL_Weak:
4886
112k
      break;
4887
112k
    }
4888
4889
112k
    RValue RV = EmitAnyExpr(E->getRHS());
4890
112k
    LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store);
4891
112k
    if (RV.isScalar())
4892
112k
      EmitNullabilityCheck(LV, RV.getScalarVal(), E->getExprLoc());
4893
112k
    EmitStoreThroughLValue(RV, LV);
4894
112k
    if (getLangOpts().OpenMP)
4895
78.7k
      CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(*this,
4896
78.7k
                                                                E->getLHS());
4897
112k
    return LV;
4898
112k
  }
4899
4900
31
  case TEK_Complex:
4901
31
    return EmitComplexAssignmentLValue(E);
4902
4903
4
  case TEK_Aggregate:
4904
4
    return EmitAggExprToLValue(E);
4905
0
  }
4906
0
  llvm_unreachable("bad evaluation kind");
4907
0
}
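For reference, a small hedged example of C++ forms that reach the three l-value cases above (comma, pointer-to-member, and assignment used as an l-value); the names are illustrative:

    struct P { int x; };

    void demo(int a, int b, P *p, int P::*pm) {
      (a, b) = 1;        // BO_Comma: LHS emitted for side effects, 'b' is the l-value
      (p->*pm) = 2;      // BO_PtrMemI -> EmitPointerToDataMemberBinaryExpr
      int &r = (a = b);  // BO_Assign: in C++ an assignment expression is an l-value
      r = 3;
    }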
4908
4909
44.1k
LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
4910
44.1k
  RValue RV = EmitCallExpr(E);
4911
4912
44.1k
  if (!RV.isScalar())
4913
15
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
4914
15
                          AlignmentSource::Decl);
4915
4916
44.1k
  assert(E->getCallReturnType(getContext())->isReferenceType() &&
4917
44.1k
         "Can't have a scalar return unless the return type is a "
4918
44.1k
         "reference type!");
4919
4920
44.1k
  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
4921
44.1k
}
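EmitCallExprLValue is reached when a call itself is used as an l-value; as the assert says, a scalar result is only possible when the callee returns a reference. A minimal hedged example ('pick' is a made-up function):

    int &pick(int &a, int &b);

    void demo(int x, int y) {
      pick(x, y) = 42;   // the returned reference supplies the address to store to
    }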
4922
4923
11
LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
4924
  // FIXME: This shouldn't require another copy.
4925
11
  return EmitAggExprToLValue(E);
4926
11
}
4927
4928
1
LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
4929
1
  assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
4930
1
         && "binding l-value to type which needs a temporary");
4931
1
  AggValueSlot Slot = CreateAggTemp(E->getType());
4932
1
  EmitCXXConstructExpr(E, Slot);
4933
1
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
4934
1
}
4935
4936
LValue
4937
344
CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
4938
344
  return MakeNaturalAlignAddrLValue(EmitCXXTypeidExpr(E), E->getType());
4939
344
}
4940
4941
23
Address CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
4942
23
  return Builder.CreateElementBitCast(CGM.GetAddrOfMSGuidDecl(E->getGuidDecl()),
4943
23
                                      ConvertType(E->getType()));
4944
23
}
4945
4946
23
LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
4947
23
  return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType(),
4948
23
                        AlignmentSource::Decl);
4949
23
}
4950
4951
LValue
4952
3
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
4953
3
  AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
4954
3
  Slot.setExternallyDestructed();
4955
3
  EmitAggExpr(E->getSubExpr(), Slot);
4956
3
  EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddress());
4957
3
  return MakeAddrLValue(Slot.getAddress(), E->getType(), AlignmentSource::Decl);
4958
3
}
4959
4960
20
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
4961
20
  RValue RV = EmitObjCMessageExpr(E);
4962
4963
20
  if (!RV.isScalar())
4964
7
    return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
4965
7
                          AlignmentSource::Decl);
4966
4967
13
  assert(E->getMethodDecl()->getReturnType()->isReferenceType() &&
4968
13
         "Can't have a scalar return unless the return type is a "
4969
13
         "reference type!");
4970
4971
13
  return MakeNaturalAlignPointeeAddrLValue(RV.getScalarVal(), E->getType());
4972
13
}
4973
4974
1
LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
4975
1
  Address V =
4976
1
    CGM.getObjCRuntime().GetAddrOfSelector(*this, E->getSelector());
4977
1
  return MakeAddrLValue(V, E->getType(), AlignmentSource::Decl);
4978
1
}
4979
4980
llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
4981
259
                                             const ObjCIvarDecl *Ivar) {
4982
259
  return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
4983
259
}
4984
4985
LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
4986
                                          llvm::Value *BaseValue,
4987
                                          const ObjCIvarDecl *Ivar,
4988
2.22k
                                          unsigned CVRQualifiers) {
4989
2.22k
  return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
4990
2.22k
                                                   Ivar, CVRQualifiers);
4991
2.22k
}
4992
4993
1.53k
LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
4994
  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
4995
1.53k
  llvm::Value *BaseValue = nullptr;
4996
1.53k
  const Expr *BaseExpr = E->getBase();
4997
1.53k
  Qualifiers BaseQuals;
4998
1.53k
  QualType ObjectTy;
4999
1.53k
  if (E->isArrow()) {
5000
1.52k
    BaseValue = EmitScalarExpr(BaseExpr);
5001
1.52k
    ObjectTy = BaseExpr->getType()->getPointeeType();
5002
1.52k
    BaseQuals = ObjectTy.getQualifiers();
5003
6
  } else {
5004
6
    LValue BaseLV = EmitLValue(BaseExpr);
5005
6
    BaseValue = BaseLV.getPointer(*this);
5006
6
    ObjectTy = BaseExpr->getType();
5007
6
    BaseQuals = ObjectTy.getQualifiers();
5008
6
  }
5009
5010
1.53k
  LValue LV =
5011
1.53k
    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
5012
1.53k
                      BaseQuals.getCVRQualifiers());
5013
1.53k
  setObjCGCLValueClass(getContext(), E, LV);
5014
1.53k
  return LV;
5015
1.53k
}
5016
5017
1
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
5018
  // Can only get l-value for message expression returning aggregate type
5019
1
  RValue RV = EmitAnyExprToTemp(E);
5020
1
  return MakeAddrLValue(RV.getAggregateAddress(), E->getType(),
5021
1
                        AlignmentSource::Decl);
5022
1
}
5023
5024
RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
5025
                                 const CallExpr *E, ReturnValueSlot ReturnValue,
5026
157k
                                 llvm::Value *Chain) {
5027
  // Get the actual function type. The callee type will always be a pointer to
5028
  // function type or a block pointer type.
5029
157k
  assert(CalleeType->isFunctionPointerType() &&
5030
157k
         "Call must have function pointer type!");
5031
5032
157k
  const Decl *TargetDecl =
5033
157k
      OrigCallee.getAbstractInfo().getCalleeDecl().getDecl();
5034
5035
157k
  CalleeType = getContext().getCanonicalType(CalleeType);
5036
5037
157k
  auto PointeeType = cast<PointerType>(CalleeType)->getPointeeType();
5038
5039
157k
  CGCallee Callee = OrigCallee;
5040
5041
157k
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&   (region count: 107k)
5042
25
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5043
8
    if (llvm::Constant *PrefixSig =
5044
8
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
5045
8
      SanitizerScope SanScope(this);
5046
      // Remove any (C++17) exception specifications, to allow calling e.g. a
5047
      // noexcept function through a non-noexcept pointer.
5048
8
      auto ProtoTy =
5049
8
        getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
5050
8
      llvm::Constant *FTRTTIConst =
5051
8
          CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
5052
8
      llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty};
5053
8
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
5054
8
          CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
5055
5056
8
      llvm::Value *CalleePtr = Callee.getFunctionPointer();
5057
5058
8
      llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
5059
8
          CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
5060
8
      llvm::Value *CalleeSigPtr =
5061
8
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
5062
8
      llvm::Value *CalleeSig =
5063
8
          Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
5064
8
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
5065
5066
8
      llvm::BasicBlock *Cont = createBasicBlock("cont");
5067
8
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
5068
8
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
5069
5070
8
      EmitBlock(TypeCheck);
5071
8
      llvm::Value *CalleeRTTIPtr =
5072
8
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
5073
8
      llvm::Value *CalleeRTTIEncoded =
5074
8
          Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
5075
8
      llvm::Value *CalleeRTTI =
5076
8
          DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
5077
8
      llvm::Value *CalleeRTTIMatch =
5078
8
          Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
5079
8
      llvm::Constant *StaticData[] = {EmitCheckSourceLocation(E->getBeginLoc()),
5080
8
                                      EmitCheckTypeDescriptor(CalleeType)};
5081
8
      EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
5082
8
                SanitizerHandler::FunctionTypeMismatch, StaticData,
5083
8
                {CalleePtr, CalleeRTTI, FTRTTIConst});
5084
5085
8
      Builder.CreateBr(Cont);
5086
8
      EmitBlock(Cont);
5087
8
    }
5088
8
  }
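The block above is the -fsanitize=function check: in C++ mode, functions built with that option carry prefix data (a signature constant plus encoded RTTI for their type), and indirect call sites compare that RTTI against the static callee type. A hedged user-level sketch of the kind of bug it is meant to catch (the flag is a real clang option; the identifiers and the exact runtime diagnostic are illustrative):

    // Build: clang++ -fsanitize=function demo.cpp
    void takes_int(int);

    void demo() {
      // Calling through a pointer of the wrong type is undefined behaviour;
      // the function sanitizer should flag this call at run time.
      void (*fp)() = reinterpret_cast<void (*)()>(&takes_int);
      fp();
    }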
5089
5090
157k
  const auto *FnType = cast<FunctionType>(PointeeType);
5091
5092
  // If we are checking indirect calls and this call is indirect, check that the
5093
  // function pointer is a member of the bit set for the function type.
5094
157k
  if (SanOpts.has(SanitizerKind::CFIICall) &&
5095
20
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
5096
14
    SanitizerScope SanScope(this);
5097
14
    EmitSanitizerStatReport(llvm::SanStat_CFI_ICall);
5098
5099
14
    llvm::Metadata *MD;
5100
14
    if (CGM.getCodeGenOpts().SanitizeCfiICallGeneralizePointers)
5101
1
      MD = CGM.CreateMetadataIdentifierGeneralized(QualType(FnType, 0));
5102
13
    else
5103
13
      MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
5104
5105
14
    llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
5106
5107
14
    llvm::Value *CalleePtr = Callee.getFunctionPointer();
5108
14
    llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
5109
14
    llvm::Value *TypeTest = Builder.CreateCall(
5110
14
        CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});
5111
5112
14
    auto CrossDsoTypeId = CGM.CreateCrossDsoCfiTypeId(MD);
5113
14
    llvm::Constant *StaticData[] = {
5114
14
        llvm::ConstantInt::get(Int8Ty, CFITCK_ICall),
5115
14
        EmitCheckSourceLocation(E->getBeginLoc()),
5116
14
        EmitCheckTypeDescriptor(QualType(FnType, 0)),
5117
14
    };
5118
14
    if (CGM.getCodeGenOpts().SanitizeCfiCrossDso && CrossDsoTypeId) {   (region count: 5)
5119
5
      EmitCfiSlowPathCheck(SanitizerKind::CFIICall, TypeTest, CrossDsoTypeId,
5120
5
                           CastedCallee, StaticData);
5121
9
    } else {
5122
9
      EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
5123
9
                SanitizerHandler::CFICheckFail, StaticData,
5124
9
                {CastedCallee, llvm::UndefValue::get(IntPtrTy)});
5125
9
    }
5126
14
  }
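Similarly, the CFI indirect-call check above calls the llvm.type.test intrinsic to ask whether the callee address belongs to the type id recorded for the call's function type, then uses either the cross-DSO slow path or an inline check. A hedged sketch of how it is typically enabled and what it rejects (the flags are real clang options; -fsanitize=cfi-icall requires LTO, and the cross-DSO variant corresponds to -fsanitize-cfi-cross-dso as checked above; the code itself is illustrative):

    // Build: clang++ -flto -fvisibility=hidden -fsanitize=cfi-icall demo.cpp
    int add(int a, int b) { return a + b; }

    int demo() {
      // Wrong prototype: the llvm.type.test for 'int(int)' does not match 'add',
      // so the CFI check diagnoses (or traps on) this indirect call.
      auto *fp = reinterpret_cast<int (*)(int)>(&add);
      return fp(7);
    }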
5127
5128
157k
  CallArgList Args;
5129
157k
  if (Chain)
5130
8
    Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
5131
8
             CGM.getContext().VoidPtrTy);
5132
5133
  // C++17 requires that we evaluate arguments to a call using assignment syntax
5134
  // right-to-left, and that we evaluate arguments to certain other operators
5135
  // left-to-right. Note that we allow this to ove