Coverage Report

Created: 2021-01-19 06:58

/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
Line
Count
Source
1
//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This contains code dealing with code generation of C++ expressions
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "CGCUDARuntime.h"
14
#include "CGCXXABI.h"
15
#include "CGDebugInfo.h"
16
#include "CGObjCRuntime.h"
17
#include "CodeGenFunction.h"
18
#include "ConstantEmitter.h"
19
#include "TargetInfo.h"
20
#include "clang/Basic/CodeGenOptions.h"
21
#include "clang/CodeGen/CGFunctionInfo.h"
22
#include "llvm/IR/Intrinsics.h"
23
24
using namespace clang;
25
using namespace CodeGen;
26
27
namespace {
28
struct MemberCallInfo {
29
  RequiredArgs ReqArgs;
30
  // Number of prefix arguments for the call. Ignores the `this` pointer.
31
  unsigned PrefixSize;
32
};
33
}
34
35
static MemberCallInfo
36
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
37
                                  llvm::Value *This, llvm::Value *ImplicitParam,
38
                                  QualType ImplicitParamTy, const CallExpr *CE,
39
94.9k
                                  CallArgList &Args, CallArgList *RtlArgs) {
40
94.9k
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
41
94.9k
         isa<CXXOperatorCallExpr>(CE));
42
94.9k
  assert(MD->isInstance() &&
43
94.9k
         "Trying to emit a member or operator call expr on a static method!");
44
45
  // Push the this ptr.
46
94.9k
  const CXXRecordDecl *RD =
47
94.9k
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
48
94.9k
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));
49
50
  // If there is an implicit parameter (e.g. VTT), emit it.
51
94.9k
  if (ImplicitParam) {
52
191
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
53
191
  }
54
55
94.9k
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
56
94.9k
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
57
94.9k
  unsigned PrefixSize = Args.size() - 1;
58
59
  // And the rest of the call args.
60
94.9k
  if (RtlArgs) {
61
    // Special case: if the caller emitted the arguments right-to-left already
62
    // (prior to emitting the *this argument), we're done. This happens for
63
    // assignment operators.
64
737
    Args.addFrom(*RtlArgs);
65
94.1k
  } else if (CE) {
66
    // Special case: skip first argument of CXXOperatorCall (it is "this").
67
59.2k
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
68
66.5k
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
69
66.5k
                     CE->getDirectCallee());
70
27.5k
  } else {
71
27.5k
    assert(
72
27.5k
        FPT->getNumParams() == 0 &&
73
27.5k
        "No CallExpr specified for function with non-zero number of arguments");
74
27.5k
  }
75
94.9k
  return {required, PrefixSize};
76
94.9k
}
77
78
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
79
    const CXXMethodDecl *MD, const CGCallee &Callee,
80
    ReturnValueSlot ReturnValue,
81
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
82
67.1k
    const CallExpr *CE, CallArgList *RtlArgs) {
83
67.1k
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
84
67.1k
  CallArgList Args;
85
67.1k
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
86
67.1k
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
87
67.1k
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
88
67.1k
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
89
67.1k
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
90
67.1k
                  CE ? CE->getExprLoc() : SourceLocation());
91
67.1k
}
92
93
RValue CodeGenFunction::EmitCXXDestructorCall(
94
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
95
27.8k
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
96
27.8k
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());
97
98
27.8k
  assert(!ThisTy.isNull());
99
27.8k
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
100
27.8k
         "Pointer/Object mixup");
101
102
27.8k
  LangAS SrcAS = ThisTy.getAddressSpace();
103
27.8k
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
104
27.8k
  if (SrcAS != DstAS) {
105
2
    QualType DstTy = DtorDecl->getThisType();
106
2
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
107
2
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
108
2
                                                 NewType);
109
2
  }
110
111
27.8k
  CallArgList Args;
112
27.8k
  commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
113
27.8k
                                    ImplicitParamTy, CE, Args, nullptr);
114
27.8k
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
115
27.8k
                  ReturnValueSlot(), Args, nullptr,
116
27.5k
                  CE ? CE->getExprLoc() : SourceLocation());
117
27.8k
}
118
119
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
120
183
                                            const CXXPseudoDestructorExpr *E) {
121
183
  QualType DestroyedType = E->getDestroyedType();
122
183
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
123
    // Automatic Reference Counting:
124
    //   If the pseudo-expression names a retainable object with weak or
125
    //   strong lifetime, the object shall be released.
126
4
    Expr *BaseExpr = E->getBase();
127
4
    Address BaseValue = Address::invalid();
128
4
    Qualifiers BaseQuals;
129
130
    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
131
4
    if (E->isArrow()) {
132
2
      BaseValue = EmitPointerWithAlignment(BaseExpr);
133
2
      const auto *PTy = BaseExpr->getType()->castAs<PointerType>();
134
2
      BaseQuals = PTy->getPointeeType().getQualifiers();
135
2
    } else {
136
2
      LValue BaseLV = EmitLValue(BaseExpr);
137
2
      BaseValue = BaseLV.getAddress(*this);
138
2
      QualType BaseTy = BaseExpr->getType();
139
2
      BaseQuals = BaseTy.getQualifiers();
140
2
    }
141
142
4
    switch (DestroyedType.getObjCLifetime()) {
143
0
    case Qualifiers::OCL_None:
144
0
    case Qualifiers::OCL_ExplicitNone:
145
0
    case Qualifiers::OCL_Autoreleasing:
146
0
      break;
147
148
2
    case Qualifiers::OCL_Strong:
149
2
      EmitARCRelease(Builder.CreateLoad(BaseValue,
150
2
                        DestroyedType.isVolatileQualified()),
151
2
                     ARCPreciseLifetime);
152
2
      break;
153
154
2
    case Qualifiers::OCL_Weak:
155
2
      EmitARCDestroyWeak(BaseValue);
156
2
      break;
157
179
    }
158
179
  } else {
159
    // C++ [expr.pseudo]p1:
160
    //   The result shall only be used as the operand for the function call
161
    //   operator (), and the result of such a call has type void. The only
162
    //   effect is the evaluation of the postfix-expression before the dot or
163
    //   arrow.
164
179
    EmitIgnoredExpr(E->getBase());
165
179
  }
166
167
183
  return RValue::get(nullptr);
168
183
}
169
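A minimal caller-side sketch (assumed code, not part of this file) of the non-ARC path above: a pseudo-destructor call on a scalar type has no effect beyond evaluating the postfix-expression.

using I = int;
void touch(I *p) {
  p->~I();   // only 'p' is evaluated, via EmitIgnoredExpr(E->getBase())
}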
170
483
static CXXRecordDecl *getCXXRecord(const Expr *E) {
171
483
  QualType T = E->getType();
172
483
  if (const PointerType *PTy = T->getAs<PointerType>())
173
31
    T = PTy->getPointeeType();
174
483
  const RecordType *Ty = T->castAs<RecordType>();
175
483
  return cast<CXXRecordDecl>(Ty->getDecl());
176
483
}
177
178
// Note: This function also emits constructor calls to support the MSVC
179
// extension allowing explicit constructor function calls.
180
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
181
59.7k
                                              ReturnValueSlot ReturnValue) {
182
59.7k
  const Expr *callee = CE->getCallee()->IgnoreParens();
183
184
59.7k
  if (isa<BinaryOperator>(callee))
185
139
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
186
187
59.5k
  const MemberExpr *ME = cast<MemberExpr>(callee);
188
59.5k
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
189
190
59.5k
  if (MD->isStatic()) {
191
    // The method is static, emit it as we would a regular call.
192
0
    CGCallee callee =
193
0
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
194
0
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
195
0
                    ReturnValue);
196
0
  }
197
198
59.5k
  bool HasQualifier = ME->hasQualifier();
199
57.8k
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
200
59.5k
  bool IsArrow = ME->isArrow();
201
59.5k
  const Expr *Base = ME->getBase();
202
203
59.5k
  return EmitCXXMemberOrOperatorMemberCallExpr(
204
59.5k
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
205
59.5k
}
206
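A sketch of the MSVC extension mentioned in the note above (assumed code; requires -fms-extensions): an explicit constructor call is emitted as a complete-object constructor call on the pointee.

struct S {
  S(int);
};
void reinit(S *p) {
  p->S::S(42);   // constructs a new complete object of type S in *p
}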
207
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
208
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
209
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
210
70.0k
    const Expr *Base) {
211
70.0k
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
212
213
  // Compute the object pointer.
214
70.0k
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
215
216
70.0k
  const CXXMethodDecl *DevirtualizedMethod = nullptr;
217
70.0k
  if (CanUseVirtualCall &&
218
1.30k
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
219
412
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
220
412
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
221
412
    assert(DevirtualizedMethod);
222
412
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
223
412
    const Expr *Inner = Base->IgnoreParenBaseCasts();
224
412
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
225
412
        MD->getReturnType().getCanonicalType())
226
      // If the return types are not the same, this might be a case where more
227
      // code needs to run to compensate for it. For example, the derived
228
      // method might return a type that inherits from the return
229
      // type of MD and has a prefix.
230
      // For now we just avoid devirtualizing these covariant cases.
231
2
      DevirtualizedMethod = nullptr;
232
410
    else if (getCXXRecord(Inner) == DevirtualizedClass)
233
      // If the class of the Inner expression is where the dynamic method
234
      // is defined, build the this pointer from it.
235
339
      Base = Inner;
236
71
    else if (getCXXRecord(Base) != DevirtualizedClass) {
237
      // If the method is defined in a class that is not the best dynamic
238
      // one or the one of the full expression, we would have to build
239
      // a derived-to-base cast to compute the correct this pointer, but
240
      // we don't have support for that yet, so do a virtual call.
241
6
      DevirtualizedMethod = nullptr;
242
6
    }
243
412
  }
244
245
70.0k
  bool TrivialForCodegen =
246
70.0k
      MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion());
247
70.0k
  bool TrivialAssignment =
248
70.0k
      TrivialForCodegen &&
249
2.70k
      (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
250
2.52k
      !MD->getParent()->mayInsertExtraPadding();
251
252
  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
253
  // operator before the LHS.
254
70.0k
  CallArgList RtlArgStorage;
255
70.0k
  CallArgList *RtlArgs = nullptr;
256
70.0k
  LValue TrivialAssignmentRHS;
257
70.0k
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
258
10.4k
    if (OCE->isAssignmentOp()) {
259
3.07k
      if (TrivialAssignment) {
260
2.34k
        TrivialAssignmentRHS = EmitLValue(CE->getArg(1));
261
737
      } else {
262
737
        RtlArgs = &RtlArgStorage;
263
737
        EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
264
737
                     drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
265
737
                     /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
266
737
      }
267
3.07k
    }
268
10.4k
  }
269
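A sketch of the C++17 rule applied above (assumed code): for an overloaded assignment operator the right operand must be evaluated first, which is why the arguments are emitted right-to-left here.

struct S { S &operator=(int); };
S &lhs();
int rhs();
void assign() {
  lhs() = rhs();   // C++17: rhs() is evaluated before lhs()
}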
270
70.0k
  LValue This;
271
70.0k
  if (IsArrow) {
272
24.7k
    LValueBaseInfo BaseInfo;
273
24.7k
    TBAAAccessInfo TBAAInfo;
274
24.7k
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
275
24.7k
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
276
45.2k
  } else {
277
45.2k
    This = EmitLValue(Base);
278
45.2k
  }
279
280
70.0k
  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
281
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
282
    // constructing a new complete object of type Ctor.
283
12
    assert(!RtlArgs);
284
12
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
285
12
    CallArgList Args;
286
12
    commonEmitCXXMemberOrOperatorCall(
287
12
        *this, Ctor, This.getPointer(*this), /*ImplicitParam=*/nullptr,
288
12
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
289
290
12
    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
291
12
                           /*Delegating=*/false, This.getAddress(*this), Args,
292
12
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
293
12
                           /*NewPointerIsChecked=*/false);
294
12
    return RValue::get(nullptr);
295
12
  }
296
297
70.0k
  if (TrivialForCodegen) {
298
2.70k
    if (isa<CXXDestructorDecl>(MD))
299
177
      return RValue::get(nullptr);
300
301
2.52k
    if (TrivialAssignment) {
302
      // We don't like to generate the trivial copy/move assignment operator
303
      // when it isn't necessary; just produce the proper effect here.
304
      // It's important that we use the result of EmitLValue here rather than
305
      // emitting call arguments, in order to preserve TBAA information from
306
      // the RHS.
307
2.52k
      LValue RHS = isa<CXXOperatorCallExpr>(CE)
308
2.34k
                       ? TrivialAssignmentRHS
309
181
                       : EmitLValue(*CE->arg_begin());
310
2.52k
      EmitAggregateAssign(This, RHS, CE->getType());
311
2.52k
      return RValue::get(This.getPointer(*this));
312
2.52k
    }
313
314
2
    assert(MD->getParent()->mayInsertExtraPadding() &&
315
2
           "unknown trivial member function");
316
2
  }
317
318
  // Compute the function type we're calling.
319
67.3k
  const CXXMethodDecl *CalleeDecl =
320
66.9k
      DevirtualizedMethod ? DevirtualizedMethod : MD;
321
67.3k
  const CGFunctionInfo *FInfo = nullptr;
322
67.3k
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
323
217
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
324
217
        GlobalDecl(Dtor, Dtor_Complete));
325
67.1k
  else
326
67.1k
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
327
328
67.3k
  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
329
330
  // C++11 [class.mfct.non-static]p2:
331
  //   If a non-static member function of a class X is called for an object that
332
  //   is not of type X, or of a type derived from X, the behavior is undefined.
333
67.3k
  SourceLocation CallLoc;
334
67.3k
  ASTContext &C = getContext();
335
67.3k
  if (CE)
336
67.3k
    CallLoc = CE->getExprLoc();
337
338
67.3k
  SanitizerSet SkippedChecks;
339
67.3k
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
340
59.2k
    auto *IOA = CMCE->getImplicitObjectArgument();
341
59.2k
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
342
59.2k
    if (IsImplicitObjectCXXThis)
343
21.4k
      SkippedChecks.set(SanitizerKind::Alignment, true);
344
59.2k
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
345
34.9k
      SkippedChecks.set(SanitizerKind::Null, true);
346
59.2k
  }
347
67.3k
  EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc,
348
67.3k
                This.getPointer(*this),
349
67.3k
                C.getRecordType(CalleeDecl->getParent()),
350
67.3k
                /*Alignment=*/CharUnits::Zero(), SkippedChecks);
351
352
  // C++ [class.virtual]p12:
353
  //   Explicit qualification with the scope operator (5.1) suppresses the
354
  //   virtual call mechanism.
355
  //
356
  // We also don't emit a virtual call if the base expression has a record type
357
  // because then we know what the type is.
358
67.3k
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
359
360
67.3k
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
361
217
    assert(CE->arg_begin() == CE->arg_end() &&
362
217
           "Destructor shouldn't have explicit parameters");
363
217
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
364
217
    if (UseVirtualCall) {
365
12
      CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
366
12
                                                This.getAddress(*this),
367
12
                                                cast<CXXMemberCallExpr>(CE));
368
205
    } else {
369
205
      GlobalDecl GD(Dtor, Dtor_Complete);
370
205
      CGCallee Callee;
371
205
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
372
2
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
373
203
      else if (!DevirtualizedMethod)
374
201
        Callee =
375
201
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
376
2
      else {
377
2
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
378
2
      }
379
380
205
      QualType ThisTy =
381
174
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
382
205
      EmitCXXDestructorCall(GD, Callee, This.getPointer(*this), ThisTy,
383
205
                            /*ImplicitParam=*/nullptr,
384
205
                            /*ImplicitParamTy=*/QualType(), CE);
385
205
    }
386
217
    return RValue::get(nullptr);
387
217
  }
388
389
  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
390
  // 'CalleeDecl' instead.
391
392
67.1k
  CGCallee Callee;
393
67.1k
  if (UseVirtualCall) {
394
890
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
395
66.2k
  } else {
396
66.2k
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
397
8
        MD->getParent()->isDynamicClass()) {
398
8
      llvm::Value *VTable;
399
8
      const CXXRecordDecl *RD;
400
8
      std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
401
8
          *this, This.getAddress(*this), CalleeDecl->getParent());
402
8
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
403
8
    }
404
405
66.2k
    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
406
6
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
407
66.2k
    else if (!DevirtualizedMethod)
408
65.8k
      Callee =
409
65.8k
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
410
402
    else {
411
402
      Callee =
412
402
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
413
402
                              GlobalDecl(DevirtualizedMethod));
414
402
    }
415
66.2k
  }
416
417
67.1k
  if (MD->isVirtual()) {
418
1.32k
    Address NewThisAddr =
419
1.32k
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
420
1.32k
            *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
421
1.32k
    This.setAddress(NewThisAddr);
422
1.32k
  }
423
424
67.1k
  return EmitCXXMemberOrOperatorCall(
425
67.1k
      CalleeDecl, Callee, ReturnValue, This.getPointer(*this),
426
67.1k
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
427
67.1k
}
428
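Two caller-side sketches (assumed types) of the dispatch decisions made above: a call on an exactly-known dynamic type can be devirtualized, while a qualified call suppresses virtual dispatch outright.

struct Base { virtual int f(); };
struct Derived final : Base { int f() override; };

int direct(Derived &d) { return d.f(); }       // devirtualizable: Derived is final
int qualified(Base &b) { return b.Base::f(); } // qualifier suppresses the
                                               // virtual call mechanism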
429
RValue
430
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
431
139
                                              ReturnValueSlot ReturnValue) {
432
139
  const BinaryOperator *BO =
433
139
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
434
139
  const Expr *BaseExpr = BO->getLHS();
435
139
  const Expr *MemFnExpr = BO->getRHS();
436
437
139
  const auto *MPT = MemFnExpr->getType()->castAs<MemberPointerType>();
438
139
  const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
439
139
  const auto *RD =
440
139
      cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
441
442
  // Emit the 'this' pointer.
443
139
  Address This = Address::invalid();
444
139
  if (BO->getOpcode() == BO_PtrMemI)
445
84
    This = EmitPointerWithAlignment(BaseExpr);
446
55
  else
447
55
    This = EmitLValue(BaseExpr).getAddress(*this);
448
449
139
  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
450
139
                QualType(MPT->getClass(), 0));
451
452
  // Get the member function pointer.
453
139
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
454
455
  // Ask the ABI to load the callee.  Note that This is modified.
456
139
  llvm::Value *ThisPtrForCall = nullptr;
457
139
  CGCallee Callee =
458
139
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
459
139
                                             ThisPtrForCall, MemFnPtr, MPT);
460
461
139
  CallArgList Args;
462
463
139
  QualType ThisType =
464
139
    getContext().getPointerType(getContext().getTagDeclType(RD));
465
466
  // Push the this ptr.
467
139
  Args.add(RValue::get(ThisPtrForCall), ThisType);
468
469
139
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
470
471
  // And the rest of the call args
472
139
  EmitCallArgs(Args, FPT, E->arguments());
473
139
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
474
139
                                                      /*PrefixSize=*/0),
475
139
                  Callee, ReturnValue, Args, nullptr, E->getExprLoc());
476
139
}
477
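A sketch of the member-pointer call form handled here (assumed code): the base expression supplies 'this', and the actual callee is loaded from the member pointer by the C++ ABI.

struct S { int f(int); };
int call(S *p, int (S::*pmf)(int)) {
  return (p->*pmf)(7);   // BO_PtrMemI: 'p' gives 'this'; the callee comes
                         // from EmitLoadOfMemberFunctionPointer
}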
478
RValue
479
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
480
                                               const CXXMethodDecl *MD,
481
10.4k
                                               ReturnValueSlot ReturnValue) {
482
10.4k
  assert(MD->isInstance() &&
483
10.4k
         "Trying to emit a member call expr on a static method!");
484
10.4k
  return EmitCXXMemberOrOperatorMemberCallExpr(
485
10.4k
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
486
10.4k
      /*IsArrow=*/false, E->getArg(0));
487
10.4k
}
488
489
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
490
30
                                               ReturnValueSlot ReturnValue) {
491
30
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
492
30
}
493
494
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
495
                                            Address DestPtr,
496
27
                                            const CXXRecordDecl *Base) {
497
27
  if (Base->isEmpty())
498
8
    return;
499
500
19
  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
501
502
19
  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
503
19
  CharUnits NVSize = Layout.getNonVirtualSize();
504
505
  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
506
  // present; they are initialized by the most derived class before calling the
507
  // constructor.
508
19
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
509
19
  Stores.emplace_back(CharUnits::Zero(), NVSize);
510
511
  // Each store is split by the existence of a vbptr.
512
19
  CharUnits VBPtrWidth = CGF.getPointerSize();
513
19
  std::vector<CharUnits> VBPtrOffsets =
514
19
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
515
6
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
516
    // Stop before we hit any virtual base pointers located in virtual bases.
517
6
    if (VBPtrOffset >= NVSize)
518
2
      break;
519
4
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
520
4
    CharUnits LastStoreOffset = LastStore.first;
521
4
    CharUnits LastStoreSize = LastStore.second;
522
523
4
    CharUnits SplitBeforeOffset = LastStoreOffset;
524
4
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
525
4
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
526
4
    if (!SplitBeforeSize.isZero())
527
2
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);
528
529
4
    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
530
4
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
531
4
    assert(!SplitAfterSize.isNegative() && "negative store size!");
532
4
    if (!SplitAfterSize.isZero())
533
4
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
534
4
  }
535
536
  // If the type contains a pointer to data member we can't memset it to zero.
537
  // Instead, create a null constant and copy it to the destination.
538
  // TODO: there are other patterns besides zero that we can usefully memset,
539
  // like -1, which happens to be the pattern used by member-pointers.
540
  // TODO: isZeroInitializable can be over-conservative in the case where a
541
  // virtual base contains a member pointer.
542
19
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
543
19
  if (!NullConstantForBase->isNullValue()) {
544
4
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
545
4
        CGF.CGM.getModule(), NullConstantForBase->getType(),
546
4
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
547
4
        NullConstantForBase, Twine());
548
549
4
    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
550
4
                               DestPtr.getAlignment());
551
4
    NullVariable->setAlignment(Align.getAsAlign());
552
553
4
    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
554
555
    // Get and call the appropriate llvm.memcpy overload.
556
4
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
557
4
      CharUnits StoreOffset = Store.first;
558
4
      CharUnits StoreSize = Store.second;
559
4
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
560
4
      CGF.Builder.CreateMemCpy(
561
4
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
562
4
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
563
4
          StoreSizeVal);
564
4
    }
565
566
  // Otherwise, just memset the whole thing to zero.  This is legal
567
  // because in LLVM, all default initializers (other than the ones we just
568
  // handled above) are guaranteed to have a bit pattern of all zeros.
569
15
  } else {
570
17
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
571
17
      CharUnits StoreOffset = Store.first;
572
17
      CharUnits StoreSize = Store.second;
573
17
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
574
17
      CGF.Builder.CreateMemSet(
575
17
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
576
17
          CGF.Builder.getInt8(0), StoreSizeVal);
577
17
    }
578
15
  }
579
19
}
580
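A sketch of when the store splitting above matters (assumed code; Microsoft C++ ABI, where vbptrs exist): zero-initializing the base subobject must not clobber the vbptr that the most derived constructor has already installed.

struct V { int v; };
struct B : virtual V { int b; };   // B's non-virtual part contains a vbptr
struct D : B {
  D() : B() {}   // value-initializing B emits stores split around the vbptr
};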
581
void
582
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
583
60.1k
                                      AggValueSlot Dest) {
584
60.1k
  assert(!Dest.isIgnored() && "Must have a destination!");
585
60.1k
  const CXXConstructorDecl *CD = E->getConstructor();
586
587
  // If we require zero initialization before (or instead of) calling the
588
  // constructor, as can be the case with a non-user-provided default
589
  // constructor, emit the zero initialization now, unless destination is
590
  // already zeroed.
591
60.1k
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
592
6.06k
    switch (E->getConstructionKind()) {
593
2
    case CXXConstructExpr::CK_Delegating:
594
6.03k
    case CXXConstructExpr::CK_Complete:
595
6.03k
      EmitNullInitialization(Dest.getAddress(), E->getType());
596
6.03k
      break;
597
0
    case CXXConstructExpr::CK_VirtualBase:
598
27
    case CXXConstructExpr::CK_NonVirtualBase:
599
27
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
600
27
                                      CD->getParent());
601
27
      break;
602
60.1k
    }
603
60.1k
  }
604
605
  // If this is a call to a trivial default constructor, do nothing.
606
60.1k
  if (CD->isTrivial() && CD->isDefaultConstructor())
607
7.31k
    return;
608
609
  // Elide the constructor if we're constructing from a temporary.
610
  // The temporary check is required because Sema sets this on NRVO
611
  // returns.
612
52.8k
  if (getLangOpts().ElideConstructors && E->isElidable()) {
613
12.3k
    assert(getContext().hasSameUnqualifiedType(E->getType(),
614
12.3k
                                               E->getArg(0)->getType()));
615
12.3k
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
616
12.2k
      EmitAggExpr(E->getArg(0), Dest);
617
12.2k
      return;
618
12.2k
    }
619
40.5k
  }
620
621
40.5k
  if (const ArrayType *arrayType
622
908
        = getContext().getAsArrayType(E->getType())) {
623
908
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
624
908
                               Dest.isSanitizerChecked());
625
39.6k
  } else {
626
39.6k
    CXXCtorType Type = Ctor_Complete;
627
39.6k
    bool ForVirtualBase = false;
628
39.6k
    bool Delegating = false;
629
630
39.6k
    switch (E->getConstructionKind()) {
631
88
     case CXXConstructExpr::CK_Delegating:
632
      // We should be emitting a constructor; GlobalDecl will assert this
633
88
      Type = CurGD.getCtorType();
634
88
      Delegating = true;
635
88
      break;
636
637
31.6k
     case CXXConstructExpr::CK_Complete:
638
31.6k
      Type = Ctor_Complete;
639
31.6k
      break;
640
641
551
     case CXXConstructExpr::CK_VirtualBase:
642
551
      ForVirtualBase = true;
643
551
      LLVM_FALLTHROUGH;
644
645
7.94k
     case CXXConstructExpr::CK_NonVirtualBase:
646
7.94k
      Type = Ctor_Base;
647
39.6k
     }
648
649
     // Call the constructor.
650
39.6k
     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
651
39.6k
  }
652
40.5k
}
653
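A sketch of the elision branch above (assumed code): when constructing from a temporary, the copy is elided and the initializer is emitted directly into the destination slot.

struct S { S(); S(const S &); };
S make() {
  return S();   // E->isElidable(): S() is emitted straight into the return
                // slot; no copy constructor call is generated
}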
654
void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
655
57
                                                 const Expr *Exp) {
656
57
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
657
2
    Exp = E->getSubExpr();
658
57
  assert(isa<CXXConstructExpr>(Exp) &&
659
57
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
660
57
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
661
57
  const CXXConstructorDecl *CD = E->getConstructor();
662
57
  RunCleanupsScope Scope(*this);
663
664
  // If we require zero initialization before (or instead of) calling the
665
  // constructor, as can be the case with a non-user-provided default
666
  // constructor, emit the zero initialization now.
667
  // FIXME. Do I still need this for a copy ctor synthesis?
668
57
  if (E->requiresZeroInitialization())
669
0
    EmitNullInitialization(Dest, E->getType());
670
671
57
  assert(!getContext().getAsConstantArrayType(E->getType())
672
57
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
673
57
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
674
57
}
675
676
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
677
2.28k
                                        const CXXNewExpr *E) {
678
2.28k
  if (!E->isArray())
679
1.61k
    return CharUnits::Zero();
680
681
  // No cookie is required if the operator new[] being used is the
682
  // reserved placement operator new[].
683
666
  if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
684
16
    return CharUnits::Zero();
685
686
650
  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
687
650
}
688
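An example of an allocation that needs a nonzero cookie (assumed code): for array new of a type with a non-trivial destructor, the element count must be recoverable at delete[] time.

struct T { ~T(); };
T *make(unsigned n) {
  return new T[n];   // allocation = n * sizeof(T) + cookie; the cookie
                     // (an element count, size_t under the Itanium ABI)
                     // precedes the array
}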
689
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
690
                                        const CXXNewExpr *e,
691
                                        unsigned minElements,
692
                                        llvm::Value *&numElements,
693
1.95k
                                        llvm::Value *&sizeWithoutCookie) {
694
1.95k
  QualType type = e->getAllocatedType();
695
696
1.95k
  if (!e->isArray()) {
697
1.61k
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
698
1.61k
    sizeWithoutCookie
699
1.61k
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
700
1.61k
    return sizeWithoutCookie;
701
1.61k
  }
702
703
  // The width of size_t.
704
333
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();
705
706
  // Figure out the cookie size.
707
333
  llvm::APInt cookieSize(sizeWidth,
708
333
                         CalculateCookiePadding(CGF, e).getQuantity());
709
710
  // Emit the array size expression.
711
  // We multiply the size of all dimensions for NumElements.
712
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
713
333
  numElements =
714
333
    ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
715
333
  if (!numElements)
716
108
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
717
333
  assert(isa<llvm::IntegerType>(numElements->getType()));
718
719
  // The number of elements can have an arbitrary integer type;
720
  // essentially, we need to multiply it by a constant factor, add a
721
  // cookie size, and verify that the result is representable as a
722
  // size_t.  That's just a gloss, though, and it's wrong in one
723
  // important way: if the count is negative, it's an error even if
724
  // the cookie size would bring the total size >= 0.
725
333
  bool isSigned
726
333
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
727
333
  llvm::IntegerType *numElementsType
728
333
    = cast<llvm::IntegerType>(numElements->getType());
729
333
  unsigned numElementsWidth = numElementsType->getBitWidth();
730
731
  // Compute the constant factor.
732
333
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
733
357
  while (const ConstantArrayType *CAT
734
24
             = CGF.getContext().getAsConstantArrayType(type)) {
735
24
    type = CAT->getElementType();
736
24
    arraySizeMultiplier *= CAT->getSize();
737
24
  }
738
739
333
  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
740
333
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
741
333
  typeSizeMultiplier *= arraySizeMultiplier;
742
743
  // This will be a size_t.
744
333
  llvm::Value *size;
745
746
  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
747
  // Don't bloat the -O0 code.
748
333
  if (llvm::ConstantInt *numElementsC =
749
225
        dyn_cast<llvm::ConstantInt>(numElements)) {
750
225
    const llvm::APInt &count = numElementsC->getValue();
751
752
225
    bool hasAnyOverflow = false;
753
754
    // If 'count' was a negative number, it's an overflow.
755
225
    if (isSigned && count.isNegative())
756
0
      hasAnyOverflow = true;
757
758
    // We want to do all this arithmetic in size_t.  If numElements is
759
    // wider than that, check whether it's already too big, and if so,
760
    // overflow.
761
225
    else if (numElementsWidth > sizeWidth &&
762
0
             numElementsWidth - sizeWidth > count.countLeadingZeros())
763
0
      hasAnyOverflow = true;
764
765
    // Okay, compute a count at the right width.
766
225
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
767
768
    // If there is a brace-initializer, we cannot allocate fewer elements than
769
    // there are initializers. If we do, that's treated like an overflow.
770
225
    if (adjustedCount.ult(minElements))
771
0
      hasAnyOverflow = true;
772
773
    // Scale numElements by that.  This might overflow, but we don't
774
    // care because it only overflows if allocationSize does, too, and
775
    // if that overflows then we shouldn't use this.
776
225
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
777
225
                                         adjustedCount * arraySizeMultiplier);
778
779
    // Compute the size before cookie, and track whether it overflowed.
780
225
    bool overflow;
781
225
    llvm::APInt allocationSize
782
225
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
783
225
    hasAnyOverflow |= overflow;
784
785
    // Add in the cookie, and check whether it's overflowed.
786
225
    if (cookieSize != 0) {
787
      // Save the current size without a cookie.  This shouldn't be
788
      // used if there was overflow.
789
44
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
790
791
44
      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
792
44
      hasAnyOverflow |= overflow;
793
44
    }
794
795
    // On overflow, produce a -1 so operator new will fail.
796
225
    if (hasAnyOverflow) {
797
0
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
798
225
    } else {
799
225
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
800
225
    }
801
802
  // Otherwise, we might need to use the overflow intrinsics.
803
108
  } else {
804
    // There are up to five conditions we need to test for:
805
    // 1) if isSigned, we need to check whether numElements is negative;
806
    // 2) if numElementsWidth > sizeWidth, we need to check whether
807
    //   numElements is larger than something representable in size_t;
808
    // 3) if minElements > 0, we need to check whether numElements is smaller
809
    //    than that.
810
    // 4) we need to compute
811
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
812
    //    and check whether it overflows; and
813
    // 5) if we need a cookie, we need to compute
814
    //      size := sizeWithoutCookie + cookieSize
815
    //    and check whether it overflows.
816
817
108
    llvm::Value *hasOverflow = nullptr;
818
819
    // If numElementsWidth > sizeWidth, then one way or another, we're
820
    // going to have to do a comparison for (2), and this happens to
821
    // take care of (1), too.
822
108
    if (numElementsWidth > sizeWidth) {
823
0
      llvm::APInt threshold(numElementsWidth, 1);
824
0
      threshold <<= sizeWidth;
825
826
0
      llvm::Value *thresholdV
827
0
        = llvm::ConstantInt::get(numElementsType, threshold);
828
829
0
      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
830
0
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
831
832
    // Otherwise, if we're signed, we want to sext up to size_t.
833
108
    } else if (isSigned) {
834
28
      if (numElementsWidth < sizeWidth)
835
8
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
836
837
      // If there's a non-1 type size multiplier, then we can do the
838
      // signedness check at the same time as we do the multiply
839
      // because a negative number times anything will cause an
840
      // unsigned overflow.  Otherwise, we have to do it here. But at least
841
      // in this case, we can subsume the >= minElements check.
842
28
      if (typeSizeMultiplier == 1)
843
8
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
844
8
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
845
846
    // Otherwise, zext up to size_t if necessary.
847
80
    } else if (numElementsWidth < sizeWidth) {
848
0
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
849
0
    }
850
851
108
    assert(numElements->getType() == CGF.SizeTy);
852
853
108
    if (minElements) {
854
      // Don't allow allocation of fewer elements than we have initializers.
855
9
      if (!hasOverflow) {
856
6
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
857
6
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
858
3
      } else if (numElementsWidth > sizeWidth) {
859
        // The other existing overflow subsumes this check.
860
        // We do an unsigned comparison, since any signed value < -1 is
861
        // taken care of either above or below.
862
0
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
863
0
                          CGF.Builder.CreateICmpULT(numElements,
864
0
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
865
0
      }
866
9
    }
867
868
108
    size = numElements;
869
870
    // Multiply by the type size if necessary.  This multiplier
871
    // includes all the factors for nested arrays.
872
    //
873
    // This step also causes numElements to be scaled up by the
874
    // nested-array factor if necessary.  Overflow on this computation
875
    // can be ignored because the result shouldn't be used if
876
    // allocation fails.
877
108
    if (typeSizeMultiplier != 1) {
878
53
      llvm::Function *umul_with_overflow
879
53
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
880
881
53
      llvm::Value *tsmV =
882
53
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
883
53
      llvm::Value *result =
884
53
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});
885
886
53
      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
887
53
      if (hasOverflow)
888
6
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
889
47
      else
890
47
        hasOverflow = overflowed;
891
892
53
      size = CGF.Builder.CreateExtractValue(result, 0);
893
894
      // Also scale up numElements by the array size multiplier.
895
53
      if (arraySizeMultiplier != 1) {
896
        // If the base element type size is 1, then we can re-use the
897
        // multiply we just did.
898
10
        if (typeSize.isOne()) {
899
0
          assert(arraySizeMultiplier == typeSizeMultiplier);
900
0
          numElements = size;
901
902
        // Otherwise we need a separate multiply.
903
10
        } else {
904
10
          llvm::Value *asmV =
905
10
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
906
10
          numElements = CGF.Builder.CreateMul(numElements, asmV);
907
10
        }
908
10
      }
909
55
    } else {
910
      // numElements doesn't need to be scaled.
911
55
      assert(arraySizeMultiplier == 1);
912
55
    }
913
914
    // Add in the cookie size if necessary.
915
108
    if (cookieSize != 0) {
916
19
      sizeWithoutCookie = size;
917
918
19
      llvm::Function *uadd_with_overflow
919
19
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
920
921
19
      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
922
19
      llvm::Value *result =
923
19
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});
924
925
19
      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
926
19
      if (hasOverflow)
927
18
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
928
1
      else
929
1
        hasOverflow = overflowed;
930
931
19
      size = CGF.Builder.CreateExtractValue(result, 0);
932
19
    }
933
934
    // If we had any possibility of dynamic overflow, make a select to
935
    // overwrite 'size' with an all-ones value, which should cause
936
    // operator new to throw.
937
108
    if (hasOverflow)
938
62
      size = CGF.Builder.CreateSelect(hasOverflow,
939
62
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
940
62
                                      size);
941
108
  }
942
943
333
  if (cookieSize == 0)
944
270
    sizeWithoutCookie = size;
945
333
  else
946
333
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
947
948
333
  return size;
949
333
}
950
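A minimal standalone sketch of the overflow discipline implemented above, using GCC/Clang checked-arithmetic builtins in place of llvm.umul.with.overflow and llvm.uadd.with.overflow (names are illustrative):

#include <cstddef>
#include <cstdint>

// Mirrors the function above: on any overflow, return an all-ones size so
// that operator new will fail.
size_t allocationSize(size_t numElements, size_t typeSize, size_t cookie) {
  size_t bytes;
  bool overflow = __builtin_mul_overflow(numElements, typeSize, &bytes);
  overflow |= __builtin_add_overflow(bytes, cookie, &bytes);
  return overflow ? SIZE_MAX : bytes;
}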
951
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
952
                                    QualType AllocType, Address NewPtr,
953
1.62k
                                    AggValueSlot::Overlap_t MayOverlap) {
954
  // FIXME: Refactor with EmitExprAsInit.
955
1.62k
  switch (CGF.getEvaluationKind(AllocType)) {
956
262
  case TEK_Scalar:
957
262
    CGF.EmitScalarInit(Init, nullptr,
958
262
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
959
262
    return;
960
1
  case TEK_Complex:
961
1
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
962
1
                                  /*isInit*/ true);
963
1
    return;
964
1.36k
  case TEK_Aggregate: {
965
1.36k
    AggValueSlot Slot
966
1.36k
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
967
1.36k
                              AggValueSlot::IsDestructed,
968
1.36k
                              AggValueSlot::DoesNotNeedGCBarriers,
969
1.36k
                              AggValueSlot::IsNotAliased,
970
1.36k
                              MayOverlap, AggValueSlot::IsNotZeroed,
971
1.36k
                              AggValueSlot::IsSanitizerChecked);
972
1.36k
    CGF.EmitAggExpr(Init, Slot);
973
1.36k
    return;
974
0
  }
975
0
  }
976
0
  llvm_unreachable("bad evaluation kind");
977
0
}
978
979
void CodeGenFunction::EmitNewArrayInitializer(
980
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
981
    Address BeginPtr, llvm::Value *NumElements,
982
333
    llvm::Value *AllocSizeWithoutCookie) {
983
  // If we have a type with trivial initialization and no initializer,
984
  // there's nothing to do.
985
333
  if (!E->hasInitializer())
986
179
    return;
987
988
154
  Address CurPtr = BeginPtr;
989
990
154
  unsigned InitListElements = 0;
991
992
154
  const Expr *Init = E->getInitializer();
993
154
  Address EndOfInit = Address::invalid();
994
154
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
995
154
  EHScopeStack::stable_iterator Cleanup;
996
154
  llvm::Instruction *CleanupDominator = nullptr;
997
998
154
  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
999
154
  CharUnits ElementAlign =
1000
154
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
1001
1002
  // Attempt to perform zero-initialization using memset.
1003
21
  auto TryMemsetInitialization = [&]() -> bool {
1004
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
1005
    // we can initialize with a memset to -1.
1006
21
    if (!CGM.getTypes().isZeroInitializable(ElementType))
1007
4
      return false;
1008
1009
    // Optimization: since zero initialization will just set the memory
1010
    // to all zeroes, generate a single memset to do it in one shot.
1011
1012
    // Subtract out the size of any elements we've already initialized.
1013
17
    auto *RemainingSize = AllocSizeWithoutCookie;
1014
17
    if (InitListElements) {
1015
      // We know this can't overflow; we check this when doing the allocation.
1016
9
      auto *InitializedSize = llvm::ConstantInt::get(
1017
9
          RemainingSize->getType(),
1018
9
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
1019
9
              InitListElements);
1020
9
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
1021
9
    }
1022
1023
    // Create the memset.
1024
17
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
1025
17
    return true;
1026
17
  };
1027
1028
  // If the initializer is an initializer list, first do the explicit elements.
1029
154
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
1030
    // Initializing from a (braced) string literal is a special case; the init
1031
    // list element does not initialize a (single) array element.
1032
28
    if (ILE->isStringLiteralInit()) {
1033
      // Initialize the initial portion of length equal to that of the string
1034
      // literal. The allocation must be for at least this much; we emitted a
1035
      // check for that earlier.
1036
8
      AggValueSlot Slot =
1037
8
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
1038
8
                                AggValueSlot::IsDestructed,
1039
8
                                AggValueSlot::DoesNotNeedGCBarriers,
1040
8
                                AggValueSlot::IsNotAliased,
1041
8
                                AggValueSlot::DoesNotOverlap,
1042
8
                                AggValueSlot::IsNotZeroed,
1043
8
                                AggValueSlot::IsSanitizerChecked);
1044
8
      EmitAggExpr(ILE->getInit(0), Slot);
1045
1046
      // Move past these elements.
1047
8
      InitListElements =
1048
8
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
1049
8
              ->getSize().getZExtValue();
1050
8
      CurPtr =
1051
8
          Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
1052
8
                                            Builder.getSize(InitListElements),
1053
8
                                            "string.init.end"),
1054
8
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
1055
8
                                                          ElementSize));
1056
1057
      // Zero out the rest, if any remain.
1058
8
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
1059
8
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
1060
2
        bool OK = TryMemsetInitialization();
1061
2
        (void)OK;
1062
2
        assert(OK && "couldn't memset character type?");
1063
2
      }
1064
8
      return;
1065
8
    }
1066
1067
20
    InitListElements = ILE->getNumInits();
1068
1069
    // If this is a multi-dimensional array new, we will initialize multiple
1070
    // elements with each init list element.
1071
20
    QualType AllocType = E->getAllocatedType();
1072
20
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
1073
3
            AllocType->getAsArrayTypeUnsafe())) {
1074
3
      ElementTy = ConvertTypeForMem(AllocType);
1075
3
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
1076
3
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
1077
3
    }
1078
1079
    // Enter a partial-destruction Cleanup if necessary.
1080
20
    if (needsEHCleanup(DtorKind)) {
1081
      // In principle we could tell the Cleanup where we are more
1082
      // directly, but the control flow can get so varied here that it
1083
      // would actually be quite complex.  Therefore we go through an
1084
      // alloca.
1085
2
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
1086
2
                                   "array.init.end");
1087
2
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
1088
2
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
1089
2
                                       ElementType, ElementAlign,
1090
2
                                       getDestroyer(DtorKind));
1091
2
      Cleanup = EHStack.stable_begin();
1092
2
    }
1093
1094
20
    CharUnits StartAlign = CurPtr.getAlignment();
1095
64
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
1096
      // Tell the cleanup that it needs to destroy up to this
1097
      // element.  TODO: some of these stores can be trivially
1098
      // observed to be unnecessary.
1099
44
      if (EndOfInit.isValid()) {
1100
6
        auto FinishedPtr =
1101
6
          Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
1102
6
        Builder.CreateStore(FinishedPtr, EndOfInit);
1103
6
      }
1104
      // FIXME: If the last initializer is an incomplete initializer list for
1105
      // an array, and we have an array filler, we can fold together the two
1106
      // initialization loops.
1107
44
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
1108
44
                              ILE->getInit(i)->getType(), CurPtr,
1109
44
                              AggValueSlot::DoesNotOverlap);
1110
44
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
1111
44
                                                 Builder.getSize(1),
1112
44
                                                 "array.exp.next"),
1113
44
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
1114
44
    }
1115
1116
    // The remaining elements are filled with the array filler expression.
1117
20
    Init = ILE->getArrayFiller();
1118
1119
    // Extract the initializer for the individual array elements by pulling
1120
    // out the array filler from all the nested initializer lists. This avoids
1121
    // generating a nested loop for the initialization.
1122
22
    while (Init && Init->getType()->isConstantArrayType()) {
1123
2
      auto *SubILE = dyn_cast<InitListExpr>(Init);
1124
2
      if (!SubILE)
1125
0
        break;
1126
2
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
1127
2
      Init = SubILE->getArrayFiller();
1128
2
    }
1129
1130
    // Switch back to initializing one base element at a time.
1131
20
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
1132
20
  }
1133
1134
  // If all elements have already been initialized, skip any further
1135
  // initialization.
1136
146
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
1137
146
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
1138
    // If there was a Cleanup, deactivate it.
1139
12
    if (CleanupDominator)
1140
1
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
1141
12
    return;
1142
12
  }
1143
1144
134
  assert(Init && "have trailing elements to initialize but no initializer");
1145
1146
  // If this is a constructor call, try to optimize it out, and failing that
1147
  // emit a single loop to initialize all remaining elements.
1148
134
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
1149
117
    CXXConstructorDecl *Ctor = CCE->getConstructor();
1150
117
    if (Ctor->isTrivial()) {
1151
      // If new expression did not specify value-initialization, then there
1152
      // is no initialization.
1153
53
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
1154
51
        return;
1155
1156
2
      if (TryMemsetInitialization())
1157
1
        return;
1158
65
    }
1159
1160
    // Store the new Cleanup position for irregular Cleanups.
1161
    //
1162
    // FIXME: Share this cleanup with the constructor call emission rather than
1163
    // having it create a cleanup of its own.
1164
65
    if (EndOfInit.isValid())
1165
1
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
1166
1167
    // Emit a constructor call loop to initialize the remaining elements.
1168
65
    if (InitListElements)
1169
2
      NumElements = Builder.CreateSub(
1170
2
          NumElements,
1171
2
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
1172
65
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
1173
65
                               /*NewPointerIsChecked*/true,
1174
65
                               CCE->requiresZeroInitialization());
1175
65
    return;
1176
65
  }
1177
1178
  // If this is value-initialization, we can usually use memset.
1179
17
  ImplicitValueInitExpr IVIE(ElementType);
1180
17
  if (isa<ImplicitValueInitExpr>(Init)) {
1181
14
    if (TryMemsetInitialization())
1182
11
      return;
1183
1184
    // Switch to an ImplicitValueInitExpr for the element type. This handles
1185
    // only one case: multidimensional array new of pointers to members. In
1186
    // all other cases, we already have an initializer for the array element.
1187
3
    Init = &IVIE;
1188
3
  }
1189
1190
  // At this point we should have found an initializer for the individual
1191
  // elements of the array.
1192
6
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
1193
6
         "got wrong type of element to initialize");
1194
1195
  // If we have an empty initializer list, we can usually use memset.
1196
6
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
1197
3
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
1198
0
      return;
1199
1200
  // If we have a struct whose every field is value-initialized, we can
1201
  // usually use memset.
1202
6
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
1203
3
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
1204
3
      if (RType->getDecl()->isStruct()) {
1205
3
        unsigned NumElements = 0;
1206
3
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
1207
3
          NumElements = CXXRD->getNumBases();
1208
3
        for (auto *Field : RType->getDecl()->fields())
1209
5
          if (!Field->isUnnamedBitfield())
1210
5
            ++NumElements;
1211
        // FIXME: Recurse into nested InitListExprs.
1212
3
        if (ILE->getNumInits() == NumElements)
1213
8
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1214
5
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
1215
0
              --NumElements;
1216
3
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
1217
3
          return;
1218
3
      }
1219
3
    }
1220
3
  }
1221
1222
  // Create the loop blocks.
1223
3
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
1224
3
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
1225
3
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");
1226
1227
  // Find the end of the array, hoisted out of the loop.
1228
3
  llvm::Value *EndPtr =
1229
3
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");
1230
1231
  // If the number of elements isn't constant, we have to now check if there is
1232
  // anything left to initialize.
1233
3
  if (!ConstNum) {
1234
0
    llvm::Value *IsEmpty =
1235
0
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
1236
0
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
1237
0
  }
1238
1239
  // Enter the loop.
1240
3
  EmitBlock(LoopBB);
1241
1242
  // Set up the current-element phi.
1243
3
  llvm::PHINode *CurPtrPhi =
1244
3
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
1245
3
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
1246
1247
3
  CurPtr = Address(CurPtrPhi, ElementAlign);
1248
1249
  // Store the new Cleanup position for irregular Cleanups.
1250
3
  if (EndOfInit.isValid())
1251
0
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);
1252
1253
  // Enter a partial-destruction Cleanup if necessary.
1254
3
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
1255
0
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
1256
0
                                   ElementType, ElementAlign,
1257
0
                                   getDestroyer(DtorKind));
1258
0
    Cleanup = EHStack.stable_begin();
1259
0
    CleanupDominator = Builder.CreateUnreachable();
1260
0
  }
1261
1262
  // Emit the initializer into this element.
1263
3
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
1264
3
                          AggValueSlot::DoesNotOverlap);
1265
1266
  // Leave the Cleanup if we entered one.
1267
3
  if (CleanupDominator) {
1268
0
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
1269
0
    CleanupDominator->eraseFromParent();
1270
0
  }
1271
1272
  // Advance to the next element by adjusting the pointer type as necessary.
1273
3
  llvm::Value *NextPtr =
1274
3
    Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
1275
3
                                       "array.next");
1276
1277
  // Check whether we've gotten to the end of the array and, if so,
1278
  // exit the loop.
1279
3
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
1280
3
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
1281
3
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());
1282
1283
3
  EmitBlock(ContBB);
1284
3
}
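
Illustration (not part of the coverage data; all names invented): a minimal C++ sketch of array new-expressions whose trailing elements the function above finishes initializing. A partial brace-initializer leaves the remaining elements to value-initialize, which may be folded into a memset; otherwise the explicit loop emitted above runs per element.

    #include <cstddef>

    struct Point { int x, y; };               // trivially constructible

    void array_new_examples(std::size_t n) { // assumes n >= 3 at runtime
      // Three explicit inits; the remaining n - 3 elements are
      // value-initialized, a candidate for the memset fast path.
      int *a = new int[n]{1, 2, 3};

      // Empty braces: every element is value-initialized. For a struct of
      // scalars this is also memset-able; otherwise the loop path is used.
      Point *p = new Point[n]{};

      delete[] a;
      delete[] p;
    }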
1285
1286
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
1287
                               QualType ElementType, llvm::Type *ElementTy,
1288
                               Address NewPtr, llvm::Value *NumElements,
1289
1.95k
                               llvm::Value *AllocSizeWithoutCookie) {
1290
1.95k
  ApplyDebugLocation DL(CGF, E);
1291
1.95k
  if (E->isArray())
1292
333
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
1293
333
                                AllocSizeWithoutCookie);
1294
1.61k
  else if (const Expr *Init = E->getInitializer())
1295
1.57k
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
1296
1.57k
                            AggValueSlot::DoesNotOverlap);
1297
1.95k
}
1298
1299
/// Emit a call to an operator new or operator delete function, as implicitly
1300
/// created by new-expressions and delete-expressions.
1301
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
1302
                                const FunctionDecl *CalleeDecl,
1303
                                const FunctionProtoType *CalleeType,
1304
4.09k
                                const CallArgList &Args) {
1305
4.09k
  llvm::CallBase *CallOrInvoke;
1306
4.09k
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
1307
4.09k
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
1308
4.09k
  RValue RV =
1309
4.09k
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
1310
4.09k
                       Args, CalleeType, /*ChainCall=*/false),
1311
4.09k
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);
1312
1313
  /// C++1y [expr.new]p10:
1314
  ///   [In a new-expression,] an implementation is allowed to omit a call
1315
  ///   to a replaceable global allocation function.
1316
  ///
1317
  /// We model such elidable calls with the 'builtin' attribute.
1318
4.09k
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
1319
4.09k
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
1320
3.77k
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
1321
3.77k
    CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
1322
3.77k
                               llvm::Attribute::Builtin);
1323
3.77k
  }
1324
1325
4.09k
  return RV;
1326
4.09k
}
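
Illustration (hypothetical, names invented): the 'builtin' attribute added above lets LLVM treat calls to replaceable global allocation functions as elidable, so under optimization a function like this can be folded to a plain 'return 7' with no heap traffic.

    int elidable() {
      int *p = new int(7);   // replaceable global operator new
      int v = *p;
      delete p;              // matching replaceable operator delete
      return v;              // the allocation may be omitted entirely
    }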
1327
1328
RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1329
                                                 const CallExpr *TheCall,
1330
600
                                                 bool IsDelete) {
1331
600
  CallArgList Args;
1332
600
  EmitCallArgs(Args, Type, TheCall->arguments());
1333
  // Find the allocation or deallocation function that we're calling.
1334
600
  ASTContext &Ctx = getContext();
1335
600
  DeclarationName Name = Ctx.DeclarationNames
1336
318
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);
1337
1338
600
  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
1339
610
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
1340
610
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
1341
600
        return EmitNewDeleteCall(*this, FD, Type, Args);
1342
600
  llvm_unreachable("predeclared global operator new/delete is missing");
1343
600
}
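
Illustration: the builtin new/delete calls handled above surface in source as the Clang extensions __builtin_operator_new and __builtin_operator_delete (used by libc++'s allocators, for example). A minimal sketch with invented names:

    #include <cstddef>

    void *make_buffer(std::size_t n) {
      // Calls the predeclared global operator new(size_t), but as an
      // optimizable builtin rather than a new-expression.
      return __builtin_operator_new(n);
    }

    void free_buffer(void *p) {
      __builtin_operator_delete(p);
    }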
1344
1345
namespace {
1346
/// The parameters to pass to a usual operator delete.
1347
struct UsualDeleteParams {
1348
  bool DestroyingDelete = false;
1349
  bool Size = false;
1350
  bool Alignment = false;
1351
};
1352
}
1353
1354
2.19k
static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
1355
2.19k
  UsualDeleteParams Params;
1356
1357
2.19k
  const FunctionProtoType *FPT = FD->getType()->castAs<FunctionProtoType>();
1358
2.19k
  auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
1359
1360
  // The first argument is always a void*.
1361
2.19k
  ++AI;
1362
1363
  // The next parameter may be a std::destroying_delete_t.
1364
2.19k
  if (FD->isDestroyingOperatorDelete()) {
1365
30
    Params.DestroyingDelete = true;
1366
30
    assert(AI != AE);
1367
30
    ++AI;
1368
30
  }
1369
1370
  // Figure out what other parameters we should be implicitly passing.
1371
2.19k
  if (AI != AE && (*AI)->isIntegerType()) {
1372
90
    Params.Size = true;
1373
90
    ++AI;
1374
90
  }
1375
1376
2.19k
  if (AI != AE && (*AI)->isAlignValT()) {
1377
58
    Params.Alignment = true;
1378
58
    ++AI;
1379
58
  }
1380
1381
2.19k
  assert(AI == AE && "unexpected usual deallocation function parameter");
1382
2.19k
  return Params;
1383
2.19k
}
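
Illustration: the parameter combinations getUsualDeleteParams recognizes correspond to the usual deallocation function signatures. A sketch of declarations (class names invented) and the flags each would set:

    #include <cstddef>
    #include <new>

    struct A { static void operator delete(void *p); };                // no extra params
    struct B { static void operator delete(void *p, std::size_t n); }; // Size
    struct C {                                                          // Size + Alignment
      static void operator delete(void *p, std::size_t n, std::align_val_t a);
    };
    struct D {                                                          // DestroyingDelete (C++20)
      static void operator delete(D *p, std::destroying_delete_t);
    };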
1384
1385
namespace {
1386
  /// A cleanup to call the given 'operator delete' function upon abnormal
1387
  /// exit from a new expression. Templated on a traits type that deals with
1388
  /// ensuring that the arguments dominate the cleanup if necessary.
1389
  template<typename Traits>
1390
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
1391
    /// Type used to hold llvm::Value*s.
1392
    typedef typename Traits::ValueTy ValueTy;
1393
    /// Type used to hold RValues.
1394
    typedef typename Traits::RValueTy RValueTy;
1395
    struct PlacementArg {
1396
      RValueTy ArgValue;
1397
      QualType ArgType;
1398
    };
1399
1400
    unsigned NumPlacementArgs : 31;
1401
    unsigned PassAlignmentToPlacementDelete : 1;
1402
    const FunctionDecl *OperatorDelete;
1403
    ValueTy Ptr;
1404
    ValueTy AllocSize;
1405
    CharUnits AllocAlign;
1406
1407
80
    PlacementArg *getPlacementArgs() {
1408
80
      return reinterpret_cast<PlacementArg *>(this + 1);
1409
80
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::getPlacementArgs()
Line
Count
Source
1407
72
    PlacementArg *getPlacementArgs() {
1408
72
      return reinterpret_cast<PlacementArg *>(this + 1);
1409
72
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::getPlacementArgs()
Line
Count
Source
1407
8
    PlacementArg *getPlacementArgs() {
1408
8
      return reinterpret_cast<PlacementArg *>(this + 1);
1409
8
    }
1410
1411
  public:
1412
809
    static size_t getExtraSize(size_t NumPlacementArgs) {
1413
809
      return NumPlacementArgs * sizeof(PlacementArg);
1414
809
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::getExtraSize(unsigned long)
Line
Count
Source
1412
803
    static size_t getExtraSize(size_t NumPlacementArgs) {
1413
803
      return NumPlacementArgs * sizeof(PlacementArg);
1414
803
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::getExtraSize(unsigned long)
Line
Count
Source
1412
6
    static size_t getExtraSize(size_t NumPlacementArgs) {
1413
6
      return NumPlacementArgs * sizeof(PlacementArg);
1414
6
    }
1415
1416
    CallDeleteDuringNew(size_t NumPlacementArgs,
1417
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
1418
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
1419
                        CharUnits AllocAlign)
1420
      : NumPlacementArgs(NumPlacementArgs),
1421
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
1422
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
1423
809
        AllocAlign(AllocAlign) {}
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::CallDeleteDuringNew(unsigned long, clang::FunctionDecl const*, llvm::Value*, llvm::Value*, bool, clang::CharUnits)
Line
Count
Source
1423
803
        AllocAlign(AllocAlign) {}
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::CallDeleteDuringNew(unsigned long, clang::FunctionDecl const*, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, bool, clang::CharUnits)
Line
Count
Source
1423
6
        AllocAlign(AllocAlign) {}
1424
1425
40
    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1426
40
      assert(I < NumPlacementArgs && "index out of range");
1427
40
      getPlacementArgs()[I] = {Arg, Type};
1428
40
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::setPlacementArg(unsigned int, clang::CodeGen::RValue, clang::QualType)
Line
Count
Source
1425
36
    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1426
36
      assert(I < NumPlacementArgs && "index out of range");
1427
36
      getPlacementArgs()[I] = {Arg, Type};
1428
36
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::setPlacementArg(unsigned int, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, clang::QualType)
Line
Count
Source
1425
4
    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1426
4
      assert(I < NumPlacementArgs && "index out of range");
1427
4
      getPlacementArgs()[I] = {Arg, Type};
1428
4
    }
1429
1430
574
    void Emit(CodeGenFunction &CGF, Flags flags) override {
1431
574
      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
1432
574
      CallArgList DeleteArgs;
1433
1434
      // The first argument is always a void* (or C* for a destroying operator
1435
      // delete for class type C).
1436
574
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1437
1438
      // Figure out what other parameters we should be implicitly passing.
1439
574
      UsualDeleteParams Params;
1440
574
      if (NumPlacementArgs) {
1441
        // A placement deallocation function is implicitly passed an alignment
1442
        // if the placement allocation function was, but is never passed a size.
1443
36
        Params.Alignment = PassAlignmentToPlacementDelete;
1444
538
      } else {
1445
        // For a non-placement new-expression, 'operator delete' can take a
1446
        // size and/or an alignment if it has the right parameters.
1447
538
        Params = getUsualDeleteParams(OperatorDelete);
1448
538
      }
1449
1450
574
      assert(!Params.DestroyingDelete &&
1451
574
             "should not call destroying delete in a new-expression");
1452
1453
      // The second argument can be a std::size_t (for non-placement delete).
1454
574
      if (Params.Size)
1455
6
        DeleteArgs.add(Traits::get(CGF, AllocSize),
1456
6
                       CGF.getContext().getSizeType());
1457
1458
      // The next (second or third) argument can be a std::align_val_t, which
1459
      // is an enum whose underlying type is std::size_t.
1460
      // FIXME: Use the right type as the parameter type. Note that in a call
1461
      // to operator delete(size_t, ...), we may not have it available.
1462
574
      if (Params.Alignment)
1463
36
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1464
36
                           CGF.SizeTy, AllocAlign.getQuantity())),
1465
36
                       CGF.getContext().getSizeType());
1466
1467
      // Pass the rest of the arguments, which must match exactly.
1468
614
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1469
40
        auto Arg = getPlacementArgs()[I];
1470
40
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1471
40
      }
1472
1473
      // Call 'operator delete'.
1474
574
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1475
574
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::Emit(clang::CodeGen::CodeGenFunction&, clang::CodeGen::EHScopeStack::Cleanup::Flags)
Line
Count
Source
1430
568
    void Emit(CodeGenFunction &CGF, Flags flags) override {
1431
568
      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
1432
568
      CallArgList DeleteArgs;
1433
1434
      // The first argument is always a void* (or C* for a destroying operator
1435
      // delete for class type C).
1436
568
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1437
1438
      // Figure out what other parameters we should be implicitly passing.
1439
568
      UsualDeleteParams Params;
1440
568
      if (NumPlacementArgs) {
1441
        // A placement deallocation function is implicitly passed an alignment
1442
        // if the placement allocation function was, but is never passed a size.
1443
34
        Params.Alignment = PassAlignmentToPlacementDelete;
1444
534
      } else {
1445
        // For a non-placement new-expression, 'operator delete' can take a
1446
        // size and/or an alignment if it has the right parameters.
1447
534
        Params = getUsualDeleteParams(OperatorDelete);
1448
534
      }
1449
1450
568
      assert(!Params.DestroyingDelete &&
1451
568
             "should not call destroying delete in a new-expression");
1452
1453
      // The second argument can be a std::size_t (for non-placement delete).
1454
568
      if (Params.Size)
1455
6
        DeleteArgs.add(Traits::get(CGF, AllocSize),
1456
6
                       CGF.getContext().getSizeType());
1457
1458
      // The next (second or third) argument can be a std::align_val_t, which
1459
      // is an enum whose underlying type is std::size_t.
1460
      // FIXME: Use the right type as the parameter type. Note that in a call
1461
      // to operator delete(size_t, ...), we may not have it available.
1462
568
      if (Params.Alignment)
1463
36
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1464
36
                           CGF.SizeTy, AllocAlign.getQuantity())),
1465
36
                       CGF.getContext().getSizeType());
1466
1467
      // Pass the rest of the arguments, which must match exactly.
1468
604
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1469
36
        auto Arg = getPlacementArgs()[I];
1470
36
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1471
36
      }
1472
1473
      // Call 'operator delete'.
1474
568
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1475
568
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::Emit(clang::CodeGen::CodeGenFunction&, clang::CodeGen::EHScopeStack::Cleanup::Flags)
Line
Count
Source
1430
6
    void Emit(CodeGenFunction &CGF, Flags flags) override {
1431
6
      const auto *FPT = OperatorDelete->getType()->castAs<FunctionProtoType>();
1432
6
      CallArgList DeleteArgs;
1433
1434
      // The first argument is always a void* (or C* for a destroying operator
1435
      // delete for class type C).
1436
6
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1437
1438
      // Figure out what other parameters we should be implicitly passing.
1439
6
      UsualDeleteParams Params;
1440
6
      if (NumPlacementArgs) {
1441
        // A placement deallocation function is implicitly passed an alignment
1442
        // if the placement allocation function was, but is never passed a size.
1443
2
        Params.Alignment = PassAlignmentToPlacementDelete;
1444
4
      } else {
1445
        // For a non-placement new-expression, 'operator delete' can take a
1446
        // size and/or an alignment if it has the right parameters.
1447
4
        Params = getUsualDeleteParams(OperatorDelete);
1448
4
      }
1449
1450
6
      assert(!Params.DestroyingDelete &&
1451
6
             "should not call destroying delete in a new-expression");
1452
1453
      // The second argument can be a std::size_t (for non-placement delete).
1454
6
      if (Params.Size)
1455
0
        DeleteArgs.add(Traits::get(CGF, AllocSize),
1456
0
                       CGF.getContext().getSizeType());
1457
1458
      // The next (second or third) argument can be a std::align_val_t, which
1459
      // is an enum whose underlying type is std::size_t.
1460
      // FIXME: Use the right type as the parameter type. Note that in a call
1461
      // to operator delete(size_t, ...), we may not have it available.
1462
6
      if (Params.Alignment)
1463
0
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1464
0
                           CGF.SizeTy, AllocAlign.getQuantity())),
1465
0
                       CGF.getContext().getSizeType());
1466
1467
      // Pass the rest of the arguments, which must match exactly.
1468
10
      for (unsigned I = 0; I != NumPlacementArgs; ++I) {
1469
4
        auto Arg = getPlacementArgs()[I];
1470
4
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1471
4
      }
1472
1473
      // Call 'operator delete'.
1474
6
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1475
6
    }
1476
  };
1477
}
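
Illustration: the cleanup class above is what makes a matching placement operator delete run when the initializer of a placement new-expression throws. A sketch with invented names (Pool, Widget):

    #include <cstddef>

    struct Pool {};
    void *operator new(std::size_t n, Pool &p);          // placement allocator
    void operator delete(void *ptr, Pool &p) noexcept;   // matching placement delete

    struct Widget {
      Widget() { throw 1; }   // initializer throws...
    };

    void build(Pool &pool) {
      // ...so the cleanup calls operator delete(void*, Pool&) with the
      // same placement argument before the exception propagates.
      Widget *w = new (pool) Widget;
      (void)w;
    }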
1478
1479
/// Enter a cleanup to call 'operator delete' if the initializer in a
1480
/// new-expression throws.
1481
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1482
                                  const CXXNewExpr *E,
1483
                                  Address NewPtr,
1484
                                  llvm::Value *AllocSize,
1485
                                  CharUnits AllocAlign,
1486
809
                                  const CallArgList &NewArgs) {
1487
769
  unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;
1488
1489
  // If we're not inside a conditional branch, then the cleanup will
1490
  // dominate and we can do the easier (and more efficient) thing.
1491
809
  if (!CGF.isInConditionalBranch()) {
1492
803
    struct DirectCleanupTraits {
1493
803
      typedef llvm::Value *ValueTy;
1494
803
      typedef RValue RValueTy;
1495
574
      static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
1496
36
      static RValue get(CodeGenFunction &, RValueTy V) { return V; }
1497
803
    };
1498
1499
803
    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
1500
1501
803
    DirectCleanup *Cleanup = CGF.EHStack
1502
803
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
1503
803
                                           E->getNumPlacementArgs(),
1504
803
                                           E->getOperatorDelete(),
1505
803
                                           NewPtr.getPointer(),
1506
803
                                           AllocSize,
1507
803
                                           E->passAlignment(),
1508
803
                                           AllocAlign);
1509
839
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1510
36
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
1511
36
      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
1512
36
    }
1513
1514
803
    return;
1515
803
  }
1516
1517
  // Otherwise, we need to save all this stuff.
1518
6
  DominatingValue<RValue>::saved_type SavedNewPtr =
1519
6
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
1520
6
  DominatingValue<RValue>::saved_type SavedAllocSize =
1521
6
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1522
1523
6
  struct ConditionalCleanupTraits {
1524
6
    typedef DominatingValue<RValue>::saved_type ValueTy;
1525
6
    typedef DominatingValue<RValue>::saved_type RValueTy;
1526
10
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
1527
10
      return V.restore(CGF);
1528
10
    }
1529
6
  };
1530
6
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
1531
1532
6
  ConditionalCleanup *Cleanup = CGF.EHStack
1533
6
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
1534
6
                                              E->getNumPlacementArgs(),
1535
6
                                              E->getOperatorDelete(),
1536
6
                                              SavedNewPtr,
1537
6
                                              SavedAllocSize,
1538
6
                                              E->passAlignment(),
1539
6
                                              AllocAlign);
1540
10
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
1541
4
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
1542
4
    Cleanup->setPlacementArg(
1543
4
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
1544
4
  }
1545
1546
6
  CGF.initFullExprCleanup();
1547
6
}
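
Illustration: the conditional path above is needed when the new-expression itself is only conditionally evaluated, so the cleanup's operands must be saved as dominating values. A sketch (T is an invented type whose constructor may throw):

    struct T { T(); };   // constructor may throw

    T *maybe_make(bool b) {
      // The new-expression runs on only one arm of ?:, so the operator
      // delete cleanup is entered conditionally.
      return b ? new T : nullptr;
    }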
1548
1549
1.95k
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
1550
  // The element type being allocated.
1551
1.95k
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
1552
1553
  // 1. Build a call to the allocation function.
1554
1.95k
  FunctionDecl *allocator = E->getOperatorNew();
1555
1556
  // If there is a brace-initializer, we cannot allocate fewer elements than inits.
1557
1.95k
  unsigned minElements = 0;
1558
1.95k
  if (E->isArray() && 
E->hasInitializer()333
) {
1559
154
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
1560
154
    if (ILE && 
ILE->isStringLiteralInit()28
)
1561
8
      minElements =
1562
8
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
1563
8
              ->getSize().getZExtValue();
1564
146
    else if (ILE)
1565
20
      minElements = ILE->getNumInits();
1566
154
  }
1567
1568
1.95k
  llvm::Value *numElements = nullptr;
1569
1.95k
  llvm::Value *allocSizeWithoutCookie = nullptr;
1570
1.95k
  llvm::Value *allocSize =
1571
1.95k
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
1572
1.95k
                        allocSizeWithoutCookie);
1573
1.95k
  CharUnits allocAlign = getContext().getPreferredTypeAlignInChars(allocType);
1574
1575
  // Emit the allocation call.  If the allocator is a global placement
1576
  // operator, just "inline" it directly.
1577
1.95k
  Address allocation = Address::invalid();
1578
1.95k
  CallArgList allocatorArgs;
1579
1.95k
  if (allocator->isReservedGlobalPlacementOperator()) {
1580
678
    assert(E->getNumPlacementArgs() == 1);
1581
678
    const Expr *arg = *E->placement_arguments().begin();
1582
1583
678
    LValueBaseInfo BaseInfo;
1584
678
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);
1585
1586
    // The pointer expression will, in many cases, be an opaque void*.
1587
    // In these cases, discard the computed alignment and use the
1588
    // formal alignment of the allocated type.
1589
678
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
1590
675
      allocation = Address(allocation.getPointer(), allocAlign);
1591
1592
    // Set up allocatorArgs for the call to operator delete if it's not
1593
    // the reserved global operator.
1594
678
    if (E->getOperatorDelete() &&
1595
563
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1596
2
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
1597
2
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
1598
2
    }
1599
1600
1.27k
  } else {
1601
1.27k
    const FunctionProtoType *allocatorType =
1602
1.27k
      allocator->getType()->castAs<FunctionProtoType>();
1603
1.27k
    unsigned ParamsToSkip = 0;
1604
1605
    // The allocation size is the first argument.
1606
1.27k
    QualType sizeType = getContext().getSizeType();
1607
1.27k
    allocatorArgs.add(RValue::get(allocSize), sizeType);
1608
1.27k
    ++ParamsToSkip;
1609
1610
1.27k
    if (allocSize != allocSizeWithoutCookie) {
1611
63
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
1612
63
      allocAlign = std::max(allocAlign, cookieAlign);
1613
63
    }
1614
1615
    // The allocation alignment may be passed as the second argument.
1616
1.27k
    if (E->passAlignment()) {
1617
44
      QualType AlignValT = sizeType;
1618
44
      if (allocatorType->getNumParams() > 1) {
1619
36
        AlignValT = allocatorType->getParamType(1);
1620
36
        assert(getContext().hasSameUnqualifiedType(
1621
36
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
1622
36
                   sizeType) &&
1623
36
               "wrong type for alignment parameter");
1624
36
        ++ParamsToSkip;
1625
8
      } else {
1626
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
1627
8
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
1628
8
      }
1629
44
      allocatorArgs.add(
1630
44
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
1631
44
          AlignValT);
1632
44
    }
1633
1634
    // FIXME: Why do we not pass a CalleeDecl here?
1635
1.27k
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
1636
1.27k
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
1637
1638
1.27k
    RValue RV =
1639
1.27k
      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
1640
1641
    // Set !heapallocsite metadata on the call to operator new.
1642
1.27k
    if (getDebugInfo())
1643
662
      if (auto *newCall = dyn_cast<llvm::CallBase>(RV.getScalarVal()))
1644
662
        getDebugInfo()->addHeapAllocSiteMetadata(newCall, allocType,
1645
662
                                                 E->getExprLoc());
1646
1647
    // If this was a call to a global replaceable allocation function that does
1648
    // not take an alignment argument, the allocator is known to produce
1649
    // storage that's suitably aligned for any object that fits, up to a known
1650
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
1651
1.27k
    CharUnits allocationAlign = allocAlign;
1652
1.27k
    if (!E->passAlignment() &&
1653
1.22k
        allocator->isReplaceableGlobalAllocationFunction()) {
1654
1.13k
      unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
1655
1.13k
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
1656
1.13k
      allocationAlign = std::max(
1657
1.13k
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
1658
1.13k
    }
1659
1660
1.27k
    allocation = Address(RV.getScalarVal(), allocationAlign);
1661
1.27k
  }
1662
1663
  // Emit a null check on the allocation result if the allocation
1664
  // function is allowed to return null (because it has a non-throwing
1665
  // exception spec or is the reserved placement new) and we have an
1666
  // interesting initializer or will be running sanitizers on the initialization.
1667
1.95k
  bool nullCheck = E->shouldNullCheckAllocation() &&
1668
26
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
1669
10
                    sanitizePerformTypeCheck());
1670
1671
1.95k
  llvm::BasicBlock *nullCheckBB = nullptr;
1672
1.95k
  llvm::BasicBlock *contBB = nullptr;
1673
1674
  // The null-check means that the initializer is conditionally
1675
  // evaluated.
1676
1.95k
  ConditionalEvaluation conditional(*this);
1677
1678
1.95k
  if (nullCheck) {
1679
22
    conditional.begin(*this);
1680
1681
22
    nullCheckBB = Builder.GetInsertBlock();
1682
22
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
1683
22
    contBB = createBasicBlock("new.cont");
1684
1685
22
    llvm::Value *isNull =
1686
22
      Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
1687
22
    Builder.CreateCondBr(isNull, contBB, notNullBB);
1688
22
    EmitBlock(notNullBB);
1689
22
  }
1690
1691
  // If there's an operator delete, enter a cleanup to call it if an
1692
  // exception is thrown.
1693
1.95k
  EHScopeStack::stable_iterator operatorDeleteCleanup;
1694
1.95k
  llvm::Instruction *cleanupDominator = nullptr;
1695
1.95k
  if (E->getOperatorDelete() &&
1696
1.37k
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
1697
809
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
1698
809
                          allocatorArgs);
1699
809
    operatorDeleteCleanup = EHStack.stable_begin();
1700
809
    cleanupDominator = Builder.CreateUnreachable();
1701
809
  }
1702
1703
1.95k
  assert((allocSize == allocSizeWithoutCookie) ==
1704
1.95k
         CalculateCookiePadding(*this, E).isZero());
1705
1.95k
  if (allocSize != allocSizeWithoutCookie) {
1706
63
    assert(E->isArray());
1707
63
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
1708
63
                                                       numElements,
1709
63
                                                       E, allocType);
1710
63
  }
1711
1712
1.95k
  llvm::Type *elementTy = ConvertTypeForMem(allocType);
1713
1.95k
  Address result = Builder.CreateElementBitCast(allocation, elementTy);
1714
1715
  // Passing pointer through launder.invariant.group to avoid propagation of
1716
  // vptr information which may be included in the previous type.
1717
  // To avoid breaking LTO across different optimization levels, we do it regardless
1718
  // of optimization level.
1719
1.95k
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
1720
32
      allocator->isReservedGlobalPlacementOperator())
1721
5
    result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
1722
5
                     result.getAlignment());
1723
1724
  // Emit sanitizer checks for pointer value now, so that in the case of an
1725
  // array it is checked only once and not at each constructor call. We may
1726
  // have already checked that the pointer is non-null.
1727
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
1728
  // we'll null check the wrong pointer here.
1729
1.95k
  SanitizerSet SkippedChecks;
1730
1.95k
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
1731
1.95k
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
1732
1.95k
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
1733
1.95k
                result.getPointer(), allocType, result.getAlignment(),
1734
1.95k
                SkippedChecks, numElements);
1735
1736
1.95k
  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
1737
1.95k
                     allocSizeWithoutCookie);
1738
1.95k
  if (E->isArray()) {
1739
    // NewPtr is a pointer to the base element type.  If we're
1740
    // allocating an array of arrays, we'll need to cast back to the
1741
    // array pointer type.
1742
333
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
1743
333
    if (result.getType() != resultType)
1744
20
      result = Builder.CreateBitCast(result, resultType);
1745
333
  }
1746
1747
  // Deactivate the 'operator delete' cleanup if we finished
1748
  // initialization.
1749
1.95k
  if (operatorDeleteCleanup.isValid()) {
1750
809
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
1751
809
    cleanupDominator->eraseFromParent();
1752
809
  }
1753
1754
1.95k
  llvm::Value *resultPtr = result.getPointer();
1755
1.95k
  if (nullCheck) {
1756
22
    conditional.end(*this);
1757
1758
22
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
1759
22
    EmitBlock(contBB);
1760
1761
22
    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
1762
22
    PHI->addIncoming(resultPtr, notNullBB);
1763
22
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
1764
22
                     nullCheckBB);
1765
1766
22
    resultPtr = PHI;
1767
22
  }
1768
1769
1.95k
  return resultPtr;
1770
1.95k
}
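
Illustration: two of the paths through EmitCXXNewExpr above, sketched with an invented over-aligned type. The first passes std::align_val_t to the allocator; the second uses a non-throwing allocator, so the result is null-checked before the initializer runs.

    #include <cstddef>
    #include <new>

    struct alignas(64) Vec { float f[16]; };

    Vec *make(std::size_t n) {
      // Over-aligned: 64 is passed as an std::align_val_t argument.
      return new Vec[n];
    }

    Vec *try_make() {
      // Nothrow allocator: the initializer is emitted under a null check,
      // and the whole expression yields null on allocation failure.
      return new (std::nothrow) Vec{};
    }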
1771
1772
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1773
                                     llvm::Value *Ptr, QualType DeleteTy,
1774
                                     llvm::Value *NumElements,
1775
1.65k
                                     CharUnits CookieSize) {
1776
1.65k
  assert((!NumElements && CookieSize.isZero()) ||
1777
1.65k
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1778
1779
1.65k
  const auto *DeleteFTy = DeleteFD->getType()->castAs<FunctionProtoType>();
1780
1.65k
  CallArgList DeleteArgs;
1781
1782
1.65k
  auto Params = getUsualDeleteParams(DeleteFD);
1783
1.65k
  auto ParamTypeIt = DeleteFTy->param_type_begin();
1784
1785
  // Pass the pointer itself.
1786
1.65k
  QualType ArgTy = *ParamTypeIt++;
1787
1.65k
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1788
1.65k
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1789
1790
  // Pass the std::destroying_delete tag if present.
1791
1.65k
  llvm::AllocaInst *DestroyingDeleteTag = nullptr;
1792
1.65k
  if (Params.DestroyingDelete) {
1793
30
    QualType DDTag = *ParamTypeIt++;
1794
30
    llvm::Type *Ty = getTypes().ConvertType(DDTag);
1795
30
    CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
1796
30
    DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
1797
30
    DestroyingDeleteTag->setAlignment(Align.getAsAlign());
1798
30
    DeleteArgs.add(RValue::getAggregate(Address(DestroyingDeleteTag, Align)), DDTag);
1799
30
  }
1800
1801
  // Pass the size if the delete function has a size_t parameter.
1802
1.65k
  if (Params.Size) {
1803
84
    QualType SizeType = *ParamTypeIt++;
1804
84
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1805
84
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
1806
84
                                               DeleteTypeSize.getQuantity());
1807
1808
    // For array new, multiply by the number of elements.
1809
84
    if (NumElements)
1810
23
      Size = Builder.CreateMul(Size, NumElements);
1811
1812
    // If there is a cookie, add the cookie size.
1813
84
    if (!CookieSize.isZero())
1814
23
      Size = Builder.CreateAdd(
1815
23
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
1816
1817
84
    DeleteArgs.add(RValue::get(Size), SizeType);
1818
84
  }
1819
1820
  // Pass the alignment if the delete function has an align_val_t parameter.
1821
1.65k
  if (Params.Alignment) {
1822
38
    QualType AlignValType = *ParamTypeIt++;
1823
38
    CharUnits DeleteTypeAlign =
1824
38
        getContext().toCharUnitsFromBits(getContext().getTypeAlignIfKnown(
1825
38
            DeleteTy, true /* NeedsPreferredAlignment */));
1826
38
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
1827
38
                                                DeleteTypeAlign.getQuantity());
1828
38
    DeleteArgs.add(RValue::get(Align), AlignValType);
1829
38
  }
1830
1831
1.65k
  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
1832
1.65k
         "unknown parameter to usual delete function");
1833
1834
  // Emit the call to delete.
1835
1.65k
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1836
1837
  // If call argument lowering didn't use the destroying_delete_t alloca,
1838
  // remove it again.
1839
1.65k
  if (DestroyingDeleteTag && DestroyingDeleteTag->use_empty())
1840
10
    DestroyingDeleteTag->eraseFromParent();
1841
1.65k
}
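
Illustration: a class-specific sized deallocation function, whose std::size_t argument EmitDeleteCall computes above (the element size, scaled by the element count plus cookie for array deletes). Names are invented:

    #include <cstddef>
    #include <cstdlib>

    struct Node {
      int payload;
      static void *operator new(std::size_t n) { return std::malloc(n); }
      // Sized delete: the second argument receives sizeof(Node).
      static void operator delete(void *p, std::size_t) { std::free(p); }
    };

    void roundtrip() {
      Node *n = new Node;
      delete n;   // emitted as Node::operator delete(p, sizeof(Node))
    }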
1842
1843
namespace {
1844
  /// Calls the given 'operator delete' on a single object.
1845
  struct CallObjectDelete final : EHScopeStack::Cleanup {
1846
    llvm::Value *Ptr;
1847
    const FunctionDecl *OperatorDelete;
1848
    QualType ElementType;
1849
1850
    CallObjectDelete(llvm::Value *Ptr,
1851
                     const FunctionDecl *OperatorDelete,
1852
                     QualType ElementType)
1853
522
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
1854
1855
522
    void Emit(CodeGenFunction &CGF, Flags flags) override {
1856
522
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
1857
522
    }
1858
  };
1859
}
1860
1861
void
1862
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
1863
                                             llvm::Value *CompletePtr,
1864
6
                                             QualType ElementType) {
1865
6
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
1866
6
                                        OperatorDelete, ElementType);
1867
6
}
1868
1869
/// Emit the code for deleting a single object with a destroying operator
1870
/// delete. If the element type has a non-virtual destructor, Ptr has already
1871
/// been converted to the type of the parameter of 'operator delete'. Otherwise
1872
/// Ptr points to an object of the static type.
1873
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
1874
                                       const CXXDeleteExpr *DE, Address Ptr,
1875
30
                                       QualType ElementType) {
1876
30
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
1877
30
  if (Dtor && Dtor->isVirtual())
1878
12
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1879
12
                                                Dtor);
1880
18
  else
1881
18
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
1882
30
}
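
Illustration: a destroying operator delete (C++20), for which the delete-expression above emits no separate destructor call; destruction is the deallocation function's job. Invented names:

    #include <cstddef>
    #include <cstdlib>
    #include <new>

    struct Managed {
      ~Managed() {}
      static void *operator new(std::size_t n) { return std::malloc(n); }
      // Destroying delete: the compiler skips the implicit dtor call.
      void operator delete(Managed *p, std::destroying_delete_t) {
        p->~Managed();   // this function must destroy...
        std::free(p);    // ...and deallocate.
      }
    };

    void drop(Managed *m) { delete m; }   // takes the destroying-delete path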
1883
1884
/// Emit the code for deleting a single object.
1885
/// \return \c true if we started emitting UnconditionalDeleteBlock, \c false
1886
/// if not.
1887
static bool EmitObjectDelete(CodeGenFunction &CGF,
1888
                             const CXXDeleteExpr *DE,
1889
                             Address Ptr,
1890
                             QualType ElementType,
1891
581
                             llvm::BasicBlock *UnconditionalDeleteBlock) {
1892
  // C++11 [expr.delete]p3:
1893
  //   If the static type of the object to be deleted is different from its
1894
  //   dynamic type, the static type shall be a base class of the dynamic type
1895
  //   of the object to be deleted and the static type shall have a virtual
1896
  //   destructor or the behavior is undefined.
1897
581
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
1898
581
                    DE->getExprLoc(), Ptr.getPointer(),
1899
581
                    ElementType);
1900
1901
581
  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
1902
581
  assert(!OperatorDelete->isDestroyingOperatorDelete());
1903
1904
  // Find the destructor for the type, if applicable.  If the
1905
  // destructor is virtual, we'll just emit the vcall and return.
1906
581
  const CXXDestructorDecl *Dtor = nullptr;
1907
581
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
1908
520
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
1909
520
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
1910
411
      Dtor = RD->getDestructor();
1911
1912
411
      if (Dtor->isVirtual()) {
1913
67
        bool UseVirtualCall = true;
1914
67
        const Expr *Base = DE->getArgument();
1915
67
        if (auto *DevirtualizedDtor =
1916
2
                dyn_cast_or_null<const CXXDestructorDecl>(
1917
2
                    Dtor->getDevirtualizedMethod(
1918
2
                        Base, CGF.CGM.getLangOpts().AppleKext))) {
1919
2
          UseVirtualCall = false;
1920
2
          const CXXRecordDecl *DevirtualizedClass =
1921
2
              DevirtualizedDtor->getParent();
1922
2
          if (declaresSameEntity(getCXXRecord(Base), DevirtualizedClass)) {
1923
            // Devirtualized to the class of the base type (the type of the
1924
            // whole expression).
1925
2
            Dtor = DevirtualizedDtor;
1926
0
          } else {
1927
            // Devirtualized to some other type. Would need to cast the this
1928
            // pointer to that type but we don't have support for that yet, so
1929
            // do a virtual call. FIXME: handle the case where it is
1930
            // devirtualized to the derived type (the type of the inner
1931
            // expression) as in EmitCXXMemberOrOperatorMemberCallExpr.
1932
0
            UseVirtualCall = true;
1933
0
          }
1934
2
        }
1935
67
        if (UseVirtualCall) {
1936
65
          CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1937
65
                                                      Dtor);
1938
65
          return false;
1939
65
        }
1940
516
      }
1941
411
    }
1942
520
  }
1943
1944
  // Make sure that we call delete even if the dtor throws.
1945
  // This doesn't have to be a conditional cleanup because we're going
1946
  // to pop it off in a second.
1947
516
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
1948
516
                                            Ptr.getPointer(),
1949
516
                                            OperatorDelete, ElementType);
1950
1951
516
  if (Dtor)
1952
346
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
1953
346
                              /*ForVirtualBase=*/false,
1954
346
                              /*Delegating=*/false,
1955
346
                              Ptr, ElementType);
1956
170
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
1957
4
    switch (Lifetime) {
1958
0
    case Qualifiers::OCL_None:
1959
0
    case Qualifiers::OCL_ExplicitNone:
1960
0
    case Qualifiers::OCL_Autoreleasing:
1961
0
      break;
1962
1963
2
    case Qualifiers::OCL_Strong:
1964
2
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
1965
2
      break;
1966
1967
2
    case Qualifiers::OCL_Weak:
1968
2
      CGF.EmitARCDestroyWeak(Ptr);
1969
2
      break;
1970
516
    }
1971
516
  }
1972
1973
  // When optimizing for size, call 'operator delete' unconditionally.
1974
516
  if (CGF.CGM.getCodeGenOpts().OptimizeSize > 1) {
1975
7
    CGF.EmitBlock(UnconditionalDeleteBlock);
1976
7
    CGF.PopCleanupBlock();
1977
7
    return true;
1978
7
  }
1979
1980
509
  CGF.PopCleanupBlock();
1981
509
  return false;
1982
509
}
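
Illustration: the two main shapes EmitObjectDelete handles, sketched with invented types. A virtual destructor routes through emitVirtualObjectDelete (one virtual call both destroys and frees); otherwise the destructor is called directly under a cleanup that guarantees operator delete runs even if it throws.

    struct Base { virtual ~Base() {} };
    struct Derived : Base { ~Derived() override {} };

    void dispose(Base *b) {
      // Virtual destructor: emitted as a virtual "deleting destructor" call.
      delete b;
    }

    struct Plain { int x; ~Plain() {} };

    void dispose_plain(Plain *p) {
      // Non-virtual: ~Plain() then operator delete, with the delete kept
      // alive by a cleanup in case the destructor throws.
      delete p;
    }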
1983
1984
namespace {
1985
  /// Calls the given 'operator delete' on an array of objects.
1986
  struct CallArrayDelete final : EHScopeStack::Cleanup {
1987
    llvm::Value *Ptr;
1988
    const FunctionDecl *OperatorDelete;
1989
    llvm::Value *NumElements;
1990
    QualType ElementType;
1991
    CharUnits CookieSize;
1992
1993
    CallArrayDelete(llvm::Value *Ptr,
1994
                    const FunctionDecl *OperatorDelete,
1995
                    llvm::Value *NumElements,
1996
                    QualType ElementType,
1997
                    CharUnits CookieSize)
1998
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
1999
241
        ElementType(ElementType), CookieSize(CookieSize) {}
2000
2001
244
    void Emit(CodeGenFunction &CGF, Flags flags) override {
2002
244
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
2003
244
                         CookieSize);
2004
244
    }
2005
  };
2006
}
2007
2008
/// Emit the code for deleting an array of objects.
2009
static void EmitArrayDelete(CodeGenFunction &CGF,
2010
                            const CXXDeleteExpr *E,
2011
                            Address deletedPtr,
2012
241
                            QualType elementType) {
2013
241
  llvm::Value *numElements = nullptr;
2014
241
  llvm::Value *allocatedPtr = nullptr;
2015
241
  CharUnits cookieSize;
2016
241
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
2017
241
                                      numElements, allocatedPtr, cookieSize);
2018
2019
241
  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
2020
2021
  // Make sure that we call delete even if one of the dtors throws.
2022
241
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
2023
241
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
2024
241
                                           allocatedPtr, operatorDelete,
2025
241
                                           numElements, elementType,
2026
241
                                           cookieSize);
2027
2028
  // Destroy the elements.
2029
241
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
2030
42
    assert(numElements && "no element count for a type with a destructor!");
2031
2032
42
    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
2033
42
    CharUnits elementAlign =
2034
42
      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
2035
2036
42
    llvm::Value *arrayBegin = deletedPtr.getPointer();
2037
42
    llvm::Value *arrayEnd =
2038
42
      CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
2039
2040
    // Note that it is legal to allocate a zero-length array, and we
2041
    // can never fold the check away because the length should always
2042
    // come from a cookie.
2043
42
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
2044
42
                         CGF.getDestroyer(dtorKind),
2045
42
                         /*checkZeroLength*/ true,
2046
42
                         CGF.needsEHCleanup(dtorKind));
2047
42
  }
2048
2049
  // Pop the cleanup block.
2050
241
  CGF.PopCleanupBlock();
2051
241
}
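
Illustration: the array cookie that ReadArrayCookie recovers above. Because the invented element type has a non-trivial destructor, new[] stores the element count ahead of the array, and delete[] reads it back to know how many destructors to run and which pointer to free.

    struct Elem { ~Elem() {} };

    void release(Elem *arr) {
      // 'arr' points at the first element; the cookie sits just before it.
      delete[] arr;
    }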
2052
2053
852
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
2054
852
  const Expr *Arg = E->getArgument();
2055
852
  Address Ptr = EmitPointerWithAlignment(Arg);
2056
2057
  // Null check the pointer.
2058
  //
2059
  // We could avoid this null check if we can determine that the object
2060
  // destruction is trivial and doesn't require an array cookie; we can
2061
  // unconditionally perform the operator delete call in that case. For now, we
2062
  // assume that deleted pointers are null rarely enough that it's better to
2063
  // keep the branch. This might be worth revisiting for a -O0 code size win.
2064
852
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
2065
852
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
2066
2067
852
  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");
2068
2069
852
  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
2070
852
  EmitBlock(DeleteNotNull);
2071
2072
852
  QualType DeleteTy = E->getDestroyedType();
2073
2074
  // A destroying operator delete overrides the entire operation of the
2075
  // delete expression.
2076
852
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
2077
30
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
2078
30
    EmitBlock(DeleteEnd);
2079
30
    return;
2080
30
  }
2081
2082
  // We might be deleting a pointer to array.  If so, GEP down to the
2083
  // first non-array element.
2084
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
2085
822
  if (DeleteTy->isConstantArrayType()) {
2086
9
    llvm::Value *Zero = Builder.getInt32(0);
2087
9
    SmallVector<llvm::Value*,8> GEP;
2088
2089
9
    GEP.push_back(Zero); // point at the outermost array
2090
2091
    // For each layer of array type we're pointing at:
2092
20
    while (const ConstantArrayType *Arr
2093
11
             = getContext().getAsConstantArrayType(DeleteTy)) {
2094
      // 1. Unpeel the array type.
2095
11
      DeleteTy = Arr->getElementType();
2096
2097
      // 2. GEP to the first element of the array.
2098
11
      GEP.push_back(Zero);
2099
11
    }
2100
2101
9
    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
2102
9
                  Ptr.getAlignment());
2103
9
  }
2104
2105
822
  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
2106
2107
822
  if (E->isArrayForm()) {
2108
241
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
2109
241
    EmitBlock(DeleteEnd);
2110
581
  } else {
2111
581
    if (!EmitObjectDelete(*this, E, Ptr, DeleteTy, DeleteEnd))
2112
574
      EmitBlock(DeleteEnd);
2113
581
  }
2114
822
}
2115
2116
52
static bool isGLValueFromPointerDeref(const Expr *E) {
2117
52
  E = E->IgnoreParens();
2118
2119
52
  if (const auto *CE = dyn_cast<CastExpr>(E)) {
2120
6
    if (!CE->getSubExpr()->isGLValue())
2121
0
      return false;
2122
6
    return isGLValueFromPointerDeref(CE->getSubExpr());
2123
6
  }
2124
2125
46
  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
2126
4
    return isGLValueFromPointerDeref(OVE->getSourceExpr());
2127
2128
42
  if (const auto *BO = dyn_cast<BinaryOperator>(E))
2129
1
    if (BO->getOpcode() == BO_Comma)
2130
1
      return isGLValueFromPointerDeref(BO->getRHS());
2131
2132
41
  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
2133
7
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
2134
3
           isGLValueFromPointerDeref(ACO->getFalseExpr());
2135
2136
  // C++11 [expr.sub]p1:
2137
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
2138
34
  if (isa<ArraySubscriptExpr>(E))
2139
2
    return true;
2140
2141
32
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
2142
19
    if (UO->getOpcode() == UO_Deref)
2143
19
      return true;
2144
2145
13
  return false;
2146
13
}
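
Illustration: this predicate drives the null check mandated (under the generous reading described below) for typeid applied to a dereferenced pointer. Invented names:

    #include <typeinfo>

    struct Poly { virtual ~Poly() {} };

    const std::type_info &probe(Poly *p) {
      // Operand is *p, a pointer dereference: if p is null, the emitted
      // check throws std::bad_typeid instead of loading through the vptr.
      return typeid(*p);
    }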
2147
2148
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
2149
31
                                         llvm::Type *StdTypeInfoPtrTy) {
2150
  // Get the vtable pointer.
2151
31
  Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);
2152
2153
31
  QualType SrcRecordTy = E->getType();
2154
2155
  // C++ [class.cdtor]p4:
2156
  //   If the operand of typeid refers to the object under construction or
2157
  //   destruction and the static type of the operand is neither the constructor
2158
  //   or destructor’s class nor one of its bases, the behavior is undefined.
2159
31
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
2160
31
                    ThisPtr.getPointer(), SrcRecordTy);
2161
2162
  // C++ [expr.typeid]p2:
2163
  //   If the glvalue expression is obtained by applying the unary * operator to
2164
  //   a pointer and the pointer is a null pointer value, the typeid expression
2165
  //   throws the std::bad_typeid exception.
2166
  //
2167
  // However, this paragraph's intent is not clear.  We choose a very generous
2168
  // interpretation which implores us to consider comma operators, conditional
2169
  // operators, parentheses and other such constructs.
2170
31
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
2171
20
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
2172
20
    llvm::BasicBlock *BadTypeidBlock =
2173
20
        CGF.createBasicBlock("typeid.bad_typeid");
2174
20
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");
2175
2176
20
    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
2177
20
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
2178
2179
20
    CGF.EmitBlock(BadTypeidBlock);
2180
20
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
2181
20
    CGF.EmitBlock(EndBlock);
2182
20
  }
2183
2184
31
  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
2185
31
                                        StdTypeInfoPtrTy);
2186
31
}
2187
2188
338
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
2189
338
  llvm::Type *StdTypeInfoPtrTy =
2190
338
    ConvertType(E->getType())->getPointerTo();
2191
2192
338
  if (E->isTypeOperand()) {
2193
266
    llvm::Constant *TypeInfo =
2194
266
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
2195
266
    return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
2196
266
  }
2197
2198
  // C++ [expr.typeid]p2:
2199
  //   When typeid is applied to a glvalue expression whose type is a
2200
  //   polymorphic class type, the result refers to a std::type_info object
2201
  //   representing the type of the most derived object (that is, the dynamic
2202
  //   type) to which the glvalue refers.
2203
  // If the operand is already most derived object, no need to look up vtable.
2204
72
  if (E->isPotentiallyEvaluated() && !E->isMostDerived(getContext()))
2205
31
    return EmitTypeidFromVTable(*this, E->getExprOperand(),
2206
31
                                StdTypeInfoPtrTy);
2207
2208
41
  QualType OperandTy = E->getExprOperand()->getType();
2209
41
  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
2210
41
                               StdTypeInfoPtrTy);
2211
41
}
2212
2213
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
2214
2
                                          QualType DestTy) {
2215
2
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);
2216
2
  if (DestTy->isPointerType())
2217
1
    return llvm::Constant::getNullValue(DestLTy);
2218
2219
  /// C++ [expr.dynamic.cast]p9:
2220
  ///   A failed cast to reference type throws std::bad_cast
2221
1
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
2222
0
    return nullptr;
2223
2224
1
  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
2225
1
  return llvm::UndefValue::get(DestLTy);
2226
1
}
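
EmitDynamicCastToNull handles casts that the frontend has already proven must fail (the DCE->isAlwaysNull() check below); one way to trigger it, assuming the final-class reasoning applies as sketched here, is a cast from a final class to an unrelated type:

struct A { virtual ~A() {} };
struct B final : A {};
struct Unrelated { virtual ~Unrelated() {} };

Unrelated *always_null(B *b) {
  // B is final and Unrelated is not among its bases, so the cast can
  // never succeed; the pointer form folds to a null constant.
  return dynamic_cast<Unrelated *>(b);
}

Unrelated &always_throws(B &b) {
  // The reference form instead becomes an unconditional bad-cast call
  // (EmitBadCastCall; __cxa_bad_cast on Itanium-style ABIs).
  return dynamic_cast<Unrelated &>(b);
}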
2227
2228
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
2229
77
                                              const CXXDynamicCastExpr *DCE) {
2230
77
  CGM.EmitExplicitCastExprType(DCE, this);
2231
77
  QualType DestTy = DCE->getTypeAsWritten();
2232
2233
77
  QualType SrcTy = DCE->getSubExpr()->getType();
2234
2235
  // C++ [expr.dynamic.cast]p7:
2236
  //   If T is "pointer to cv void," then the result is a pointer to the most
2237
  //   derived object pointed to by v.
2238
77
  const PointerType *DestPTy = DestTy->getAs<PointerType>();
2239
2240
77
  bool isDynamicCastToVoid;
2241
77
  QualType SrcRecordTy;
2242
77
  QualType DestRecordTy;
2243
77
  if (DestPTy) {
2244
64
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
2245
64
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
2246
64
    DestRecordTy = DestPTy->getPointeeType();
2247
13
  } else {
2248
13
    isDynamicCastToVoid = false;
2249
13
    SrcRecordTy = SrcTy;
2250
13
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
2251
13
  }
2252
2253
  // C++ [class.cdtor]p5:
2254
  //   If the operand of the dynamic_cast refers to the object under
2255
  //   construction or destruction and the static type of the operand is not a
2256
  //   pointer to or object of the constructor or destructor’s own class or one
2257
  //   of its bases, the dynamic_cast results in undefined behavior.
2258
77
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
2259
77
                SrcRecordTy);
2260
2261
77
  if (DCE->isAlwaysNull())
2262
2
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
2263
2
      return T;
2264
2265
75
  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
2266
2267
  // C++ [expr.dynamic.cast]p4:
2268
  //   If the value of v is a null pointer value in the pointer case, the result
2269
  //   is the null pointer value of type T.
2270
75
  bool ShouldNullCheckSrcValue =
2271
75
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
2272
75
                                                         SrcRecordTy);
2273
2274
75
  llvm::BasicBlock *CastNull = nullptr;
2275
75
  llvm::BasicBlock *CastNotNull = nullptr;
2276
75
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
2277
2278
75
  if (ShouldNullCheckSrcValue) {
2279
59
    CastNull = createBasicBlock("dynamic_cast.null");
2280
59
    CastNotNull = createBasicBlock("dynamic_cast.notnull");
2281
2282
59
    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
2283
59
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
2284
59
    EmitBlock(CastNotNull);
2285
59
  }
2286
2287
75
  llvm::Value *Value;
2288
75
  if (isDynamicCastToVoid) {
2289
6
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
2290
6
                                                  DestTy);
2291
69
  } else {
2292
69
    assert(DestRecordTy->isRecordType() &&
2293
69
           "destination type must be a record type!");
2294
69
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
2295
69
                                                DestTy, DestRecordTy, CastEnd);
2296
69
    CastNotNull = Builder.GetInsertBlock();
2297
69
  }
2298
2299
75
  if (ShouldNullCheckSrcValue) {
2300
59
    EmitBranch(CastEnd);
2301
2302
59
    EmitBlock(CastNull);
2303
59
    EmitBranch(CastEnd);
2304
59
  }
2305
2306
75
  EmitBlock(CastEnd);
2307
2308
75
  if (ShouldNullCheckSrcValue) {
2309
59
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
2310
59
    PHI->addIncoming(Value, CastNotNull);
2311
59
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
2312
2313
59
    Value = PHI;
2314
59
  }
2315
2316
75
  return Value;
2317
75
}
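
As a closing illustration, the shapes handled by EmitDynamicCast surface at the source level as follows (invented class names; the block names match the createBasicBlock calls above, and runtime details vary by ABI):

struct Base { virtual ~Base() {} };
struct Derived : Base {};

Derived *checked(Base *p) {
  // Pointer source, so ShouldNullCheckSrcValue is true: the emitted
  // code tests p for null (dynamic_cast.null / dynamic_cast.notnull),
  // calls the ABI cast routine only on the notnull path, and merges
  // the results with the PHI in dynamic_cast.end.
  return dynamic_cast<Derived *>(p);
}

Derived &ref_cast(Base &b) {
  // Reference source: never null, so no null check or PHI is emitted;
  // a failed cast throws std::bad_cast instead of yielding null.
  return dynamic_cast<Derived &>(b);
}

void *most_derived(Base *p) {
  // Cast to void*: EmitDynamicCastToVoid returns the address of the
  // most derived object (on Itanium-style ABIs, a vtable offset load
  // rather than a library call).
  return dynamic_cast<void *>(p);
}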