Coverage Report

Created: 2019-07-24 05:18

/Users/buildslave/jenkins/workspace/clang-stage2-coverage-R/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
Line
Count
Source (jump to first uncovered line)
1
//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
2
//
3
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4
// See https://llvm.org/LICENSE.txt for license information.
5
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6
//
7
//===----------------------------------------------------------------------===//
8
//
9
// This contains code dealing with code generation of C++ expressions
10
//
11
//===----------------------------------------------------------------------===//
12
13
#include "CGCUDARuntime.h"
14
#include "CGCXXABI.h"
15
#include "CGDebugInfo.h"
16
#include "CGObjCRuntime.h"
17
#include "CodeGenFunction.h"
18
#include "ConstantEmitter.h"
19
#include "TargetInfo.h"
20
#include "clang/Basic/CodeGenOptions.h"
21
#include "clang/CodeGen/CGFunctionInfo.h"
22
#include "llvm/IR/Intrinsics.h"
23
24
using namespace clang;
25
using namespace CodeGen;
26
27
namespace {
28
struct MemberCallInfo {
29
  RequiredArgs ReqArgs;
30
  // Number of prefix arguments for the call. Ignores the `this` pointer.
31
  unsigned PrefixSize;
32
};
33
}
34
35
static MemberCallInfo
36
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
37
                                  llvm::Value *This, llvm::Value *ImplicitParam,
38
                                  QualType ImplicitParamTy, const CallExpr *CE,
39
281k
                                  CallArgList &Args, CallArgList *RtlArgs) {
40
281k
  assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
41
281k
         isa<CXXOperatorCallExpr>(CE));
42
281k
  assert(MD->isInstance() &&
43
281k
         "Trying to emit a member or operator call expr on a static method!");
44
281k
45
281k
  // Push the this ptr.
46
281k
  const CXXRecordDecl *RD =
47
281k
      CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
48
281k
  Args.add(RValue::get(This), CGF.getTypes().DeriveThisType(RD, MD));
49
281k
50
281k
  // If there is an implicit parameter (e.g. VTT), emit it.
51
281k
  if (ImplicitParam) {
52
425
    Args.add(RValue::get(ImplicitParam), ImplicitParamTy);
53
425
  }
54
281k
55
281k
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
56
281k
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
57
281k
  unsigned PrefixSize = Args.size() - 1;
58
281k
59
281k
  // And the rest of the call args.
60
281k
  if (RtlArgs) {
61
3.99k
    // Special case: if the caller emitted the arguments right-to-left already
62
3.99k
    // (prior to emitting the *this argument), we're done. This happens for
63
3.99k
    // assignment operators.
64
3.99k
    Args.addFrom(*RtlArgs);
65
277k
  } else if (CE) {
66
222k
    // Special case: skip first argument of CXXOperatorCall (it is "this").
67
222k
    unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
68
222k
    CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
69
222k
                     CE->getDirectCallee());
70
222k
  } else {
71
54.2k
    assert(
72
54.2k
        FPT->getNumParams() == 0 &&
73
54.2k
        "No CallExpr specified for function with non-zero number of arguments");
74
54.2k
  }
75
281k
  return {required, PrefixSize};
76
281k
}
77
78
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
79
    const CXXMethodDecl *MD, const CGCallee &Callee,
80
    ReturnValueSlot ReturnValue,
81
    llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
82
226k
    const CallExpr *CE, CallArgList *RtlArgs) {
83
226k
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
84
226k
  CallArgList Args;
85
226k
  MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
86
226k
      *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
87
226k
  auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
88
226k
      Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
89
226k
  return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
90
226k
                  CE ? CE->getExprLoc() : SourceLocation());
91
226k
}
92
93
RValue CodeGenFunction::EmitCXXDestructorCall(
94
    GlobalDecl Dtor, const CGCallee &Callee, llvm::Value *This, QualType ThisTy,
95
54.2k
    llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE) {
96
54.2k
  const CXXMethodDecl *DtorDecl = cast<CXXMethodDecl>(Dtor.getDecl());
97
54.2k
98
54.2k
  assert(!ThisTy.isNull());
99
54.2k
  assert(ThisTy->getAsCXXRecordDecl() == DtorDecl->getParent() &&
100
54.2k
         "Pointer/Object mixup");
101
54.2k
102
54.2k
  LangAS SrcAS = ThisTy.getAddressSpace();
103
54.2k
  LangAS DstAS = DtorDecl->getMethodQualifiers().getAddressSpace();
104
54.2k
  if (SrcAS != DstAS) {
105
2
    QualType DstTy = DtorDecl->getThisType();
106
2
    llvm::Type *NewType = CGM.getTypes().ConvertType(DstTy);
107
2
    This = getTargetHooks().performAddrSpaceCast(*this, This, SrcAS, DstAS,
108
2
                                                 NewType);
109
2
  }
110
54.2k
111
54.2k
  CallArgList Args;
112
54.2k
  commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
113
54.2k
                                    ImplicitParamTy, CE, Args, nullptr);
114
54.2k
  return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
115
54.2k
                  ReturnValueSlot(), Args);
116
54.2k
}
117
118
RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
119
475
                                            const CXXPseudoDestructorExpr *E) {
120
475
  QualType DestroyedType = E->getDestroyedType();
121
475
  if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
122
4
    // Automatic Reference Counting:
123
4
    //   If the pseudo-expression names a retainable object with weak or
124
4
    //   strong lifetime, the object shall be released.
125
4
    Expr *BaseExpr = E->getBase();
126
4
    Address BaseValue = Address::invalid();
127
4
    Qualifiers BaseQuals;
128
4
129
4
    // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
130
4
    if (E->isArrow()) {
131
2
      BaseValue = EmitPointerWithAlignment(BaseExpr);
132
2
      const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
133
2
      BaseQuals = PTy->getPointeeType().getQualifiers();
134
2
    } else {
135
2
      LValue BaseLV = EmitLValue(BaseExpr);
136
2
      BaseValue = BaseLV.getAddress();
137
2
      QualType BaseTy = BaseExpr->getType();
138
2
      BaseQuals = BaseTy.getQualifiers();
139
2
    }
140
4
141
4
    switch (DestroyedType.getObjCLifetime()) {
142
4
    case Qualifiers::OCL_None:
143
0
    case Qualifiers::OCL_ExplicitNone:
144
0
    case Qualifiers::OCL_Autoreleasing:
145
0
      break;
146
0
147
2
    case Qualifiers::OCL_Strong:
148
2
      EmitARCRelease(Builder.CreateLoad(BaseValue,
149
2
                        DestroyedType.isVolatileQualified()),
150
2
                     ARCPreciseLifetime);
151
2
      break;
152
0
153
2
    case Qualifiers::OCL_Weak:
154
2
      EmitARCDestroyWeak(BaseValue);
155
2
      break;
156
471
    }
157
471
  } else {
158
471
    // C++ [expr.pseudo]p1:
159
471
    //   The result shall only be used as the operand for the function call
160
471
    //   operator (), and the result of such a call has type void. The only
161
471
    //   effect is the evaluation of the postfix-expression before the dot or
162
471
    //   arrow.
163
471
    EmitIgnoredExpr(E->getBase());
164
471
  }
165
475
166
475
  return RValue::get(nullptr);
167
475
}
168
169
421
static CXXRecordDecl *getCXXRecord(const Expr *E) {
170
421
  QualType T = E->getType();
171
421
  if (const PointerType *PTy = T->getAs<PointerType>())
172
27
    T = PTy->getPointeeType();
173
421
  const RecordType *Ty = T->castAs<RecordType>();
174
421
  return cast<CXXRecordDecl>(Ty->getDecl());
175
421
}
176
177
// Note: This function also emit constructor calls to support a MSVC
178
// extensions allowing explicit constructor function call.
179
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
180
192k
                                              ReturnValueSlot ReturnValue) {
181
192k
  const Expr *callee = CE->getCallee()->IgnoreParens();
182
192k
183
192k
  if (isa<BinaryOperator>(callee))
184
147
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
185
192k
186
192k
  const MemberExpr *ME = cast<MemberExpr>(callee);
187
192k
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
188
192k
189
192k
  if (MD->isStatic()) {
190
0
    // The method is static, emit it as we would a regular call.
191
0
    CGCallee callee =
192
0
        CGCallee::forDirect(CGM.GetAddrOfFunction(MD), GlobalDecl(MD));
193
0
    return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
194
0
                    ReturnValue);
195
0
  }
196
192k
197
192k
  bool HasQualifier = ME->hasQualifier();
198
192k
  NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
199
192k
  bool IsArrow = ME->isArrow();
200
192k
  const Expr *Base = ME->getBase();
201
192k
202
192k
  return EmitCXXMemberOrOperatorMemberCallExpr(
203
192k
      CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
204
192k
}
205
206
RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
207
    const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
208
    bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
209
232k
    const Expr *Base) {
210
232k
  assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
211
232k
212
232k
  // Compute the object pointer.
213
232k
  bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
214
232k
215
232k
  const CXXMethodDecl *DevirtualizedMethod = nullptr;
216
232k
  if (CanUseVirtualCall &&
217
232k
      
      MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)) {
218
366
    const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
219
366
    DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
220
366
    assert(DevirtualizedMethod);
221
366
    const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
222
366
    const Expr *Inner = Base->ignoreParenBaseCasts();
223
366
    if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
224
366
        MD->getReturnType().getCanonicalType())
225
2
      // If the return types are not the same, this might be a case where more
226
2
      // code needs to run to compensate for it. For example, the derived
227
2
      // method might return a type that inherits from the return
228
2
      // type of MD and has a prefix.
229
2
      // For now we just avoid devirtualizing these covariant cases.
230
2
      DevirtualizedMethod = nullptr;
231
364
    else if (getCXXRecord(Inner) == DevirtualizedClass)
232
307
      // If the class of the Inner expression is where the dynamic method
233
307
      // is defined, build the this pointer from it.
234
307
      Base = Inner;
235
57
    else if (getCXXRecord(Base) != DevirtualizedClass) {
236
2
      // If the method is defined in a class that is not the best dynamic
237
2
      // one or the one of the full expression, we would have to build
238
2
      // a derived-to-base cast to compute the correct this pointer, but
239
2
      // we don't have support for that yet, so do a virtual call.
240
2
      DevirtualizedMethod = nullptr;
241
2
    }
242
366
  }
243
232k
244
232k
  // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
245
232k
  // operator before the LHS.
246
232k
  CallArgList RtlArgStorage;
247
232k
  CallArgList *RtlArgs = nullptr;
248
232k
  if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
249
40.3k
    if (OCE->isAssignmentOp()) {
250
8.80k
      RtlArgs = &RtlArgStorage;
251
8.80k
      EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
252
8.80k
                   drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
253
8.80k
                   /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
254
8.80k
    }
255
40.3k
  }
256
232k
257
232k
  LValue This;
258
232k
  if (IsArrow) {
259
97.0k
    LValueBaseInfo BaseInfo;
260
97.0k
    TBAAAccessInfo TBAAInfo;
261
97.0k
    Address ThisValue = EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
262
97.0k
    This = MakeAddrLValue(ThisValue, Base->getType(), BaseInfo, TBAAInfo);
263
135k
  } else {
264
135k
    This = EmitLValue(Base);
265
135k
  }
266
232k
267
232k
  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
268
12
    // This is the MSVC p->Ctor::Ctor(...) extension. We assume that's
269
12
    // constructing a new complete object of type Ctor.
270
12
    assert(!RtlArgs);
271
12
    assert(ReturnValue.isNull() && "Constructor shouldn't have return value");
272
12
    CallArgList Args;
273
12
    commonEmitCXXMemberOrOperatorCall(
274
12
        *this, Ctor, This.getPointer(), /*ImplicitParam=*/nullptr,
275
12
        /*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
276
12
277
12
    EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
278
12
                           /*Delegating=*/false, This.getAddress(), Args,
279
12
                           AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
280
12
                           /*NewPointerIsChecked=*/false);
281
12
    return RValue::get(nullptr);
282
12
  }
283
232k
284
232k
  if (MD->isTrivial() || (MD->isDefaulted() && MD->getParent()->isUnion())) {
285
5.18k
    if (isa<CXXDestructorDecl>(MD)) return RValue::get(nullptr);
286
4.97k
    if (!MD->getParent()->mayInsertExtraPadding()) {
287
4.97k
      if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
288
4.97k
        // We don't like to generate the trivial copy/move assignment operator
289
4.97k
        // when it isn't necessary; just produce the proper effect here.
290
4.97k
        LValue RHS = isa<CXXOperatorCallExpr>(CE)
291
4.97k
                         ? MakeNaturalAlignAddrLValue(
292
4.80k
                               (*RtlArgs)[0].getRValue(*this).getScalarVal(),
293
4.80k
                               (*(CE->arg_begin() + 1))->getType())
294
4.97k
                         : EmitLValue(*CE->arg_begin());
295
4.97k
        EmitAggregateAssign(This, RHS, CE->getType());
296
4.97k
        return RValue::get(This.getPointer());
297
4.97k
      }
298
0
      llvm_unreachable("unknown trivial member function");
299
0
    }
300
4.97k
  }
301
227k
302
227k
  // Compute the function type we're calling.
303
227k
  const CXXMethodDecl *CalleeDecl =
304
227k
      DevirtualizedMethod ? DevirtualizedMethod : MD;
305
227k
  const CGFunctionInfo *FInfo = nullptr;
306
227k
  if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
307
419
    FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
308
419
        GlobalDecl(Dtor, Dtor_Complete));
309
226k
  else
310
226k
    FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
311
227k
312
227k
  llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
313
227k
314
227k
  // C++11 [class.mfct.non-static]p2:
315
227k
  //   If a non-static member function of a class X is called for an object that
316
227k
  //   is not of type X, or of a type derived from X, the behavior is undefined.
317
227k
  SourceLocation CallLoc;
318
227k
  ASTContext &C = getContext();
319
227k
  if (CE)
320
227k
    CallLoc = CE->getExprLoc();
321
227k
322
227k
  SanitizerSet SkippedChecks;
323
227k
  if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
324
191k
    auto *IOA = CMCE->getImplicitObjectArgument();
325
191k
    bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
326
191k
    if (IsImplicitObjectCXXThis)
327
75.5k
      SkippedChecks.set(SanitizerKind::Alignment, true);
328
191k
    if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
329
111k
      SkippedChecks.set(SanitizerKind::Null, true);
330
191k
  }
331
227k
  EmitTypeCheck(CodeGenFunction::TCK_MemberCall, CallLoc, This.getPointer(),
332
227k
                C.getRecordType(CalleeDecl->getParent()),
333
227k
                /*Alignment=*/CharUnits::Zero(), SkippedChecks);
334
227k
335
227k
  // C++ [class.virtual]p12:
336
227k
  //   Explicit qualification with the scope operator (5.1) suppresses the
337
227k
  //   virtual call mechanism.
338
227k
  //
339
227k
  // We also don't emit a virtual call if the base expression has a record type
340
227k
  // because then we know what the type is.
341
227k
  bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
342
227k
343
227k
  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) {
344
419
    assert(CE->arg_begin() == CE->arg_end() &&
345
419
           "Destructor shouldn't have explicit parameters");
346
419
    assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
347
419
    if (UseVirtualCall) {
348
54
      CGM.getCXXABI().EmitVirtualDestructorCall(
349
54
          *this, Dtor, Dtor_Complete, This.getAddress(),
350
54
          cast<CXXMemberCallExpr>(CE));
351
365
    } else {
352
365
      GlobalDecl GD(Dtor, Dtor_Complete);
353
365
      CGCallee Callee;
354
365
      if (getLangOpts().AppleKext && Dtor->isVirtual() && HasQualifier)
355
2
        Callee = BuildAppleKextVirtualCall(Dtor, Qualifier, Ty);
356
363
      else if (!DevirtualizedMethod)
357
361
        Callee =
358
361
            CGCallee::forDirect(CGM.getAddrOfCXXStructor(GD, FInfo, Ty), GD);
359
2
      else {
360
2
        Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(GD, Ty), GD);
361
2
      }
362
365
363
365
      QualType ThisTy =
364
365
          IsArrow ? Base->getType()->getPointeeType() : Base->getType();
365
365
      EmitCXXDestructorCall(GD, Callee, This.getPointer(), ThisTy,
366
365
                            /*ImplicitParam=*/nullptr,
367
365
                            /*ImplicitParamTy=*/QualType(), nullptr);
368
365
    }
369
419
    return RValue::get(nullptr);
370
419
  }
371
226k
372
226k
  // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
373
226k
  // 'CalleeDecl' instead.
374
226k
375
226k
  CGCallee Callee;
376
226k
  if (UseVirtualCall) {
377
10.3k
    Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
378
216k
  } else {
379
216k
    if (SanOpts.has(SanitizerKind::CFINVCall) &&
380
216k
        
        MD->getParent()->isDynamicClass()) {
381
7
      llvm::Value *VTable;
382
7
      const CXXRecordDecl *RD;
383
7
      std::tie(VTable, RD) =
384
7
          CGM.getCXXABI().LoadVTablePtr(*this, This.getAddress(),
385
7
                                        MD->getParent());
386
7
      EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
387
7
    }
388
216k
389
216k
    if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
390
6
      Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
391
216k
    else if (!DevirtualizedMethod)
392
216k
      Callee =
393
216k
          CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), GlobalDecl(MD));
394
360
    else {
395
360
      Callee =
396
360
          CGCallee::forDirect(CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
397
360
                              GlobalDecl(DevirtualizedMethod));
398
360
    }
399
216k
  }
400
226k
401
226k
  if (MD->isVirtual()) {
402
10.8k
    Address NewThisAddr =
403
10.8k
        CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
404
10.8k
            *this, CalleeDecl, This.getAddress(), UseVirtualCall);
405
10.8k
    This.setAddress(NewThisAddr);
406
10.8k
  }
407
226k
408
226k
  return EmitCXXMemberOrOperatorCall(
409
226k
      CalleeDecl, Callee, ReturnValue, This.getPointer(),
410
226k
      /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
411
226k
}
412
413
RValue
414
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
415
147
                                              ReturnValueSlot ReturnValue) {
416
147
  const BinaryOperator *BO =
417
147
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
418
147
  const Expr *BaseExpr = BO->getLHS();
419
147
  const Expr *MemFnExpr = BO->getRHS();
420
147
421
147
  const MemberPointerType *MPT =
422
147
    MemFnExpr->getType()->castAs<MemberPointerType>();
423
147
424
147
  const FunctionProtoType *FPT =
425
147
    MPT->getPointeeType()->castAs<FunctionProtoType>();
426
147
  const CXXRecordDecl *RD =
427
147
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
428
147
429
147
  // Emit the 'this' pointer.
430
147
  Address This = Address::invalid();
431
147
  if (BO->getOpcode() == BO_PtrMemI)
432
90
    This = EmitPointerWithAlignment(BaseExpr);
433
57
  else
434
57
    This = EmitLValue(BaseExpr).getAddress();
435
147
436
147
  EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
437
147
                QualType(MPT->getClass(), 0));
438
147
439
147
  // Get the member function pointer.
440
147
  llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
441
147
442
147
  // Ask the ABI to load the callee.  Note that This is modified.
443
147
  llvm::Value *ThisPtrForCall = nullptr;
444
147
  CGCallee Callee =
445
147
    CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
446
147
                                             ThisPtrForCall, MemFnPtr, MPT);
447
147
448
147
  CallArgList Args;
449
147
450
147
  QualType ThisType =
451
147
    getContext().getPointerType(getContext().getTagDeclType(RD));
452
147
453
147
  // Push the this ptr.
454
147
  Args.add(RValue::get(ThisPtrForCall), ThisType);
455
147
456
147
  RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
457
147
458
147
  // And the rest of the call args
459
147
  EmitCallArgs(Args, FPT, E->arguments());
460
147
  return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
461
147
                                                      /*PrefixSize=*/0),
462
147
                  Callee, ReturnValue, Args, nullptr, E->getExprLoc());
463
147
}
464
465
RValue
466
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
467
                                               const CXXMethodDecl *MD,
468
40.3k
                                               ReturnValueSlot ReturnValue) {
469
40.3k
  assert(MD->isInstance() &&
470
40.3k
         "Trying to emit a member call expr on a static method!");
471
40.3k
  return EmitCXXMemberOrOperatorMemberCallExpr(
472
40.3k
      E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
473
40.3k
      /*IsArrow=*/false, E->getArg(0));
474
40.3k
}
475
476
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
477
15
                                               ReturnValueSlot ReturnValue) {
478
15
  return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
479
15
}
480
481
static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
482
                                            Address DestPtr,
483
897
                                            const CXXRecordDecl *Base) {
484
897
  if (Base->isEmpty())
485
878
    return;
486
19
487
19
  DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
488
19
489
19
  const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
490
19
  CharUnits NVSize = Layout.getNonVirtualSize();
491
19
492
19
  // We cannot simply zero-initialize the entire base sub-object if vbptrs are
493
19
  // present, they are initialized by the most derived class before calling the
494
19
  // constructor.
495
19
  SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores;
496
19
  Stores.emplace_back(CharUnits::Zero(), NVSize);
497
19
498
19
  // Each store is split by the existence of a vbptr.
499
19
  CharUnits VBPtrWidth = CGF.getPointerSize();
500
19
  std::vector<CharUnits> VBPtrOffsets =
501
19
      CGF.CGM.getCXXABI().getVBPtrOffsets(Base);
502
19
  for (CharUnits VBPtrOffset : VBPtrOffsets) {
503
6
    // Stop before we hit any virtual base pointers located in virtual bases.
504
6
    if (VBPtrOffset >= NVSize)
505
2
      break;
506
4
    std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val();
507
4
    CharUnits LastStoreOffset = LastStore.first;
508
4
    CharUnits LastStoreSize = LastStore.second;
509
4
510
4
    CharUnits SplitBeforeOffset = LastStoreOffset;
511
4
    CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset;
512
4
    assert(!SplitBeforeSize.isNegative() && "negative store size!");
513
4
    if (!SplitBeforeSize.isZero())
514
2
      Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize);
515
4
516
4
    CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth;
517
4
    CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset;
518
4
    assert(!SplitAfterSize.isNegative() && "negative store size!");
519
4
    if (!SplitAfterSize.isZero())
520
4
      Stores.emplace_back(SplitAfterOffset, SplitAfterSize);
521
4
  }
522
19
523
19
  // If the type contains a pointer to data member we can't memset it to zero.
524
19
  // Instead, create a null constant and copy it to the destination.
525
19
  // TODO: there are other patterns besides zero that we can usefully memset,
526
19
  // like -1, which happens to be the pattern used by member-pointers.
527
19
  // TODO: isZeroInitializable can be over-conservative in the case where a
528
19
  // virtual base contains a member pointer.
529
19
  llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base);
530
19
  if (!NullConstantForBase->isNullValue()) {
531
4
    llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable(
532
4
        CGF.CGM.getModule(), NullConstantForBase->getType(),
533
4
        /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
534
4
        NullConstantForBase, Twine());
535
4
536
4
    CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
537
4
                               DestPtr.getAlignment());
538
4
    NullVariable->setAlignment(Align.getQuantity());
539
4
540
4
    Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
541
4
542
4
    // Get and call the appropriate llvm.memcpy overload.
543
4
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
544
4
      CharUnits StoreOffset = Store.first;
545
4
      CharUnits StoreSize = Store.second;
546
4
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
547
4
      CGF.Builder.CreateMemCpy(
548
4
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
549
4
          CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset),
550
4
          StoreSizeVal);
551
4
    }
552
4
553
4
  // Otherwise, just memset the whole thing to zero.  This is legal
554
4
  // because in LLVM, all default initializers (other than the ones we just
555
4
  // handled above) are guaranteed to have a bit pattern of all zeros.
556
15
  } else {
557
17
    for (std::pair<CharUnits, CharUnits> Store : Stores) {
558
17
      CharUnits StoreOffset = Store.first;
559
17
      CharUnits StoreSize = Store.second;
560
17
      llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize);
561
17
      CGF.Builder.CreateMemSet(
562
17
          CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset),
563
17
          CGF.Builder.getInt8(0), StoreSizeVal);
564
17
    }
565
15
  }
566
19
}
567
568
void
569
CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
570
116k
                                      AggValueSlot Dest) {
571
116k
  assert(!Dest.isIgnored() && "Must have a destination!");
572
116k
  const CXXConstructorDecl *CD = E->getConstructor();
573
116k
574
116k
  // If we require zero initialization before (or instead of) calling the
575
116k
  // constructor, as can be the case with a non-user-provided default
576
116k
  // constructor, emit the zero initialization now, unless destination is
577
116k
  // already zeroed.
578
116k
  if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
579
7.76k
    switch (E->getConstructionKind()) {
580
7.76k
    case CXXConstructExpr::CK_Delegating:
581
6.86k
    case CXXConstructExpr::CK_Complete:
582
6.86k
      EmitNullInitialization(Dest.getAddress(), E->getType());
583
6.86k
      break;
584
6.86k
    case CXXConstructExpr::CK_VirtualBase:
585
897
    case CXXConstructExpr::CK_NonVirtualBase:
586
897
      EmitNullBaseClassInitialization(*this, Dest.getAddress(),
587
897
                                      CD->getParent());
588
897
      break;
589
116k
    }
590
116k
  }
591
116k
592
116k
  // If this is a call to a trivial default constructor, do nothing.
593
116k
  if (CD->isTrivial() && CD->isDefaultConstructor())
594
9.79k
    return;
595
106k
596
106k
  // Elide the constructor if we're constructing from a temporary.
597
106k
  // The temporary check is required because Sema sets this on NRVO
598
106k
  // returns.
599
106k
  if (getLangOpts().ElideConstructors && E->isElidable()) {
600
21.5k
    assert(getContext().hasSameUnqualifiedType(E->getType(),
601
21.5k
                                               E->getArg(0)->getType()));
602
21.5k
    if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
603
21.3k
      EmitAggExpr(E->getArg(0), Dest);
604
21.3k
      return;
605
21.3k
    }
606
85.2k
  }
607
85.2k
608
85.2k
  if (const ArrayType *arrayType
609
1.10k
        = getContext().getAsArrayType(E->getType())) {
610
1.10k
    EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E,
611
1.10k
                               Dest.isSanitizerChecked());
612
84.1k
  } else {
613
84.1k
    CXXCtorType Type = Ctor_Complete;
614
84.1k
    bool ForVirtualBase = false;
615
84.1k
    bool Delegating = false;
616
84.1k
617
84.1k
    switch (E->getConstructionKind()) {
618
84.1k
     case CXXConstructExpr::CK_Delegating:
619
47
      // We should be emitting a constructor; GlobalDecl will assert this
620
47
      Type = CurGD.getCtorType();
621
47
      Delegating = true;
622
47
      break;
623
84.1k
624
84.1k
     case CXXConstructExpr::CK_Complete:
625
72.1k
      Type = Ctor_Complete;
626
72.1k
      break;
627
84.1k
628
84.1k
     case CXXConstructExpr::CK_VirtualBase:
629
629
      ForVirtualBase = true;
630
629
      LLVM_FALLTHROUGH;
631
629
632
11.9k
     case CXXConstructExpr::CK_NonVirtualBase:
633
11.9k
      Type = Ctor_Base;
634
84.1k
     }
635
84.1k
636
84.1k
     // Call the constructor.
637
84.1k
     EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest, E);
638
84.1k
  }
639
85.2k
}
640
641
void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src,
642
79
                                                 const Expr *Exp) {
643
79
  if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
644
2
    Exp = E->getSubExpr();
645
79
  assert(isa<CXXConstructExpr>(Exp) &&
646
79
         "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
647
79
  const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
648
79
  const CXXConstructorDecl *CD = E->getConstructor();
649
79
  RunCleanupsScope Scope(*this);
650
79
651
79
  // If we require zero initialization before (or instead of) calling the
652
79
  // constructor, as can be the case with a non-user-provided default
653
79
  // constructor, emit the zero initialization now.
654
79
  // FIXME. Do I still need this for a copy ctor synthesis?
655
79
  if (E->requiresZeroInitialization())
656
0
    EmitNullInitialization(Dest, E->getType());
657
79
658
79
  assert(!getContext().getAsConstantArrayType(E->getType())
659
79
         && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
660
79
  EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
661
79
}
662
663
/// Return the number of bytes of array-cookie padding required for the
/// allocation performed by \p E, or zero when no cookie is needed.
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
                                        const CXXNewExpr *E) {
  // Only array new-expressions can need a cookie, and even then the
  // reserved placement operator new[] never gets one.
  bool NeedsCookie =
      E->isArray() &&
      !E->getOperatorNew()->isReservedGlobalPlacementOperator();
  if (!NeedsCookie)
    return CharUnits::Zero();

  // The ABI decides how large the cookie is.
  return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
}
675
676
/// Compute the number of bytes to request from operator new for the
/// new-expression \p e, emitting IR for any runtime arithmetic.
///
/// \param minElements the minimum element count required by a
///   brace-initializer; allocating fewer elements is treated as overflow.
/// \param numElements [out] set to the emitted element count, scaled up by
///   any constant nested-array factor.
/// \param sizeWithoutCookie [out] set to the allocation size excluding the
///   array cookie.
/// \returns the total size to pass to operator new; on (possible) overflow
///   this is an all-ones value so the allocation will fail.
static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
                                        const CXXNewExpr *e,
                                        unsigned minElements,
                                        llvm::Value *&numElements,
                                        llvm::Value *&sizeWithoutCookie) {
  QualType type = e->getAllocatedType();

  // Scalar new: the size is a compile-time constant and there is no cookie.
  if (!e->isArray()) {
    CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
    sizeWithoutCookie
      = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
    return sizeWithoutCookie;
  }

  // The width of size_t.
  unsigned sizeWidth = CGF.SizeTy->getBitWidth();

  // Figure out the cookie size.
  llvm::APInt cookieSize(sizeWidth,
                         CalculateCookiePadding(CGF, e).getQuantity());

  // Emit the array size expression.
  // We multiply the size of all dimensions for NumElements.
  // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
  // Try constant folding first so the -O0 fast path below can fire.
  numElements =
    ConstantEmitter(CGF).tryEmitAbstract(*e->getArraySize(), e->getType());
  if (!numElements)
    numElements = CGF.EmitScalarExpr(*e->getArraySize());
  assert(isa<llvm::IntegerType>(numElements->getType()));

  // The number of elements can be have an arbitrary integer type;
  // essentially, we need to multiply it by a constant factor, add a
  // cookie size, and verify that the result is representable as a
  // size_t.  That's just a gloss, though, and it's wrong in one
  // important way: if the count is negative, it's an error even if
  // the cookie size would bring the total size >= 0.
  bool isSigned
    = (*e->getArraySize())->getType()->isSignedIntegerOrEnumerationType();
  llvm::IntegerType *numElementsType
    = cast<llvm::IntegerType>(numElements->getType());
  unsigned numElementsWidth = numElementsType->getBitWidth();

  // Compute the constant factor: peel off any nested constant array
  // dimensions of the allocated type and fold them into one multiplier.
  llvm::APInt arraySizeMultiplier(sizeWidth, 1);
  while (const ConstantArrayType *CAT
             = CGF.getContext().getAsConstantArrayType(type)) {
    type = CAT->getElementType();
    arraySizeMultiplier *= CAT->getSize();
  }

  CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
  llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
  typeSizeMultiplier *= arraySizeMultiplier;

  // This will be a size_t.
  llvm::Value *size;

  // If someone is doing 'new int[42]' there is no need to do a dynamic check.
  // Don't bloat the -O0 code.
  if (llvm::ConstantInt *numElementsC =
        dyn_cast<llvm::ConstantInt>(numElements)) {
    // Constant element count: do all the overflow checking in APInt
    // arithmetic at compile time; no IR checks are emitted.
    const llvm::APInt &count = numElementsC->getValue();

    bool hasAnyOverflow = false;

    // If 'count' was a negative number, it's an overflow.
    if (isSigned && count.isNegative())
      hasAnyOverflow = true;

    // We want to do all this arithmetic in size_t.  If numElements is
    // wider than that, check whether it's already too big, and if so,
    // overflow.
    else if (numElementsWidth > sizeWidth &&
             numElementsWidth - sizeWidth > count.countLeadingZeros())
      hasAnyOverflow = true;

    // Okay, compute a count at the right width.
    llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);

    // If there is a brace-initializer, we cannot allocate fewer elements than
    // there are initializers. If we do, that's treated like an overflow.
    if (adjustedCount.ult(minElements))
      hasAnyOverflow = true;

    // Scale numElements by that.  This might overflow, but we don't
    // care because it only overflows if allocationSize does, too, and
    // if that overflows then we shouldn't use this.
    numElements = llvm::ConstantInt::get(CGF.SizeTy,
                                         adjustedCount * arraySizeMultiplier);

    // Compute the size before cookie, and track whether it overflowed.
    bool overflow;
    llvm::APInt allocationSize
      = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
    hasAnyOverflow |= overflow;

    // Add in the cookie, and check whether it's overflowed.
    if (cookieSize != 0) {
      // Save the current size without a cookie.  This shouldn't be
      // used if there was overflow.
      sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);

      allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
      hasAnyOverflow |= overflow;
    }

    // On overflow, produce a -1 so operator new will fail.
    if (hasAnyOverflow) {
      size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
    } else {
      size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
    }

  // Otherwise, we might need to use the overflow intrinsics.
  } else {
    // There are up to five conditions we need to test for:
    // 1) if isSigned, we need to check whether numElements is negative;
    // 2) if numElementsWidth > sizeWidth, we need to check whether
    //   numElements is larger than something representable in size_t;
    // 3) if minElements > 0, we need to check whether numElements is smaller
    //    than that.
    // 4) we need to compute
    //      sizeWithoutCookie := numElements * typeSizeMultiplier
    //    and check whether it overflows; and
    // 5) if we need a cookie, we need to compute
    //      size := sizeWithoutCookie + cookieSize
    //    and check whether it overflows.

    llvm::Value *hasOverflow = nullptr;

    // If numElementsWidth > sizeWidth, then one way or another, we're
    // going to have to do a comparison for (2), and this happens to
    // take care of (1), too.
    if (numElementsWidth > sizeWidth) {
      llvm::APInt threshold(numElementsWidth, 1);
      threshold <<= sizeWidth;

      llvm::Value *thresholdV
        = llvm::ConstantInt::get(numElementsType, threshold);

      hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
      numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);

    // Otherwise, if we're signed, we want to sext up to size_t.
    } else if (isSigned) {
      if (numElementsWidth < sizeWidth)
        numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);

      // If there's a non-1 type size multiplier, then we can do the
      // signedness check at the same time as we do the multiply
      // because a negative number times anything will cause an
      // unsigned overflow.  Otherwise, we have to do it here. But at least
      // in this case, we can subsume the >= minElements check.
      if (typeSizeMultiplier == 1)
        hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));

    // Otherwise, zext up to size_t if necessary.
    } else if (numElementsWidth < sizeWidth) {
      numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
    }

    assert(numElements->getType() == CGF.SizeTy);

    if (minElements) {
      // Don't allow allocation of fewer elements than we have initializers.
      if (!hasOverflow) {
        hasOverflow = CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements));
      } else if (numElementsWidth > sizeWidth) {
        // The other existing overflow subsumes this check.
        // We do an unsigned comparison, since any signed value < -1 is
        // taken care of either above or below.
        hasOverflow = CGF.Builder.CreateOr(hasOverflow,
                          CGF.Builder.CreateICmpULT(numElements,
                              llvm::ConstantInt::get(CGF.SizeTy, minElements)));
      }
    }

    size = numElements;

    // Multiply by the type size if necessary.  This multiplier
    // includes all the factors for nested arrays.
    //
    // This step also causes numElements to be scaled up by the
    // nested-array factor if necessary.  Overflow on this computation
    // can be ignored because the result shouldn't be used if
    // allocation fails.
    if (typeSizeMultiplier != 1) {
      llvm::Function *umul_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);

      llvm::Value *tsmV =
        llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
      llvm::Value *result =
          CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV});

      // The intrinsic returns {product, overflow-bit}.
      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);

      // Also scale up numElements by the array size multiplier.
      if (arraySizeMultiplier != 1) {
        // If the base element type size is 1, then we can re-use the
        // multiply we just did.
        if (typeSize.isOne()) {
          assert(arraySizeMultiplier == typeSizeMultiplier);
          numElements = size;

        // Otherwise we need a separate multiply.
        } else {
          llvm::Value *asmV =
            llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
          numElements = CGF.Builder.CreateMul(numElements, asmV);
        }
      }
    } else {
      // numElements doesn't need to be scaled.
      assert(arraySizeMultiplier == 1);
    }

    // Add in the cookie size if necessary.
    if (cookieSize != 0) {
      sizeWithoutCookie = size;

      llvm::Function *uadd_with_overflow
        = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);

      llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
      llvm::Value *result =
          CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV});

      llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
      if (hasOverflow)
        hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
      else
        hasOverflow = overflowed;

      size = CGF.Builder.CreateExtractValue(result, 0);
    }

    // If we had any possibility of dynamic overflow, make a select to
    // overwrite 'size' with an all-ones value, which should cause
    // operator new to throw.
    if (hasOverflow)
      size = CGF.Builder.CreateSelect(hasOverflow,
                                 llvm::Constant::getAllOnesValue(CGF.SizeTy),
                                      size);
  }

  if (cookieSize == 0)
    sizeWithoutCookie = size;
  else
    assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");

  return size;
}
937
938
/// Emit the initializer \p Init for a single object of type \p AllocType
/// located at \p NewPtr, dispatching on the type's evaluation kind
/// (scalar / complex / aggregate).
///
/// \param MayOverlap whether the aggregate being stored may overlap other
///   objects (forwarded to the aggregate slot).
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
                                    QualType AllocType, Address NewPtr,
                                    AggValueSlot::Overlap_t MayOverlap) {
  // FIXME: Refactor with EmitExprAsInit.
  switch (CGF.getEvaluationKind(AllocType)) {
  case TEK_Scalar:
    CGF.EmitScalarInit(Init, nullptr,
                       CGF.MakeAddrLValue(NewPtr, AllocType), false);
    return;
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType),
                                  /*isInit*/ true);
    return;
  case TEK_Aggregate: {
    // Build a slot describing the destination and evaluate the aggregate
    // expression directly into it.
    AggValueSlot Slot
      = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(),
                              AggValueSlot::IsDestructed,
                              AggValueSlot::DoesNotNeedGCBarriers,
                              AggValueSlot::IsNotAliased,
                              MayOverlap, AggValueSlot::IsNotZeroed,
                              AggValueSlot::IsSanitizerChecked);
    CGF.EmitAggExpr(Init, Slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}
965
966
/// Emit the initializer for an array new-expression.
///
/// Handles, in order: explicit init-list elements (including the braced
/// string-literal special case), constructor-call loops, memset fast paths
/// for zero-initialization, and a generic element-at-a-time loop for
/// everything else.
///
/// \param BeginPtr start of the storage for the array elements.
/// \param NumElements runtime element count.
/// \param AllocSizeWithoutCookie allocation size excluding any array cookie;
///   used to size the memset fast path.
void CodeGenFunction::EmitNewArrayInitializer(
    const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy,
    Address BeginPtr, llvm::Value *NumElements,
    llvm::Value *AllocSizeWithoutCookie) {
  // If we have a type with trivial initialization and no initializer,
  // there's nothing to do.
  if (!E->hasInitializer())
    return;

  Address CurPtr = BeginPtr;

  // Number of elements already covered by explicit init-list entries.
  unsigned InitListElements = 0;

  const Expr *Init = E->getInitializer();
  Address EndOfInit = Address::invalid();
  QualType::DestructionKind DtorKind = ElementType.isDestructedType();
  EHScopeStack::stable_iterator Cleanup;
  llvm::Instruction *CleanupDominator = nullptr;

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType);
  CharUnits ElementAlign =
    BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);

  // Attempt to perform zero-initialization using memset.
  auto TryMemsetInitialization = [&]() -> bool {
    // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
    // we can initialize with a memset to -1.
    if (!CGM.getTypes().isZeroInitializable(ElementType))
      return false;

    // Optimization: since zero initialization will just set the memory
    // to all zeroes, generate a single memset to do it in one shot.

    // Subtract out the size of any elements we've already initialized.
    auto *RemainingSize = AllocSizeWithoutCookie;
    if (InitListElements) {
      // We know this can't overflow; we check this when doing the allocation.
      auto *InitializedSize = llvm::ConstantInt::get(
          RemainingSize->getType(),
          getContext().getTypeSizeInChars(ElementType).getQuantity() *
              InitListElements);
      RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
    }

    // Create the memset.
    Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
    return true;
  };

  // If the initializer is an initializer list, first do the explicit elements.
  if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
    // Initializing from a (braced) string literal is a special case; the init
    // list element does not initialize a (single) array element.
    if (ILE->isStringLiteralInit()) {
      // Initialize the initial portion of length equal to that of the string
      // literal. The allocation must be for at least this much; we emitted a
      // check for that earlier.
      AggValueSlot Slot =
          AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
                                AggValueSlot::IsDestructed,
                                AggValueSlot::DoesNotNeedGCBarriers,
                                AggValueSlot::IsNotAliased,
                                AggValueSlot::DoesNotOverlap,
                                AggValueSlot::IsNotZeroed,
                                AggValueSlot::IsSanitizerChecked);
      EmitAggExpr(ILE->getInit(0), Slot);

      // Move past these elements.
      InitListElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
      CurPtr =
          Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                            Builder.getSize(InitListElements),
                                            "string.init.end"),
                  CurPtr.getAlignment().alignmentAtOffset(InitListElements *
                                                          ElementSize));

      // Zero out the rest, if any remain.
      llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
      if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
        bool OK = TryMemsetInitialization();
        (void)OK;
        assert(OK && "couldn't memset character type?");
      }
      return;
    }

    InitListElements = ILE->getNumInits();

    // If this is a multi-dimensional array new, we will initialize multiple
    // elements with each init list element.
    QualType AllocType = E->getAllocatedType();
    if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
            AllocType->getAsArrayTypeUnsafe())) {
      ElementTy = ConvertTypeForMem(AllocType);
      CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy);
      InitListElements *= getContext().getConstantArrayElementCount(CAT);
    }

    // Enter a partial-destruction Cleanup if necessary.
    if (needsEHCleanup(DtorKind)) {
      // In principle we could tell the Cleanup where we are more
      // directly, but the control flow can get so varied here that it
      // would actually be quite complex.  Therefore we go through an
      // alloca.
      EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(),
                                   "array.init.end");
      CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit);
      pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit,
                                       ElementType, ElementAlign,
                                       getDestroyer(DtorKind));
      Cleanup = EHStack.stable_begin();
    }

    CharUnits StartAlign = CurPtr.getAlignment();
    for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (EndOfInit.isValid()) {
        auto FinishedPtr =
          Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType());
        Builder.CreateStore(FinishedPtr, EndOfInit);
      }
      // FIXME: If the last initializer is an incomplete initializer list for
      // an array, and we have an array filler, we can fold together the two
      // initialization loops.
      StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
                              ILE->getInit(i)->getType(), CurPtr,
                              AggValueSlot::DoesNotOverlap);
      CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
                                                 Builder.getSize(1),
                                                 "array.exp.next"),
                       StartAlign.alignmentAtOffset((i + 1) * ElementSize));
    }

    // The remaining elements are filled with the array filler expression.
    Init = ILE->getArrayFiller();

    // Extract the initializer for the individual array elements by pulling
    // out the array filler from all the nested initializer lists. This avoids
    // generating a nested loop for the initialization.
    while (Init && Init->getType()->isConstantArrayType()) {
      auto *SubILE = dyn_cast<InitListExpr>(Init);
      if (!SubILE)
        break;
      assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?");
      Init = SubILE->getArrayFiller();
    }

    // Switch back to initializing one base element at a time.
    CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
  }

  // If all elements have already been initialized, skip any further
  // initialization.
  llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
  if (ConstNum && ConstNum->getZExtValue() <= InitListElements) {
    // If there was a Cleanup, deactivate it.
    if (CleanupDominator)
      DeactivateCleanupBlock(Cleanup, CleanupDominator);
    return;
  }

  assert(Init && "have trailing elements to initialize but no initializer");

  // If this is a constructor call, try to optimize it out, and failing that
  // emit a single loop to initialize all remaining elements.
  if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
    CXXConstructorDecl *Ctor = CCE->getConstructor();
    if (Ctor->isTrivial()) {
      // If new expression did not specify value-initialization, then there
      // is no initialization.
      if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
        return;

      if (TryMemsetInitialization())
        return;
    }

    // Store the new Cleanup position for irregular Cleanups.
    //
    // FIXME: Share this cleanup with the constructor call emission rather than
    // having it create a cleanup of its own.
    if (EndOfInit.isValid())
      Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

    // Emit a constructor call loop to initialize the remaining elements.
    if (InitListElements)
      NumElements = Builder.CreateSub(
          NumElements,
          llvm::ConstantInt::get(NumElements->getType(), InitListElements));
    EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
                               /*NewPointerIsChecked*/true,
                               CCE->requiresZeroInitialization());
    return;
  }

  // If this is value-initialization, we can usually use memset.
  // NOTE: IVIE must outlive the 'Init = &IVIE' assignment below; it is used
  // by the generic loop at the end of the function.
  ImplicitValueInitExpr IVIE(ElementType);
  if (isa<ImplicitValueInitExpr>(Init)) {
    if (TryMemsetInitialization())
      return;

    // Switch to an ImplicitValueInitExpr for the element type. This handles
    // only one case: multidimensional array new of pointers to members. In
    // all other cases, we already have an initializer for the array element.
    Init = &IVIE;
  }

  // At this point we should have found an initializer for the individual
  // elements of the array.
  assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) &&
         "got wrong type of element to initialize");

  // If we have an empty initializer list, we can usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init))
    if (ILE->getNumInits() == 0 && TryMemsetInitialization())
      return;

  // If we have a struct whose every field is value-initialized, we can
  // usually use memset.
  if (auto *ILE = dyn_cast<InitListExpr>(Init)) {
    if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
      if (RType->getDecl()->isStruct()) {
        // Count bases plus named fields; if every initializer is an implicit
        // value-init, the whole struct can be zeroed with one memset.
        unsigned NumElements = 0;
        if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl()))
          NumElements = CXXRD->getNumBases();
        for (auto *Field : RType->getDecl()->fields())
          if (!Field->isUnnamedBitfield())
            ++NumElements;
        // FIXME: Recurse into nested InitListExprs.
        if (ILE->getNumInits() == NumElements)
          for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
            if (!isa<ImplicitValueInitExpr>(ILE->getInit(i)))
              --NumElements;
        if (ILE->getNumInits() == NumElements && TryMemsetInitialization())
          return;
      }
    }
  }

  // Create the loop blocks.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  llvm::BasicBlock *LoopBB = createBasicBlock("new.loop");
  llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end");

  // Find the end of the array, hoisted out of the loop.
  llvm::Value *EndPtr =
    Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");

  // If the number of elements isn't constant, we have to now check if there is
  // anything left to initialize.
  if (!ConstNum) {
    llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty");
    Builder.CreateCondBr(IsEmpty, ContBB, LoopBB);
  }

  // Enter the loop.
  EmitBlock(LoopBB);

  // Set up the current-element phi.
  llvm::PHINode *CurPtrPhi =
    Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
  CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);

  CurPtr = Address(CurPtrPhi, ElementAlign);

  // Store the new Cleanup position for irregular Cleanups.
  if (EndOfInit.isValid())
    Builder.CreateStore(CurPtr.getPointer(), EndOfInit);

  // Enter a partial-destruction Cleanup if necessary.
  if (!CleanupDominator && needsEHCleanup(DtorKind)) {
    pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(),
                                   ElementType, ElementAlign,
                                   getDestroyer(DtorKind));
    Cleanup = EHStack.stable_begin();
    CleanupDominator = Builder.CreateUnreachable();
  }

  // Emit the initializer into this element.
  StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr,
                          AggValueSlot::DoesNotOverlap);

  // Leave the Cleanup if we entered one.
  if (CleanupDominator) {
    DeactivateCleanupBlock(Cleanup, CleanupDominator);
    CleanupDominator->eraseFromParent();
  }

  // Advance to the next element by adjusting the pointer type as necessary.
  llvm::Value *NextPtr =
    Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1,
                                       "array.next");

  // Check whether we've gotten to the end of the array and, if so,
  // exit the loop.
  llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend");
  Builder.CreateCondBr(IsEnd, ContBB, LoopBB);
  CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock());

  EmitBlock(ContBB);
}
1272
1273
static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
1274
                               QualType ElementType, llvm::Type *ElementTy,
1275
                               Address NewPtr, llvm::Value *NumElements,
1276
5.05k
                               llvm::Value *AllocSizeWithoutCookie) {
1277
5.05k
  ApplyDebugLocation DL(CGF, E);
1278
5.05k
  if (E->isArray())
1279
746
    CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements,
1280
746
                                AllocSizeWithoutCookie);
1281
4.31k
  else if (const Expr *Init = E->getInitializer())
1282
4.28k
    StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr,
1283
4.28k
                            AggValueSlot::DoesNotOverlap);
1284
5.05k
}
1285
1286
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
///
/// \param CalleeDecl the operator new/delete being called.
/// \param CalleeType its function prototype, used to arrange the call.
/// \param Args the already-evaluated call arguments.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
                                const FunctionDecl *CalleeDecl,
                                const FunctionProtoType *CalleeType,
                                const CallArgList &Args) {
  llvm::CallBase *CallOrInvoke;
  llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
  CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(CalleeDecl));
  RValue RV =
      CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
                       Args, CalleeType, /*ChainCall=*/false),
                   Callee, ReturnValueSlot(), Args, &CallOrInvoke);

  /// C++1y [expr.new]p10:
  ///   [In a new-expression,] an implementation is allowed to omit a call
  ///   to a replaceable global allocation function.
  ///
  /// We model such elidable calls with the 'builtin' attribute.
  llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
  if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
      Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
    // Mark the call site 'builtin' so the optimizer may elide it, even
    // though the function itself is compiled nobuiltin.
    CallOrInvoke->addAttribute(llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::Builtin);
  }

  return RV;
}
1314
1315
/// Emit a call to the builtin operator new/delete for \p TheCall, resolving
/// the callee by looking up the predeclared global operator whose type
/// matches \p Type exactly.
///
/// \param IsDelete true for operator delete, false for operator new.
RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
                                                 const CallExpr *TheCall,
                                                 bool IsDelete) {
  CallArgList Args;
  EmitCallArgs(Args, Type->getParamTypes(), TheCall->arguments());
  // Find the allocation or deallocation function that we're calling.
  ASTContext &Ctx = getContext();
  DeclarationName Name = Ctx.DeclarationNames
      .getCXXOperatorName(IsDelete ? OO_Delete : OO_New);

  // The matching declaration is found among the translation unit's
  // predeclared global operators by exact type comparison.
  for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name))
    if (auto *FD = dyn_cast<FunctionDecl>(Decl))
      if (Ctx.hasSameType(FD->getType(), QualType(Type, 0)))
        return EmitNewDeleteCall(*this, FD, Type, Args);
  llvm_unreachable("predeclared global operator new/delete is missing");
}
1331
1332
namespace {
/// Describes which implicit arguments a usual 'operator delete' expects
/// in addition to the initial pointer operand.
struct UsualDeleteParams {
  // True for a destroying operator delete (takes std::destroying_delete_t).
  bool DestroyingDelete = false;
  // True if the function takes an explicit std::size_t size argument.
  bool Size = false;
  // True if the function takes a std::align_val_t alignment argument.
  bool Alignment = false;
};
}
1340
1341
4.77k
/// Inspect a usual deallocation function and work out which implicit
/// parameters (destroying-delete tag, size, alignment) its prototype
/// declares, in the order the language requires them to appear.
static UsualDeleteParams getUsualDeleteParams(const FunctionDecl *FD) {
  UsualDeleteParams Params;

  const auto *ProtoTy = FD->getType()->castAs<FunctionProtoType>();
  auto ParamIt = ProtoTy->param_type_begin();
  auto ParamEnd = ProtoTy->param_type_end();

  // The first parameter is always the pointer operand; skip it.
  ++ParamIt;

  // A destroying operator delete takes a std::destroying_delete_t tag next.
  if (FD->isDestroyingOperatorDelete()) {
    Params.DestroyingDelete = true;
    assert(ParamIt != ParamEnd);
    ++ParamIt;
  }

  // An integral parameter in the next position is the implicit size.
  if (ParamIt != ParamEnd && (*ParamIt)->isIntegerType()) {
    Params.Size = true;
    ++ParamIt;
  }

  // A std::align_val_t parameter in the next position is the alignment.
  if (ParamIt != ParamEnd && (*ParamIt)->isAlignValT()) {
    Params.Alignment = true;
    ++ParamIt;
  }

  assert(ParamIt == ParamEnd &&
         "unexpected usual deallocation function parameter");
  return Params;
}
1371
1372
namespace {
1373
  /// A cleanup to call the given 'operator delete' function upon abnormal
1374
  /// exit from a new expression. Templated on a traits type that deals with
1375
  /// ensuring that the arguments dominate the cleanup if necessary.
1376
  template<typename Traits>
1377
  class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
1378
    /// Type used to hold llvm::Value*s.
1379
    typedef typename Traits::ValueTy ValueTy;
1380
    /// Type used to hold RValues.
1381
    typedef typename Traits::RValueTy RValueTy;
1382
    struct PlacementArg {
1383
      RValueTy ArgValue;
1384
      QualType ArgType;
1385
    };
1386
1387
    unsigned NumPlacementArgs : 31;
1388
    unsigned PassAlignmentToPlacementDelete : 1;
1389
    const FunctionDecl *OperatorDelete;
1390
    ValueTy Ptr;
1391
    ValueTy AllocSize;
1392
    CharUnits AllocAlign;
1393
1394
156
    PlacementArg *getPlacementArgs() {
1395
156
      return reinterpret_cast<PlacementArg *>(this + 1);
1396
156
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::getPlacementArgs()
Line
Count
Source
1394
148
    PlacementArg *getPlacementArgs() {
1395
148
      return reinterpret_cast<PlacementArg *>(this + 1);
1396
148
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::getPlacementArgs()
Line
Count
Source
1394
8
    PlacementArg *getPlacementArgs() {
1395
8
      return reinterpret_cast<PlacementArg *>(this + 1);
1396
8
    }
1397
1398
  public:
1399
2.07k
    static size_t getExtraSize(size_t NumPlacementArgs) {
1400
2.07k
      return NumPlacementArgs * sizeof(PlacementArg);
1401
2.07k
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::getExtraSize(unsigned long)
Line
Count
Source
1399
2.06k
    static size_t getExtraSize(size_t NumPlacementArgs) {
1400
2.06k
      return NumPlacementArgs * sizeof(PlacementArg);
1401
2.06k
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::getExtraSize(unsigned long)
Line
Count
Source
1399
11
    static size_t getExtraSize(size_t NumPlacementArgs) {
1400
11
      return NumPlacementArgs * sizeof(PlacementArg);
1401
11
    }
1402
1403
    CallDeleteDuringNew(size_t NumPlacementArgs,
1404
                        const FunctionDecl *OperatorDelete, ValueTy Ptr,
1405
                        ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
1406
                        CharUnits AllocAlign)
1407
      : NumPlacementArgs(NumPlacementArgs),
1408
        PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
1409
        OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
1410
2.07k
        AllocAlign(AllocAlign) {}
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::CallDeleteDuringNew(unsigned long, clang::FunctionDecl const*, llvm::Value*, llvm::Value*, bool, clang::CharUnits)
Line
Count
Source
1410
2.06k
        AllocAlign(AllocAlign) {}
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::CallDeleteDuringNew(unsigned long, clang::FunctionDecl const*, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, bool, clang::CharUnits)
Line
Count
Source
1410
11
        AllocAlign(AllocAlign) {}
1411
1412
91
    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1413
91
      assert(I < NumPlacementArgs && "index out of range");
1414
91
      getPlacementArgs()[I] = {Arg, Type};
1415
91
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::setPlacementArg(unsigned int, clang::CodeGen::RValue, clang::QualType)
Line
Count
Source
1412
87
    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1413
87
      assert(I < NumPlacementArgs && "index out of range");
1414
87
      getPlacementArgs()[I] = {Arg, Type};
1415
87
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::setPlacementArg(unsigned int, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, clang::QualType)
Line
Count
Source
1412
4
    void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
1413
4
      assert(I < NumPlacementArgs && "index out of range");
1414
4
      getPlacementArgs()[I] = {Arg, Type};
1415
4
    }
1416
1417
1.59k
    void Emit(CodeGenFunction &CGF, Flags flags) override {
1418
1.59k
      const FunctionProtoType *FPT =
1419
1.59k
          OperatorDelete->getType()->getAs<FunctionProtoType>();
1420
1.59k
      CallArgList DeleteArgs;
1421
1.59k
1422
1.59k
      // The first argument is always a void* (or C* for a destroying operator
1423
1.59k
      // delete for class type C).
1424
1.59k
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1425
1.59k
1426
1.59k
      // Figure out what other parameters we should be implicitly passing.
1427
1.59k
      UsualDeleteParams Params;
1428
1.59k
      if (NumPlacementArgs) {
1429
61
        // A placement deallocation function is implicitly passed an alignment
1430
61
        // if the placement allocation function was, but is never passed a size.
1431
61
        Params.Alignment = PassAlignmentToPlacementDelete;
1432
1.53k
      } else {
1433
1.53k
        // For a non-placement new-expression, 'operator delete' can take a
1434
1.53k
        // size and/or an alignment if it has the right parameters.
1435
1.53k
        Params = getUsualDeleteParams(OperatorDelete);
1436
1.53k
      }
1437
1.59k
1438
1.59k
      assert(!Params.DestroyingDelete &&
1439
1.59k
             "should not call destroying delete in a new-expression");
1440
1.59k
1441
1.59k
      // The second argument can be a std::size_t (for non-placement delete).
1442
1.59k
      if (Params.Size)
1443
6
        DeleteArgs.add(Traits::get(CGF, AllocSize),
1444
6
                       CGF.getContext().getSizeType());
1445
1.59k
1446
1.59k
      // The next (second or third) argument can be a std::align_val_t, which
1447
1.59k
      // is an enum whose underlying type is std::size_t.
1448
1.59k
      // FIXME: Use the right type as the parameter type. Note that in a call
1449
1.59k
      // to operator delete(size_t, ...), we may not have it available.
1450
1.59k
      if (Params.Alignment)
1451
36
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1452
36
                           CGF.SizeTy, AllocAlign.getQuantity())),
1453
36
                       CGF.getContext().getSizeType());
1454
1.59k
1455
1.59k
      // Pass the rest of the arguments, which must match exactly.
1456
1.65k
      for (unsigned I = 0; I != NumPlacementArgs; 
++I65
) {
1457
65
        auto Arg = getPlacementArgs()[I];
1458
65
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1459
65
      }
1460
1.59k
1461
1.59k
      // Call 'operator delete'.
1462
1.59k
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1463
1.59k
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::Emit(clang::CodeGen::CodeGenFunction&, clang::CodeGen::EHScopeStack::Cleanup::Flags)
Line
Count
Source
1417
1.58k
    void Emit(CodeGenFunction &CGF, Flags flags) override {
1418
1.58k
      const FunctionProtoType *FPT =
1419
1.58k
          OperatorDelete->getType()->getAs<FunctionProtoType>();
1420
1.58k
      CallArgList DeleteArgs;
1421
1.58k
1422
1.58k
      // The first argument is always a void* (or C* for a destroying operator
1423
1.58k
      // delete for class type C).
1424
1.58k
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1425
1.58k
1426
1.58k
      // Figure out what other parameters we should be implicitly passing.
1427
1.58k
      UsualDeleteParams Params;
1428
1.58k
      if (NumPlacementArgs) {
1429
59
        // A placement deallocation function is implicitly passed an alignment
1430
59
        // if the placement allocation function was, but is never passed a size.
1431
59
        Params.Alignment = PassAlignmentToPlacementDelete;
1432
1.52k
      } else {
1433
1.52k
        // For a non-placement new-expression, 'operator delete' can take a
1434
1.52k
        // size and/or an alignment if it has the right parameters.
1435
1.52k
        Params = getUsualDeleteParams(OperatorDelete);
1436
1.52k
      }
1437
1.58k
1438
1.58k
      assert(!Params.DestroyingDelete &&
1439
1.58k
             "should not call destroying delete in a new-expression");
1440
1.58k
1441
1.58k
      // The second argument can be a std::size_t (for non-placement delete).
1442
1.58k
      if (Params.Size)
1443
6
        DeleteArgs.add(Traits::get(CGF, AllocSize),
1444
6
                       CGF.getContext().getSizeType());
1445
1.58k
1446
1.58k
      // The next (second or third) argument can be a std::align_val_t, which
1447
1.58k
      // is an enum whose underlying type is std::size_t.
1448
1.58k
      // FIXME: Use the right type as the parameter type. Note that in a call
1449
1.58k
      // to operator delete(size_t, ...), we may not have it available.
1450
1.58k
      if (Params.Alignment)
1451
36
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1452
36
                           CGF.SizeTy, AllocAlign.getQuantity())),
1453
36
                       CGF.getContext().getSizeType());
1454
1.58k
1455
1.58k
      // Pass the rest of the arguments, which must match exactly.
1456
1.64k
      for (unsigned I = 0; I != NumPlacementArgs; 
++I61
) {
1457
61
        auto Arg = getPlacementArgs()[I];
1458
61
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1459
61
      }
1460
1.58k
1461
1.58k
      // Call 'operator delete'.
1462
1.58k
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1463
1.58k
    }
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::Emit(clang::CodeGen::CodeGenFunction&, clang::CodeGen::EHScopeStack::Cleanup::Flags)
Line
Count
Source
1417
6
    void Emit(CodeGenFunction &CGF, Flags flags) override {
1418
6
      const FunctionProtoType *FPT =
1419
6
          OperatorDelete->getType()->getAs<FunctionProtoType>();
1420
6
      CallArgList DeleteArgs;
1421
6
1422
6
      // The first argument is always a void* (or C* for a destroying operator
1423
6
      // delete for class type C).
1424
6
      DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
1425
6
1426
6
      // Figure out what other parameters we should be implicitly passing.
1427
6
      UsualDeleteParams Params;
1428
6
      if (NumPlacementArgs) {
1429
2
        // A placement deallocation function is implicitly passed an alignment
1430
2
        // if the placement allocation function was, but is never passed a size.
1431
2
        Params.Alignment = PassAlignmentToPlacementDelete;
1432
4
      } else {
1433
4
        // For a non-placement new-expression, 'operator delete' can take a
1434
4
        // size and/or an alignment if it has the right parameters.
1435
4
        Params = getUsualDeleteParams(OperatorDelete);
1436
4
      }
1437
6
1438
6
      assert(!Params.DestroyingDelete &&
1439
6
             "should not call destroying delete in a new-expression");
1440
6
1441
6
      // The second argument can be a std::size_t (for non-placement delete).
1442
6
      if (Params.Size)
1443
0
        DeleteArgs.add(Traits::get(CGF, AllocSize),
1444
0
                       CGF.getContext().getSizeType());
1445
6
1446
6
      // The next (second or third) argument can be a std::align_val_t, which
1447
6
      // is an enum whose underlying type is std::size_t.
1448
6
      // FIXME: Use the right type as the parameter type. Note that in a call
1449
6
      // to operator delete(size_t, ...), we may not have it available.
1450
6
      if (Params.Alignment)
1451
0
        DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
1452
0
                           CGF.SizeTy, AllocAlign.getQuantity())),
1453
0
                       CGF.getContext().getSizeType());
1454
6
1455
6
      // Pass the rest of the arguments, which must match exactly.
1456
10
      for (unsigned I = 0; I != NumPlacementArgs; 
++I4
) {
1457
4
        auto Arg = getPlacementArgs()[I];
1458
4
        DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
1459
4
      }
1460
6
1461
6
      // Call 'operator delete'.
1462
6
      EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
1463
6
    }
1464
  };
1465
}
1466
1467
/// Enter a cleanup to call 'operator delete' if the initializer in a
1468
/// new-expression throws.
1469
static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
1470
                                  const CXXNewExpr *E,
1471
                                  Address NewPtr,
1472
                                  llvm::Value *AllocSize,
1473
                                  CharUnits AllocAlign,
1474
2.07k
                                  const CallArgList &NewArgs) {
1475
2.07k
  unsigned NumNonPlacementArgs = E->passAlignment() ? 
240
:
12.03k
;
1476
2.07k
1477
2.07k
  // If we're not inside a conditional branch, then the cleanup will
1478
2.07k
  // dominate and we can do the easier (and more efficient) thing.
1479
2.07k
  if (!CGF.isInConditionalBranch()) {
1480
2.06k
    struct DirectCleanupTraits {
1481
2.06k
      typedef llvm::Value *ValueTy;
1482
2.06k
      typedef RValue RValueTy;
1483
2.06k
      static RValue get(CodeGenFunction &, ValueTy V) 
{ return RValue::get(V); }1.59k
1484
2.06k
      static RValue get(CodeGenFunction &, RValueTy V) 
{ return V; }61
1485
2.06k
    };
1486
2.06k
1487
2.06k
    typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
1488
2.06k
1489
2.06k
    DirectCleanup *Cleanup = CGF.EHStack
1490
2.06k
      .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
1491
2.06k
                                           E->getNumPlacementArgs(),
1492
2.06k
                                           E->getOperatorDelete(),
1493
2.06k
                                           NewPtr.getPointer(),
1494
2.06k
                                           AllocSize,
1495
2.06k
                                           E->passAlignment(),
1496
2.06k
                                           AllocAlign);
1497
2.15k
    for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; 
++I87
) {
1498
87
      auto &Arg = NewArgs[I + NumNonPlacementArgs];
1499
87
      Cleanup->setPlacementArg(I, Arg.getRValue(CGF), Arg.Ty);
1500
87
    }
1501
2.06k
1502
2.06k
    return;
1503
2.06k
  }
1504
11
1505
11
  // Otherwise, we need to save all this stuff.
1506
11
  DominatingValue<RValue>::saved_type SavedNewPtr =
1507
11
    DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer()));
1508
11
  DominatingValue<RValue>::saved_type SavedAllocSize =
1509
11
    DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
1510
11
1511
11
  struct ConditionalCleanupTraits {
1512
11
    typedef DominatingValue<RValue>::saved_type ValueTy;
1513
11
    typedef DominatingValue<RValue>::saved_type RValueTy;
1514
11
    static RValue get(CodeGenFunction &CGF, ValueTy V) {
1515
10
      return V.restore(CGF);
1516
10
    }
1517
11
  };
1518
11
  typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
1519
11
1520
11
  ConditionalCleanup *Cleanup = CGF.EHStack
1521
11
    .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
1522
11
                                              E->getNumPlacementArgs(),
1523
11
                                              E->getOperatorDelete(),
1524
11
                                              SavedNewPtr,
1525
11
                                              SavedAllocSize,
1526
11
                                              E->passAlignment(),
1527
11
                                              AllocAlign);
1528
15
  for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; 
++I4
) {
1529
4
    auto &Arg = NewArgs[I + NumNonPlacementArgs];
1530
4
    Cleanup->setPlacementArg(
1531
4
        I, DominatingValue<RValue>::save(CGF, Arg.getRValue(CGF)), Arg.Ty);
1532
4
  }
1533
11
1534
11
  CGF.initFullExprCleanup();
1535
11
}
1536
1537
5.05k
/// Emit a C++ new-expression: call the allocation function, null-check the
/// result when required, install an 'operator delete' EH cleanup around the
/// initializer, write any array cookie, run the initializer, and produce
/// the resulting pointer.
llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
  // The element type being allocated.
  QualType allocType = getContext().getBaseElementType(E->getAllocatedType());

  // 1. Build a call to the allocation function.
  FunctionDecl *allocator = E->getOperatorNew();

  // If there is a brace-initializer, cannot allocate fewer elements than inits.
  unsigned minElements = 0;
  if (E->isArray() && E->hasInitializer()) {
    const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
    if (ILE && ILE->isStringLiteralInit())
      minElements =
          cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
              ->getSize().getZExtValue();
    else if (ILE)
      minElements = ILE->getNumInits();
  }

  llvm::Value *numElements = nullptr;
  llvm::Value *allocSizeWithoutCookie = nullptr;
  llvm::Value *allocSize =
    EmitCXXNewAllocSize(*this, E, minElements, numElements,
                        allocSizeWithoutCookie);
  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);

  // Emit the allocation call.  If the allocator is a global placement
  // operator, just "inline" it directly.
  Address allocation = Address::invalid();
  CallArgList allocatorArgs;
  if (allocator->isReservedGlobalPlacementOperator()) {
    assert(E->getNumPlacementArgs() == 1);
    const Expr *arg = *E->placement_arguments().begin();

    LValueBaseInfo BaseInfo;
    allocation = EmitPointerWithAlignment(arg, &BaseInfo);

    // The pointer expression will, in many cases, be an opaque void*.
    // In these cases, discard the computed alignment and use the
    // formal alignment of the allocated type.
    if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl)
      allocation = Address(allocation.getPointer(), allocAlign);

    // Set up allocatorArgs for the call to operator delete if it's not
    // the reserved global operator.
    if (E->getOperatorDelete() &&
        !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
      allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType());
      allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType());
    }

  } else {
    const FunctionProtoType *allocatorType =
      allocator->getType()->castAs<FunctionProtoType>();
    unsigned ParamsToSkip = 0;

    // The allocation size is the first argument.
    QualType sizeType = getContext().getSizeType();
    allocatorArgs.add(RValue::get(allocSize), sizeType);
    ++ParamsToSkip;

    if (allocSize != allocSizeWithoutCookie) {
      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
      allocAlign = std::max(allocAlign, cookieAlign);
    }

    // The allocation alignment may be passed as the second argument.
    if (E->passAlignment()) {
      QualType AlignValT = sizeType;
      if (allocatorType->getNumParams() > 1) {
        AlignValT = allocatorType->getParamType(1);
        assert(getContext().hasSameUnqualifiedType(
                   AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
                   sizeType) &&
               "wrong type for alignment parameter");
        ++ParamsToSkip;
      } else {
        // Corner case, passing alignment to 'operator new(size_t, ...)'.
        assert(allocator->isVariadic() && "can't pass alignment to allocator");
      }
      allocatorArgs.add(
          RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
          AlignValT);
    }

    // FIXME: Why do we not pass a CalleeDecl here?
    EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
                 /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);

    RValue RV =
      EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);

    // If this was a call to a global replaceable allocation function that does
    // not take an alignment argument, the allocator is known to produce
    // storage that's suitably aligned for any object that fits, up to a known
    // threshold. Otherwise assume it's suitably aligned for the allocated type.
    CharUnits allocationAlign = allocAlign;
    if (!E->passAlignment() &&
        allocator->isReplaceableGlobalAllocationFunction()) {
      unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
          Target.getNewAlign(), getContext().getTypeSize(allocType)));
      allocationAlign = std::max(
          allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
    }

    allocation = Address(RV.getScalarVal(), allocationAlign);
  }

  // Emit a null check on the allocation result if the allocation
  // function is allowed to return null (because it has a non-throwing
  // exception spec or is the reserved placement new) and we have an
  // interesting initializer will be running sanitizers on the initialization.
  bool nullCheck = E->shouldNullCheckAllocation() &&
                   (!allocType.isPODType(getContext()) || E->hasInitializer() ||
                    sanitizePerformTypeCheck());

  llvm::BasicBlock *nullCheckBB = nullptr;
  llvm::BasicBlock *contBB = nullptr;

  // The null-check means that the initializer is conditionally
  // evaluated.
  ConditionalEvaluation conditional(*this);

  if (nullCheck) {
    conditional.begin(*this);

    nullCheckBB = Builder.GetInsertBlock();
    llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
    contBB = createBasicBlock("new.cont");

    llvm::Value *isNull =
      Builder.CreateIsNull(allocation.getPointer(), "new.isnull");
    Builder.CreateCondBr(isNull, contBB, notNullBB);
    EmitBlock(notNullBB);
  }

  // If there's an operator delete, enter a cleanup to call it if an
  // exception is thrown.
  EHScopeStack::stable_iterator operatorDeleteCleanup;
  llvm::Instruction *cleanupDominator = nullptr;
  if (E->getOperatorDelete() &&
      !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
    EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
                          allocatorArgs);
    operatorDeleteCleanup = EHStack.stable_begin();
    // Placeholder instruction marking the point the cleanup must dominate;
    // erased once the cleanup is deactivated below.
    cleanupDominator = Builder.CreateUnreachable();
  }

  assert((allocSize == allocSizeWithoutCookie) ==
         CalculateCookiePadding(*this, E).isZero());
  if (allocSize != allocSizeWithoutCookie) {
    assert(E->isArray());
    allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
                                                       numElements,
                                                       E, allocType);
  }

  llvm::Type *elementTy = ConvertTypeForMem(allocType);
  Address result = Builder.CreateElementBitCast(allocation, elementTy);

  // Passing pointer through launder.invariant.group to avoid propagation of
  // vptrs information which may be included in previous type.
  // To not break LTO with different optimizations levels, we do it regardless
  // of optimization level.
  if (CGM.getCodeGenOpts().StrictVTablePointers &&
      allocator->isReservedGlobalPlacementOperator())
    result = Address(Builder.CreateLaunderInvariantGroup(result.getPointer()),
                     result.getAlignment());

  // Emit sanitizer checks for pointer value now, so that in the case of an
  // array it was checked only once and not at each constructor call. We may
  // have already checked that the pointer is non-null.
  // FIXME: If we have an array cookie and a potentially-throwing allocator,
  // we'll null check the wrong pointer here.
  SanitizerSet SkippedChecks;
  SkippedChecks.set(SanitizerKind::Null, nullCheck);
  EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall,
                E->getAllocatedTypeSourceInfo()->getTypeLoc().getBeginLoc(),
                result.getPointer(), allocType, result.getAlignment(),
                SkippedChecks, numElements);

  EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
                     allocSizeWithoutCookie);
  if (E->isArray()) {
    // NewPtr is a pointer to the base element type.  If we're
    // allocating an array of arrays, we'll need to cast back to the
    // array pointer type.
    llvm::Type *resultType = ConvertTypeForMem(E->getType());
    if (result.getType() != resultType)
      result = Builder.CreateBitCast(result, resultType);
  }

  // Deactivate the 'operator delete' cleanup if we finished
  // initialization.
  if (operatorDeleteCleanup.isValid()) {
    DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
    cleanupDominator->eraseFromParent();
  }

  llvm::Value *resultPtr = result.getPointer();
  if (nullCheck) {
    conditional.end(*this);

    // Merge the not-null and null paths; the null path yields a null pointer.
    llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
    EmitBlock(contBB);

    llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2);
    PHI->addIncoming(resultPtr, notNullBB);
    PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()),
                     nullCheckBB);

    resultPtr = PHI;
  }

  return resultPtr;
}
1753
1754
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
1755
                                     llvm::Value *Ptr, QualType DeleteTy,
1756
                                     llvm::Value *NumElements,
1757
3.24k
                                     CharUnits CookieSize) {
1758
3.24k
  assert((!NumElements && CookieSize.isZero()) ||
1759
3.24k
         DeleteFD->getOverloadedOperator() == OO_Array_Delete);
1760
3.24k
1761
3.24k
  const FunctionProtoType *DeleteFTy =
1762
3.24k
    DeleteFD->getType()->getAs<FunctionProtoType>();
1763
3.24k
1764
3.24k
  CallArgList DeleteArgs;
1765
3.24k
1766
3.24k
  auto Params = getUsualDeleteParams(DeleteFD);
1767
3.24k
  auto ParamTypeIt = DeleteFTy->param_type_begin();
1768
3.24k
1769
3.24k
  // Pass the pointer itself.
1770
3.24k
  QualType ArgTy = *ParamTypeIt++;
1771
3.24k
  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
1772
3.24k
  DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
1773
3.24k
1774
3.24k
  // Pass the std::destroying_delete tag if present.
1775
3.24k
  if (Params.DestroyingDelete) {
1776
10
    QualType DDTag = *ParamTypeIt++;
1777
10
    // Just pass an 'undef'. We expect the tag type to be an empty struct.
1778
10
    auto *V = llvm::UndefValue::get(getTypes().ConvertType(DDTag));
1779
10
    DeleteArgs.add(RValue::get(V), DDTag);
1780
10
  }
1781
3.24k
1782
3.24k
  // Pass the size if the delete function has a size_t parameter.
1783
3.24k
  if (Params.Size) {
1784
79
    QualType SizeType = *ParamTypeIt++;
1785
79
    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
1786
79
    llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
1787
79
                                               DeleteTypeSize.getQuantity());
1788
79
1789
79
    // For array new, multiply by the number of elements.
1790
79
    if (NumElements)
1791
23
      Size = Builder.CreateMul(Size, NumElements);
1792
79
1793
79
    // If there is a cookie, add the cookie size.
1794
79
    if (!CookieSize.isZero())
1795
23
      Size = Builder.CreateAdd(
1796
23
          Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
1797
79
1798
79
    DeleteArgs.add(RValue::get(Size), SizeType);
1799
79
  }
1800
3.24k
1801
3.24k
  // Pass the alignment if the delete function has an align_val_t parameter.
1802
3.24k
  if (Params.Alignment) {
1803
26
    QualType AlignValType = *ParamTypeIt++;
1804
26
    CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
1805
26
        getContext().getTypeAlignIfKnown(DeleteTy));
1806
26
    llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
1807
26
                                                DeleteTypeAlign.getQuantity());
1808
26
    DeleteArgs.add(RValue::get(Align), AlignValType);
1809
26
  }
1810
3.24k
1811
3.24k
  assert(ParamTypeIt == DeleteFTy->param_type_end() &&
1812
3.24k
         "unknown parameter to usual delete function");
1813
3.24k
1814
3.24k
  // Emit the call to delete.
1815
3.24k
  EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
1816
3.24k
}
1817
1818
namespace {
  /// Calls the given 'operator delete' on a single object.
  ///
  /// Pushed as an EH-scope cleanup so the deallocation runs even if the
  /// destructor (emitted before the cleanup is popped) throws.
  struct CallObjectDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;                   // Pointer to the storage to deallocate.
    const FunctionDecl *OperatorDelete; // The 'operator delete' to invoke.
    QualType ElementType;               // Static type of the deleted object.

    CallObjectDelete(llvm::Value *Ptr,
                     const FunctionDecl *OperatorDelete,
                     QualType ElementType)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}

    // Cleanup entry point: emit the actual call to 'operator delete'.
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
    }
  };
}
1835
1836
/// Push a cleanup that calls the given 'operator delete' on \p CompletePtr
/// when the enclosing scope is exited, either normally or via an exception.
void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}
1843
1844
/// Emit the code for deleting a single object with a destroying operator
1845
/// delete. If the element type has a non-virtual destructor, Ptr has already
1846
/// been converted to the type of the parameter of 'operator delete'. Otherwise
1847
/// Ptr points to an object of the static type.
1848
static void EmitDestroyingObjectDelete(CodeGenFunction &CGF,
1849
                                       const CXXDeleteExpr *DE, Address Ptr,
1850
10
                                       QualType ElementType) {
1851
10
  auto *Dtor = ElementType->getAsCXXRecordDecl()->getDestructor();
1852
10
  if (Dtor && Dtor->isVirtual())
1853
4
    CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
1854
4
                                                Dtor);
1855
6
  else
1856
6
    CGF.EmitDeleteCall(DE->getOperatorDelete(), Ptr.getPointer(), ElementType);
1857
10
}
1858
1859
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  // Destroying operator deletes take a different path (see
  // EmitDestroyingObjectDelete) and must never reach here.
  assert(!OperatorDelete->isDestroyingOperatorDelete());

  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        // The virtual destructor is responsible for both destruction and
        // deallocation, so nothing more needs to be emitted here.
        CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                    Dtor);
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr.getPointer(),
                                            OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr, ElementType);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    // No C++ destructor, but the element may carry an ObjC ARC ownership
    // qualifier that requires releasing the stored reference first.
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  // Pop (and run) the cleanup, emitting the call to 'operator delete'.
  CGF.PopCleanupBlock();
}
1923
1924
namespace {
  /// Calls the given 'operator delete' on an array of objects.
  ///
  /// Pushed as an EH-scope cleanup so deallocation happens even if one of
  /// the element destructors throws.
  struct CallArrayDelete final : EHScopeStack::Cleanup {
    llvm::Value *Ptr;                   // Start of the allocation (including any cookie).
    const FunctionDecl *OperatorDelete; // The array 'operator delete' to invoke.
    llvm::Value *NumElements;           // Element count; may be null — presumably when
                                        // no cookie was read (see EmitArrayDelete).
    QualType ElementType;               // Static element type of the array.
    CharUnits CookieSize;               // Size of the array cookie; may be zero.

    CallArrayDelete(llvm::Value *Ptr,
                    const FunctionDecl *OperatorDelete,
                    llvm::Value *NumElements,
                    QualType ElementType,
                    CharUnits CookieSize)
      : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
        ElementType(ElementType), CookieSize(CookieSize) {}

    // Cleanup entry point: emit the actual call to the array delete.
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
                         CookieSize);
    }
  };
}
1947
1948
/// Emit the code for deleting an array of objects.
///
/// Reads the array cookie (element count / allocation start) via the C++ ABI,
/// registers a cleanup that calls 'operator delete[]', destroys the elements
/// if they need destruction, and finally pops the cleanup to perform the
/// deallocation.
static void EmitArrayDelete(CodeGenFunction &CGF,
                            const CXXDeleteExpr *E,
                            Address deletedPtr,
                            QualType elementType) {
  llvm::Value *numElements = nullptr;
  llvm::Value *allocatedPtr = nullptr;
  CharUnits cookieSize;
  // The ABI knows where the cookie lives and fills in numElements,
  // allocatedPtr, and cookieSize accordingly.
  CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
                                      numElements, allocatedPtr, cookieSize);

  assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");

  // Make sure that we call delete even if one of the dtors throws.
  const FunctionDecl *operatorDelete = E->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
                                           allocatedPtr, operatorDelete,
                                           numElements, elementType,
                                           cookieSize);

  // Destroy the elements.
  if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
    assert(numElements && "no element count for a type with a destructor!");

    CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
    CharUnits elementAlign =
      deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);

    llvm::Value *arrayBegin = deletedPtr.getPointer();
    llvm::Value *arrayEnd =
      CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");

    // Note that it is legal to allocate a zero-length array, and we
    // can never fold the check away because the length should always
    // come from a cookie.
    CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign,
                         CGF.getDestroyer(dtorKind),
                         /*checkZeroLength*/ true,
                         CGF.needsEHCleanup(dtorKind));
  }

  // Pop the cleanup block, emitting the call to 'operator delete[]'.
  CGF.PopCleanupBlock();
}
1992
1993
1.48k
/// Emit a C++ 'delete' or 'delete[]' expression: null-check the operand,
/// then dispatch to the destroying-delete, array-delete, or single-object
/// delete paths.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer: deleting a null pointer is a no-op, so branch
  // straight to the end block in that case.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  QualType DeleteTy = E->getDestroyedType();

  // A destroying operator delete overrides the entire operation of the
  // delete expression.
  if (E->getOperatorDelete()->isDestroyingOperatorDelete()) {
    EmitDestroyingObjectDelete(*this, E, Ptr, DeleteTy);
    EmitBlock(DeleteEnd);
    return;
  }

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
                  Ptr.getAlignment());
  }

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E, Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
2049
2050
49
static bool isGLValueFromPointerDeref(const Expr *E) {
2051
49
  E = E->IgnoreParens();
2052
49
2053
49
  if (const auto *CE = dyn_cast<CastExpr>(E)) {
2054
6
    if (!CE->getSubExpr()->isGLValue())
2055
0
      return false;
2056
6
    return isGLValueFromPointerDeref(CE->getSubExpr());
2057
6
  }
2058
43
2059
43
  if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E))
2060
4
    return isGLValueFromPointerDeref(OVE->getSourceExpr());
2061
39
2062
39
  if (const auto *BO = dyn_cast<BinaryOperator>(E))
2063
1
    if (BO->getOpcode() == BO_Comma)
2064
1
      return isGLValueFromPointerDeref(BO->getRHS());
2065
38
2066
38
  if (const auto *ACO = dyn_cast<AbstractConditionalOperator>(E))
2067
7
    return isGLValueFromPointerDeref(ACO->getTrueExpr()) ||
2068
7
           
isGLValueFromPointerDeref(ACO->getFalseExpr())3
;
2069
31
2070
31
  // C++11 [expr.sub]p1:
2071
31
  //   The expression E1[E2] is identical (by definition) to *((E1)+(E2))
2072
31
  if (isa<ArraySubscriptExpr>(E))
2073
2
    return true;
2074
29
2075
29
  if (const auto *UO = dyn_cast<UnaryOperator>(E))
2076
17
    if (UO->getOpcode() == UO_Deref)
2077
17
      return true;
2078
12
2079
12
  return false;
2080
12
}
2081
2082
/// Emit a typeid on a polymorphic glvalue: load the type_info through the
/// object's vtable, null-checking the operand first when the ABI requires it.
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
                                         llvm::Type *StdTypeInfoPtrTy) {
  // Get the vtable pointer.
  Address ThisPtr = CGF.EmitLValue(E).getAddress();

  QualType SrcRecordTy = E->getType();

  // C++ [class.cdtor]p4:
  //   If the operand of typeid refers to the object under construction or
  //   destruction and the static type of the operand is neither the constructor
  //   or destructor's class nor one of its bases, the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_DynamicOperation, E->getExprLoc(),
                    ThisPtr.getPointer(), SrcRecordTy);

  // C++ [expr.typeid]p2:
  //   If the glvalue expression is obtained by applying the unary * operator to
  //   a pointer and the pointer is a null pointer value, the typeid expression
  //   throws the std::bad_typeid exception.
  //
  // However, this paragraph's intent is not clear.  We choose a very generous
  // interpretation which implores us to consider comma operators, conditional
  // operators, parentheses and other such constructs.
  if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked(
          isGLValueFromPointerDeref(E), SrcRecordTy)) {
    // Branch to a bad_typeid-throwing block when the operand is null.
    llvm::BasicBlock *BadTypeidBlock =
        CGF.createBasicBlock("typeid.bad_typeid");
    llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer());
    CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);

    CGF.EmitBlock(BadTypeidBlock);
    CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF);
    CGF.EmitBlock(EndBlock);
  }

  // Let the ABI do the actual vtable-based type_info load.
  return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr,
                                        StdTypeInfoPtrTy);
}
2121
2122
235
/// Emit a typeid expression, yielding a pointer to the std::type_info object.
llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
  // The expression's type is (a reference to) std::type_info; the result is
  // a pointer to its IR representation.
  llvm::Type *TypeInfoPtrTy = ConvertType(E->getType())->getPointerTo();

  // typeid(T): the RTTI descriptor is resolvable entirely at compile time.
  if (E->isTypeOperand())
    return Builder.CreateBitCast(
        CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext())),
        TypeInfoPtrTy);

  // C++ [expr.typeid]p2:
  //   When typeid is applied to a glvalue expression whose type is a
  //   polymorphic class type, the result refers to a std::type_info object
  //   representing the type of the most derived object (that is, the dynamic
  //   type) to which the glvalue refers.
  if (E->isPotentiallyEvaluated())
    return EmitTypeidFromVTable(*this, E->getExprOperand(), TypeInfoPtrTy);

  // Non-polymorphic operand: the static type of the expression suffices.
  return Builder.CreateBitCast(
      CGM.GetAddrOfRTTIDescriptor(E->getExprOperand()->getType()),
      TypeInfoPtrTy);
}
2145
2146
/// Emit a dynamic_cast that is statically known to fail: a null pointer for
/// the pointer form, or a thrown std::bad_cast for the reference form.
static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
                                          QualType DestTy) {
  llvm::Type *ResultTy = CGF.ConvertType(DestTy);

  // Pointer form: the result of a failed cast is simply a null pointer.
  if (DestTy->isPointerType())
    return llvm::Constant::getNullValue(ResultTy);

  // Reference form, C++ [expr.dynamic.cast]p9:
  //   A failed cast to reference type throws std::bad_cast
  if (!CGF.CGM.getCXXABI().EmitBadCastCall(CGF))
    return nullptr;

  // Everything after the unconditional throw is unreachable; open a fresh
  // block and yield an undef of the right type to keep the IR well-formed.
  CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
  return llvm::UndefValue::get(ResultTy);
}
2160
2161
/// Emit a dynamic_cast expression: dispatches between the cast-to-void*,
/// always-null, and regular ABI runtime-call forms, wrapping the non-void
/// forms in a null check (with a PHI to merge the null result) when required.
llvm::Value *CodeGenFunction::EmitCXXDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  // Decompose source/destination into their record types; the reference
  // form has no null-pointer semantics and no cast-to-void variant.
  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  // C++ [class.cdtor]p5:
  //   If the operand of the dynamic_cast refers to the object under
  //   construction or destruction and the static type of the operand is not a
  //   pointer to or object of the constructor or destructor's own class or one
  //   of its bases, the dynamic_cast results in undefined behavior.
  EmitTypeCheck(TCK_DynamicOperation, DCE->getExprLoc(), ThisAddr.getPointer(),
                SrcRecordTy);

  // Casts known to fail at compile time short-circuit everything below.
  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  // Branch around the runtime cast entirely when the operand is null.
  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    // The ABI call may have emitted further blocks; re-fetch the block that
    // actually flows into CastEnd so the PHI below gets the right edge.
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    // The null path contributes a null result to the merge block.
    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  // Merge the cast result and the null result with a PHI.
  if (ShouldNullCheckSrcValue) {
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}