/Users/buildslave/jenkins/sharedspace/clang-stage2-coverage-R@2/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===// |
2 | | // |
3 | | // The LLVM Compiler Infrastructure |
4 | | // |
5 | | // This file is distributed under the University of Illinois Open Source |
6 | | // License. See LICENSE.TXT for details. |
7 | | // |
8 | | //===----------------------------------------------------------------------===// |
9 | | // |
10 | | // This contains code dealing with code generation of C++ expressions |
11 | | // |
12 | | //===----------------------------------------------------------------------===// |
13 | | |
14 | | #include "CodeGenFunction.h" |
15 | | #include "CGCUDARuntime.h" |
16 | | #include "CGCXXABI.h" |
17 | | #include "CGDebugInfo.h" |
18 | | #include "CGObjCRuntime.h" |
19 | | #include "ConstantEmitter.h" |
20 | | #include "clang/CodeGen/CGFunctionInfo.h" |
21 | | #include "clang/Frontend/CodeGenOptions.h" |
22 | | #include "llvm/IR/CallSite.h" |
23 | | #include "llvm/IR/Intrinsics.h" |
24 | | |
25 | | using namespace clang; |
26 | | using namespace CodeGen; |
27 | | |
28 | | namespace { |
29 | | struct MemberCallInfo { |
30 | | RequiredArgs ReqArgs; |
31 | | // Number of prefix arguments for the call. Ignores the `this` pointer. |
32 | | unsigned PrefixSize; |
33 | | }; |
34 | | } |
35 | | |
36 | | static MemberCallInfo |
37 | | commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD, |
38 | | llvm::Value *This, llvm::Value *ImplicitParam, |
39 | | QualType ImplicitParamTy, const CallExpr *CE, |
40 | 65.3k | CallArgList &Args, CallArgList *RtlArgs) { |
41 | 65.3k | assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) || |
42 | 65.3k | isa<CXXOperatorCallExpr>(CE)); |
43 | 65.3k | assert(MD->isInstance() && |
44 | 65.3k | "Trying to emit a member or operator call expr on a static method!"); |
45 | 65.3k | ASTContext &C = CGF.getContext(); |
46 | 65.3k | |
47 | 65.3k | // Push the this ptr. |
48 | 65.3k | const CXXRecordDecl *RD = |
49 | 65.3k | CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD); |
50 | 65.3k | Args.add(RValue::get(This), |
51 | 65.3k | RD ? C.getPointerType(C.getTypeDeclType(RD))65.2k : C.VoidPtrTy51 ); |
52 | 65.3k | |
53 | 65.3k | // If there is an implicit parameter (e.g. VTT), emit it. |
54 | 65.3k | if (ImplicitParam65.3k ) { |
55 | 111 | Args.add(RValue::get(ImplicitParam), ImplicitParamTy); |
56 | 111 | } |
57 | 65.3k | |
58 | 65.3k | const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); |
59 | 65.3k | RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD); |
60 | 65.3k | unsigned PrefixSize = Args.size() - 1; |
61 | 65.3k | |
62 | 65.3k | // And the rest of the call args. |
63 | 65.3k | if (RtlArgs65.3k ) { |
64 | 1.93k | // Special case: if the caller emitted the arguments right-to-left already |
65 | 1.93k | // (prior to emitting the *this argument), we're done. This happens for |
66 | 1.93k | // assignment operators. |
67 | 1.93k | Args.addFrom(*RtlArgs); |
68 | 65.3k | } else if (63.3k CE63.3k ) { |
69 | 41.2k | // Special case: skip first argument of CXXOperatorCall (it is "this"). |
70 | 41.2k | unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 16.72k : 034.5k ; |
71 | 41.2k | CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip), |
72 | 41.2k | CE->getDirectCallee()); |
73 | 63.3k | } else { |
74 | 22.0k | assert( |
75 | 22.0k | FPT->getNumParams() == 0 && |
76 | 22.0k | "No CallExpr specified for function with non-zero number of arguments"); |
77 | 22.0k | } |
78 | 65.3k | return {required, PrefixSize}; |
79 | 65.3k | } |
80 | | |
81 | | RValue CodeGenFunction::EmitCXXMemberOrOperatorCall( |
82 | | const CXXMethodDecl *MD, const CGCallee &Callee, |
83 | | ReturnValueSlot ReturnValue, |
84 | | llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy, |
85 | 64.3k | const CallExpr *CE, CallArgList *RtlArgs) { |
86 | 64.3k | const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); |
87 | 64.3k | CallArgList Args; |
88 | 64.3k | MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall( |
89 | 64.3k | *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs); |
90 | 64.3k | auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall( |
91 | 64.3k | Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize); |
92 | 64.3k | return EmitCall(FnInfo, Callee, ReturnValue, Args); |
93 | 64.3k | } |
94 | | |
95 | | RValue CodeGenFunction::EmitCXXDestructorCall( |
96 | | const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This, |
97 | | llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE, |
98 | 1.00k | StructorType Type) { |
99 | 1.00k | CallArgList Args; |
100 | 1.00k | commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam, |
101 | 1.00k | ImplicitParamTy, CE, Args, nullptr); |
102 | 1.00k | return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type), |
103 | 1.00k | Callee, ReturnValueSlot(), Args); |
104 | 1.00k | } |
105 | | |
106 | | RValue CodeGenFunction::EmitCXXPseudoDestructorExpr( |
107 | 5 | const CXXPseudoDestructorExpr *E) { |
108 | 5 | QualType DestroyedType = E->getDestroyedType(); |
109 | 5 | if (DestroyedType.hasStrongOrWeakObjCLifetime()5 ) { |
110 | 4 | // Automatic Reference Counting: |
111 | 4 | // If the pseudo-expression names a retainable object with weak or |
112 | 4 | // strong lifetime, the object shall be released. |
113 | 4 | Expr *BaseExpr = E->getBase(); |
114 | 4 | Address BaseValue = Address::invalid(); |
115 | 4 | Qualifiers BaseQuals; |
116 | 4 | |
117 | 4 | // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. |
118 | 4 | if (E->isArrow()4 ) { |
119 | 2 | BaseValue = EmitPointerWithAlignment(BaseExpr); |
120 | 2 | const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); |
121 | 2 | BaseQuals = PTy->getPointeeType().getQualifiers(); |
122 | 4 | } else { |
123 | 2 | LValue BaseLV = EmitLValue(BaseExpr); |
124 | 2 | BaseValue = BaseLV.getAddress(); |
125 | 2 | QualType BaseTy = BaseExpr->getType(); |
126 | 2 | BaseQuals = BaseTy.getQualifiers(); |
127 | 2 | } |
128 | 4 | |
129 | 4 | switch (DestroyedType.getObjCLifetime()) { |
130 | 0 | case Qualifiers::OCL_None: |
131 | 0 | case Qualifiers::OCL_ExplicitNone: |
132 | 0 | case Qualifiers::OCL_Autoreleasing: |
133 | 0 | break; |
134 | 0 |
|
135 | 2 | case Qualifiers::OCL_Strong: |
136 | 2 | EmitARCRelease(Builder.CreateLoad(BaseValue, |
137 | 2 | DestroyedType.isVolatileQualified()), |
138 | 2 | ARCPreciseLifetime); |
139 | 2 | break; |
140 | 0 |
|
141 | 2 | case Qualifiers::OCL_Weak: |
142 | 2 | EmitARCDestroyWeak(BaseValue); |
143 | 2 | break; |
144 | 5 | } |
145 | 1 | } else { |
146 | 1 | // C++ [expr.pseudo]p1: |
147 | 1 | // The result shall only be used as the operand for the function call |
148 | 1 | // operator (), and the result of such a call has type void. The only |
149 | 1 | // effect is the evaluation of the postfix-expression before the dot or |
150 | 1 | // arrow. |
151 | 1 | EmitIgnoredExpr(E->getBase()); |
152 | 1 | } |
153 | 5 | |
154 | 5 | return RValue::get(nullptr); |
155 | 5 | } |
156 | | |
157 | 339 | static CXXRecordDecl *getCXXRecord(const Expr *E) { |
158 | 339 | QualType T = E->getType(); |
159 | 339 | if (const PointerType *PTy = T->getAs<PointerType>()) |
160 | 25 | T = PTy->getPointeeType(); |
161 | 339 | const RecordType *Ty = T->castAs<RecordType>(); |
162 | 339 | return cast<CXXRecordDecl>(Ty->getDecl()); |
163 | 339 | } |
164 | | |
165 | | // Note: This function also emit constructor calls to support a MSVC |
166 | | // extensions allowing explicit constructor function call. |
167 | | RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE, |
168 | 34.8k | ReturnValueSlot ReturnValue) { |
169 | 34.8k | const Expr *callee = CE->getCallee()->IgnoreParens(); |
170 | 34.8k | |
171 | 34.8k | if (isa<BinaryOperator>(callee)) |
172 | 115 | return EmitCXXMemberPointerCallExpr(CE, ReturnValue); |
173 | 34.7k | |
174 | 34.7k | const MemberExpr *ME = cast<MemberExpr>(callee); |
175 | 34.7k | const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl()); |
176 | 34.7k | |
177 | 34.7k | if (MD->isStatic()34.7k ) { |
178 | 0 | // The method is static, emit it as we would a regular call. |
179 | 0 | CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD); |
180 | 0 | return EmitCall(getContext().getPointerType(MD->getType()), callee, CE, |
181 | 0 | ReturnValue); |
182 | 0 | } |
183 | 34.7k | |
184 | 34.7k | bool HasQualifier = ME->hasQualifier(); |
185 | 34.7k | NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier()1.32k : nullptr33.4k ; |
186 | 34.8k | bool IsArrow = ME->isArrow(); |
187 | 34.8k | const Expr *Base = ME->getBase(); |
188 | 34.8k | |
189 | 34.8k | return EmitCXXMemberOrOperatorMemberCallExpr( |
190 | 34.8k | CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base); |
191 | 34.8k | } |
192 | | |
193 | | RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr( |
194 | | const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue, |
195 | | bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow, |
196 | 44.2k | const Expr *Base) { |
197 | 44.2k | assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE)); |
198 | 44.2k | |
199 | 44.2k | // Compute the object pointer. |
200 | 5.07k | bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier; |
201 | 44.2k | |
202 | 44.2k | const CXXMethodDecl *DevirtualizedMethod = nullptr; |
203 | 44.2k | if (CanUseVirtualCall && |
204 | 44.2k | MD->getDevirtualizedMethod(Base, getLangOpts().AppleKext)4.80k ) { |
205 | 285 | const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType(); |
206 | 285 | DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl); |
207 | 285 | assert(DevirtualizedMethod); |
208 | 285 | const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent(); |
209 | 285 | const Expr *Inner = Base->ignoreParenBaseCasts(); |
210 | 285 | if (DevirtualizedMethod->getReturnType().getCanonicalType() != |
211 | 285 | MD->getReturnType().getCanonicalType()) |
212 | 285 | // If the return types are not the same, this might be a case where more |
213 | 285 | // code needs to run to compensate for it. For example, the derived |
214 | 285 | // method might return a type that inherits form from the return |
215 | 285 | // type of MD and has a prefix. |
216 | 285 | // For now we just avoid devirtualizing these covariant cases. |
217 | 2 | DevirtualizedMethod = nullptr; |
218 | 283 | else if (283 getCXXRecord(Inner) == DevirtualizedClass283 ) |
219 | 283 | // If the class of the Inner expression is where the dynamic method |
220 | 283 | // is defined, build the this pointer from it. |
221 | 227 | Base = Inner; |
222 | 56 | else if (56 getCXXRecord(Base) != DevirtualizedClass56 ) { |
223 | 2 | // If the method is defined in a class that is not the best dynamic |
224 | 2 | // one or the one of the full expression, we would have to build |
225 | 2 | // a derived-to-base cast to compute the correct this pointer, but |
226 | 2 | // we don't have support for that yet, so do a virtual call. |
227 | 2 | DevirtualizedMethod = nullptr; |
228 | 2 | } |
229 | 285 | } |
230 | 44.2k | |
231 | 44.2k | // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment |
232 | 44.2k | // operator before the LHS. |
233 | 44.2k | CallArgList RtlArgStorage; |
234 | 44.2k | CallArgList *RtlArgs = nullptr; |
235 | 44.2k | if (auto *OCE44.2k = dyn_cast<CXXOperatorCallExpr>(CE)) { |
236 | 9.53k | if (OCE->isAssignmentOp()9.53k ) { |
237 | 2.81k | RtlArgs = &RtlArgStorage; |
238 | 2.81k | EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(), |
239 | 2.81k | drop_begin(CE->arguments(), 1), CE->getDirectCallee(), |
240 | 2.81k | /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft); |
241 | 2.81k | } |
242 | 9.53k | } |
243 | 44.2k | |
244 | 44.2k | Address This = Address::invalid(); |
245 | 44.2k | if (IsArrow) |
246 | 17.9k | This = EmitPointerWithAlignment(Base); |
247 | 44.2k | else |
248 | 26.3k | This = EmitLValue(Base).getAddress(); |
249 | 44.2k | |
250 | 44.2k | |
251 | 44.2k | if (MD->isTrivial() || 44.2k (MD->isDefaulted() && 43.2k MD->getParent()->isUnion()170 )) { |
252 | 1.05k | if (isa<CXXDestructorDecl>(MD)1.05k ) return RValue::get(nullptr)21 ; |
253 | 1.03k | if (1.03k isa<CXXConstructorDecl>(MD) && |
254 | 3 | cast<CXXConstructorDecl>(MD)->isDefaultConstructor()) |
255 | 1 | return RValue::get(nullptr); |
256 | 1.03k | |
257 | 1.03k | if (1.03k !MD->getParent()->mayInsertExtraPadding()1.03k ) { |
258 | 1.02k | if (MD->isCopyAssignmentOperator() || 1.02k MD->isMoveAssignmentOperator()107 ) { |
259 | 1.02k | // We don't like to generate the trivial copy/move assignment operator |
260 | 1.02k | // when it isn't necessary; just produce the proper effect here. |
261 | 1.02k | LValue RHS = isa<CXXOperatorCallExpr>(CE) |
262 | 879 | ? MakeNaturalAlignAddrLValue( |
263 | 879 | (*RtlArgs)[0].RV.getScalarVal(), |
264 | 879 | (*(CE->arg_begin() + 1))->getType()) |
265 | 147 | : EmitLValue(*CE->arg_begin()); |
266 | 1.02k | EmitAggregateAssign(This, RHS.getAddress(), CE->getType()); |
267 | 1.02k | return RValue::get(This.getPointer()); |
268 | 1.02k | } |
269 | 2 | |
270 | 2 | if (2 isa<CXXConstructorDecl>(MD) && |
271 | 2 | cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()2 ) { |
272 | 2 | // Trivial move and copy ctor are the same. |
273 | 2 | assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); |
274 | 2 | Address RHS = EmitLValue(*CE->arg_begin()).getAddress(); |
275 | 2 | EmitAggregateCopy(This, RHS, (*CE->arg_begin())->getType()); |
276 | 2 | return RValue::get(This.getPointer()); |
277 | 2 | } |
278 | 0 | llvm_unreachable0 ("unknown trivial member function"); |
279 | 0 | } |
280 | 1.05k | } |
281 | 43.2k | |
282 | 43.2k | // Compute the function type we're calling. |
283 | 43.2k | const CXXMethodDecl *CalleeDecl = |
284 | 43.2k | DevirtualizedMethod ? DevirtualizedMethod281 : MD42.9k ; |
285 | 43.2k | const CGFunctionInfo *FInfo = nullptr; |
286 | 43.2k | if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl)) |
287 | 31 | FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration( |
288 | 31 | Dtor, StructorType::Complete); |
289 | 43.2k | else if (const auto *43.2k Ctor43.2k = dyn_cast<CXXConstructorDecl>(CalleeDecl)) |
290 | 3 | FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration( |
291 | 3 | Ctor, StructorType::Complete); |
292 | 43.2k | else |
293 | 43.1k | FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl); |
294 | 43.2k | |
295 | 43.2k | llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo); |
296 | 43.2k | |
297 | 43.2k | // C++11 [class.mfct.non-static]p2: |
298 | 43.2k | // If a non-static member function of a class X is called for an object that |
299 | 43.2k | // is not of type X, or of a type derived from X, the behavior is undefined. |
300 | 43.2k | SourceLocation CallLoc; |
301 | 43.2k | ASTContext &C = getContext(); |
302 | 43.2k | if (CE) |
303 | 43.2k | CallLoc = CE->getExprLoc(); |
304 | 43.2k | |
305 | 43.2k | SanitizerSet SkippedChecks; |
306 | 43.2k | if (const auto *CMCE43.2k = dyn_cast<CXXMemberCallExpr>(CE)) { |
307 | 34.5k | auto *IOA = CMCE->getImplicitObjectArgument(); |
308 | 34.5k | bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA); |
309 | 34.5k | if (IsImplicitObjectCXXThis) |
310 | 10.5k | SkippedChecks.set(SanitizerKind::Alignment, true); |
311 | 34.5k | if (IsImplicitObjectCXXThis || 34.5k isa<DeclRefExpr>(IOA)23.9k ) |
312 | 16.1k | SkippedChecks.set(SanitizerKind::Null, true); |
313 | 34.5k | } |
314 | 43.2k | EmitTypeCheck( |
315 | 3 | isa<CXXConstructorDecl>(CalleeDecl) ? CodeGenFunction::TCK_ConstructorCall |
316 | 43.2k | : CodeGenFunction::TCK_MemberCall, |
317 | 43.2k | CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()), |
318 | 43.2k | /*Alignment=*/CharUnits::Zero(), SkippedChecks); |
319 | 43.2k | |
320 | 43.2k | // FIXME: Uses of 'MD' past this point need to be audited. We may need to use |
321 | 43.2k | // 'CalleeDecl' instead. |
322 | 43.2k | |
323 | 43.2k | // C++ [class.virtual]p12: |
324 | 43.2k | // Explicit qualification with the scope operator (5.1) suppresses the |
325 | 43.2k | // virtual call mechanism. |
326 | 43.2k | // |
327 | 43.2k | // We also don't emit a virtual call if the base expression has a record type |
328 | 43.2k | // because then we know what the type is. |
329 | 4.80k | bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod; |
330 | 43.2k | |
331 | 43.2k | if (const CXXDestructorDecl *Dtor43.2k = dyn_cast<CXXDestructorDecl>(MD)) { |
332 | 31 | assert(CE->arg_begin() == CE->arg_end() && |
333 | 31 | "Destructor shouldn't have explicit parameters"); |
334 | 31 | assert(ReturnValue.isNull() && "Destructor shouldn't have return value"); |
335 | 31 | if (UseVirtualCall31 ) { |
336 | 14 | CGM.getCXXABI().EmitVirtualDestructorCall( |
337 | 14 | *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE)); |
338 | 31 | } else { |
339 | 17 | CGCallee Callee; |
340 | 17 | if (getLangOpts().AppleKext && 17 MD->isVirtual()2 && HasQualifier2 ) |
341 | 2 | Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty); |
342 | 15 | else if (15 !DevirtualizedMethod15 ) |
343 | 14 | Callee = CGCallee::forDirect( |
344 | 14 | CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty), |
345 | 14 | Dtor); |
346 | 1 | else { |
347 | 1 | const CXXDestructorDecl *DDtor = |
348 | 1 | cast<CXXDestructorDecl>(DevirtualizedMethod); |
349 | 1 | Callee = CGCallee::forDirect( |
350 | 1 | CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty), |
351 | 1 | DDtor); |
352 | 1 | } |
353 | 17 | EmitCXXMemberOrOperatorCall( |
354 | 17 | CalleeDecl, Callee, ReturnValue, This.getPointer(), |
355 | 17 | /*ImplicitParam=*/nullptr, QualType(), CE, nullptr); |
356 | 17 | } |
357 | 31 | return RValue::get(nullptr); |
358 | 31 | } |
359 | 43.2k | |
360 | 43.2k | CGCallee Callee; |
361 | 43.2k | if (const CXXConstructorDecl *Ctor43.2k = dyn_cast<CXXConstructorDecl>(MD)) { |
362 | 3 | Callee = CGCallee::forDirect( |
363 | 3 | CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty), |
364 | 3 | Ctor); |
365 | 43.2k | } else if (43.1k UseVirtualCall43.1k ) { |
366 | 4.51k | Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty, |
367 | 4.51k | CE->getLocStart()); |
368 | 43.1k | } else { |
369 | 38.6k | if (SanOpts.has(SanitizerKind::CFINVCall) && |
370 | 38.6k | MD->getParent()->isDynamicClass()6 ) { |
371 | 6 | llvm::Value *VTable = GetVTablePtr(This, Int8PtrTy, MD->getParent()); |
372 | 6 | EmitVTablePtrCheckForCall(MD->getParent(), VTable, CFITCK_NVCall, |
373 | 6 | CE->getLocStart()); |
374 | 6 | } |
375 | 38.6k | |
376 | 38.6k | if (getLangOpts().AppleKext && 38.6k MD->isVirtual()6 && HasQualifier6 ) |
377 | 6 | Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty); |
378 | 38.6k | else if (38.6k !DevirtualizedMethod38.6k ) |
379 | 38.3k | Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD); |
380 | 280 | else { |
381 | 280 | Callee = CGCallee::forDirect( |
382 | 280 | CGM.GetAddrOfFunction(DevirtualizedMethod, Ty), |
383 | 280 | DevirtualizedMethod); |
384 | 280 | } |
385 | 43.1k | } |
386 | 43.2k | |
387 | 43.2k | if (MD->isVirtual()43.2k ) { |
388 | 5.05k | This = CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall( |
389 | 5.05k | *this, CalleeDecl, This, UseVirtualCall); |
390 | 5.05k | } |
391 | 44.2k | |
392 | 44.2k | return EmitCXXMemberOrOperatorCall( |
393 | 44.2k | CalleeDecl, Callee, ReturnValue, This.getPointer(), |
394 | 44.2k | /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs); |
395 | 44.2k | } |
396 | | |
397 | | RValue |
398 | | CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E, |
399 | 115 | ReturnValueSlot ReturnValue) { |
400 | 115 | const BinaryOperator *BO = |
401 | 115 | cast<BinaryOperator>(E->getCallee()->IgnoreParens()); |
402 | 115 | const Expr *BaseExpr = BO->getLHS(); |
403 | 115 | const Expr *MemFnExpr = BO->getRHS(); |
404 | 115 | |
405 | 115 | const MemberPointerType *MPT = |
406 | 115 | MemFnExpr->getType()->castAs<MemberPointerType>(); |
407 | 115 | |
408 | 115 | const FunctionProtoType *FPT = |
409 | 115 | MPT->getPointeeType()->castAs<FunctionProtoType>(); |
410 | 115 | const CXXRecordDecl *RD = |
411 | 115 | cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl()); |
412 | 115 | |
413 | 115 | // Emit the 'this' pointer. |
414 | 115 | Address This = Address::invalid(); |
415 | 115 | if (BO->getOpcode() == BO_PtrMemI) |
416 | 72 | This = EmitPointerWithAlignment(BaseExpr); |
417 | 115 | else |
418 | 43 | This = EmitLValue(BaseExpr).getAddress(); |
419 | 115 | |
420 | 115 | EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(), |
421 | 115 | QualType(MPT->getClass(), 0)); |
422 | 115 | |
423 | 115 | // Get the member function pointer. |
424 | 115 | llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr); |
425 | 115 | |
426 | 115 | // Ask the ABI to load the callee. Note that This is modified. |
427 | 115 | llvm::Value *ThisPtrForCall = nullptr; |
428 | 115 | CGCallee Callee = |
429 | 115 | CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This, |
430 | 115 | ThisPtrForCall, MemFnPtr, MPT); |
431 | 115 | |
432 | 115 | CallArgList Args; |
433 | 115 | |
434 | 115 | QualType ThisType = |
435 | 115 | getContext().getPointerType(getContext().getTagDeclType(RD)); |
436 | 115 | |
437 | 115 | // Push the this ptr. |
438 | 115 | Args.add(RValue::get(ThisPtrForCall), ThisType); |
439 | 115 | |
440 | 115 | RequiredArgs required = |
441 | 115 | RequiredArgs::forPrototypePlus(FPT, 1, /*FD=*/nullptr); |
442 | 115 | |
443 | 115 | // And the rest of the call args |
444 | 115 | EmitCallArgs(Args, FPT, E->arguments()); |
445 | 115 | return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required, |
446 | 115 | /*PrefixSize=*/0), |
447 | 115 | Callee, ReturnValue, Args); |
448 | 115 | } |
449 | | |
450 | | RValue |
451 | | CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E, |
452 | | const CXXMethodDecl *MD, |
453 | 9.53k | ReturnValueSlot ReturnValue) { |
454 | 9.53k | assert(MD->isInstance() && |
455 | 9.53k | "Trying to emit a member call expr on a static method!"); |
456 | 9.53k | return EmitCXXMemberOrOperatorMemberCallExpr( |
457 | 9.53k | E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr, |
458 | 9.53k | /*IsArrow=*/false, E->getArg(0)); |
459 | 9.53k | } |
460 | | |
461 | | RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E, |
462 | 3 | ReturnValueSlot ReturnValue) { |
463 | 3 | return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue); |
464 | 3 | } |
465 | | |
466 | | static void EmitNullBaseClassInitialization(CodeGenFunction &CGF, |
467 | | Address DestPtr, |
468 | 13 | const CXXRecordDecl *Base) { |
469 | 13 | if (Base->isEmpty()) |
470 | 2 | return; |
471 | 11 | |
472 | 11 | DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty); |
473 | 11 | |
474 | 11 | const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base); |
475 | 11 | CharUnits NVSize = Layout.getNonVirtualSize(); |
476 | 11 | |
477 | 11 | // We cannot simply zero-initialize the entire base sub-object if vbptrs are |
478 | 11 | // present, they are initialized by the most derived class before calling the |
479 | 11 | // constructor. |
480 | 11 | SmallVector<std::pair<CharUnits, CharUnits>, 1> Stores; |
481 | 11 | Stores.emplace_back(CharUnits::Zero(), NVSize); |
482 | 11 | |
483 | 11 | // Each store is split by the existence of a vbptr. |
484 | 11 | CharUnits VBPtrWidth = CGF.getPointerSize(); |
485 | 11 | std::vector<CharUnits> VBPtrOffsets = |
486 | 11 | CGF.CGM.getCXXABI().getVBPtrOffsets(Base); |
487 | 6 | for (CharUnits VBPtrOffset : VBPtrOffsets) { |
488 | 6 | // Stop before we hit any virtual base pointers located in virtual bases. |
489 | 6 | if (VBPtrOffset >= NVSize) |
490 | 2 | break; |
491 | 4 | std::pair<CharUnits, CharUnits> LastStore = Stores.pop_back_val(); |
492 | 4 | CharUnits LastStoreOffset = LastStore.first; |
493 | 4 | CharUnits LastStoreSize = LastStore.second; |
494 | 4 | |
495 | 4 | CharUnits SplitBeforeOffset = LastStoreOffset; |
496 | 4 | CharUnits SplitBeforeSize = VBPtrOffset - SplitBeforeOffset; |
497 | 4 | assert(!SplitBeforeSize.isNegative() && "negative store size!"); |
498 | 4 | if (!SplitBeforeSize.isZero()) |
499 | 2 | Stores.emplace_back(SplitBeforeOffset, SplitBeforeSize); |
500 | 4 | |
501 | 4 | CharUnits SplitAfterOffset = VBPtrOffset + VBPtrWidth; |
502 | 4 | CharUnits SplitAfterSize = LastStoreSize - SplitAfterOffset; |
503 | 4 | assert(!SplitAfterSize.isNegative() && "negative store size!"); |
504 | 4 | if (!SplitAfterSize.isZero()) |
505 | 4 | Stores.emplace_back(SplitAfterOffset, SplitAfterSize); |
506 | 6 | } |
507 | 11 | |
508 | 11 | // If the type contains a pointer to data member we can't memset it to zero. |
509 | 11 | // Instead, create a null constant and copy it to the destination. |
510 | 11 | // TODO: there are other patterns besides zero that we can usefully memset, |
511 | 11 | // like -1, which happens to be the pattern used by member-pointers. |
512 | 11 | // TODO: isZeroInitializable can be over-conservative in the case where a |
513 | 11 | // virtual base contains a member pointer. |
514 | 11 | llvm::Constant *NullConstantForBase = CGF.CGM.EmitNullConstantForBase(Base); |
515 | 11 | if (!NullConstantForBase->isNullValue()11 ) { |
516 | 2 | llvm::GlobalVariable *NullVariable = new llvm::GlobalVariable( |
517 | 2 | CGF.CGM.getModule(), NullConstantForBase->getType(), |
518 | 2 | /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, |
519 | 2 | NullConstantForBase, Twine()); |
520 | 2 | |
521 | 2 | CharUnits Align = std::max(Layout.getNonVirtualAlignment(), |
522 | 2 | DestPtr.getAlignment()); |
523 | 2 | NullVariable->setAlignment(Align.getQuantity()); |
524 | 2 | |
525 | 2 | Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align); |
526 | 2 | |
527 | 2 | // Get and call the appropriate llvm.memcpy overload. |
528 | 2 | for (std::pair<CharUnits, CharUnits> Store : Stores) { |
529 | 2 | CharUnits StoreOffset = Store.first; |
530 | 2 | CharUnits StoreSize = Store.second; |
531 | 2 | llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize); |
532 | 2 | CGF.Builder.CreateMemCpy( |
533 | 2 | CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset), |
534 | 2 | CGF.Builder.CreateConstInBoundsByteGEP(SrcPtr, StoreOffset), |
535 | 2 | StoreSizeVal); |
536 | 2 | } |
537 | 2 | |
538 | 2 | // Otherwise, just memset the whole thing to zero. This is legal |
539 | 2 | // because in LLVM, all default initializers (other than the ones we just |
540 | 2 | // handled above) are guaranteed to have a bit pattern of all zeros. |
541 | 11 | } else { |
542 | 11 | for (std::pair<CharUnits, CharUnits> Store : Stores) { |
543 | 11 | CharUnits StoreOffset = Store.first; |
544 | 11 | CharUnits StoreSize = Store.second; |
545 | 11 | llvm::Value *StoreSizeVal = CGF.CGM.getSize(StoreSize); |
546 | 11 | CGF.Builder.CreateMemSet( |
547 | 11 | CGF.Builder.CreateConstInBoundsByteGEP(DestPtr, StoreOffset), |
548 | 11 | CGF.Builder.getInt8(0), StoreSizeVal); |
549 | 11 | } |
550 | 9 | } |
551 | 13 | } |
552 | | |
553 | | void |
554 | | CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E, |
555 | 21.5k | AggValueSlot Dest) { |
556 | 21.5k | assert(!Dest.isIgnored() && "Must have a destination!"); |
557 | 21.5k | const CXXConstructorDecl *CD = E->getConstructor(); |
558 | 21.5k | |
559 | 21.5k | // If we require zero initialization before (or instead of) calling the |
560 | 21.5k | // constructor, as can be the case with a non-user-provided default |
561 | 21.5k | // constructor, emit the zero initialization now, unless destination is |
562 | 21.5k | // already zeroed. |
563 | 21.5k | if (E->requiresZeroInitialization() && 21.5k !Dest.isZeroed()969 ) { |
564 | 967 | switch (E->getConstructionKind()) { |
565 | 954 | case CXXConstructExpr::CK_Delegating: |
566 | 954 | case CXXConstructExpr::CK_Complete: |
567 | 954 | EmitNullInitialization(Dest.getAddress(), E->getType()); |
568 | 954 | break; |
569 | 13 | case CXXConstructExpr::CK_VirtualBase: |
570 | 13 | case CXXConstructExpr::CK_NonVirtualBase: |
571 | 13 | EmitNullBaseClassInitialization(*this, Dest.getAddress(), |
572 | 13 | CD->getParent()); |
573 | 13 | break; |
574 | 21.5k | } |
575 | 21.5k | } |
576 | 21.5k | |
577 | 21.5k | // If this is a call to a trivial default constructor, do nothing. |
578 | 21.5k | if (21.5k CD->isTrivial() && 21.5k CD->isDefaultConstructor()4.56k ) |
579 | 1.57k | return; |
580 | 19.9k | |
581 | 19.9k | // Elide the constructor if we're constructing from a temporary. |
582 | 19.9k | // The temporary check is required because Sema sets this on NRVO |
583 | 19.9k | // returns. |
584 | 19.9k | if (19.9k getLangOpts().ElideConstructors && 19.9k E->isElidable()19.9k ) { |
585 | 2.82k | assert(getContext().hasSameUnqualifiedType(E->getType(), |
586 | 2.82k | E->getArg(0)->getType())); |
587 | 2.82k | if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())2.82k ) { |
588 | 2.75k | EmitAggExpr(E->getArg(0), Dest); |
589 | 2.75k | return; |
590 | 2.75k | } |
591 | 17.2k | } |
592 | 17.2k | |
593 | 17.2k | if (const ArrayType *17.2k arrayType17.2k |
594 | 223 | = getContext().getAsArrayType(E->getType())) { |
595 | 223 | EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddress(), E); |
596 | 17.2k | } else { |
597 | 16.9k | CXXCtorType Type = Ctor_Complete; |
598 | 16.9k | bool ForVirtualBase = false; |
599 | 16.9k | bool Delegating = false; |
600 | 16.9k | |
601 | 16.9k | switch (E->getConstructionKind()) { |
602 | 8 | case CXXConstructExpr::CK_Delegating: |
603 | 8 | // We should be emitting a constructor; GlobalDecl will assert this |
604 | 8 | Type = CurGD.getCtorType(); |
605 | 8 | Delegating = true; |
606 | 8 | break; |
607 | 16.9k | |
608 | 13.4k | case CXXConstructExpr::CK_Complete: |
609 | 13.4k | Type = Ctor_Complete; |
610 | 13.4k | break; |
611 | 16.9k | |
612 | 455 | case CXXConstructExpr::CK_VirtualBase: |
613 | 455 | ForVirtualBase = true; |
614 | 455 | // fall-through |
615 | 455 | |
616 | 3.58k | case CXXConstructExpr::CK_NonVirtualBase: |
617 | 3.58k | Type = Ctor_Base; |
618 | 16.9k | } |
619 | 16.9k | |
620 | 16.9k | // Call the constructor. |
621 | 16.9k | EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, |
622 | 16.9k | Dest.getAddress(), E); |
623 | 16.9k | } |
624 | 21.5k | } |
625 | | |
626 | | void CodeGenFunction::EmitSynthesizedCXXCopyCtor(Address Dest, Address Src, |
627 | 66 | const Expr *Exp) { |
628 | 66 | if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp)) |
629 | 5 | Exp = E->getSubExpr(); |
630 | 66 | assert(isa<CXXConstructExpr>(Exp) && |
631 | 66 | "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr"); |
632 | 66 | const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp); |
633 | 66 | const CXXConstructorDecl *CD = E->getConstructor(); |
634 | 66 | RunCleanupsScope Scope(*this); |
635 | 66 | |
636 | 66 | // If we require zero initialization before (or instead of) calling the |
637 | 66 | // constructor, as can be the case with a non-user-provided default |
638 | 66 | // constructor, emit the zero initialization now. |
639 | 66 | // FIXME. Do I still need this for a copy ctor synthesis? |
640 | 66 | if (E->requiresZeroInitialization()) |
641 | 0 | EmitNullInitialization(Dest, E->getType()); |
642 | 66 | |
643 | 66 | assert(!getContext().getAsConstantArrayType(E->getType()) |
644 | 66 | && "EmitSynthesizedCXXCopyCtor - Copied-in Array"); |
645 | 66 | EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E); |
646 | 66 | } |
647 | | |
648 | | static CharUnits CalculateCookiePadding(CodeGenFunction &CGF, |
649 | 552 | const CXXNewExpr *E) { |
650 | 552 | if (!E->isArray()) |
651 | 0 | return CharUnits::Zero(); |
652 | 552 | |
653 | 552 | // No cookie is required if the operator new[] being used is the |
654 | 552 | // reserved placement operator new[]. |
655 | 552 | if (552 E->getOperatorNew()->isReservedGlobalPlacementOperator()552 ) |
656 | 7 | return CharUnits::Zero(); |
657 | 545 | |
658 | 545 | return CGF.CGM.getCXXABI().GetArrayCookieSize(E); |
659 | 545 | } |
660 | | |
661 | | static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, |
662 | | const CXXNewExpr *e, |
663 | | unsigned minElements, |
664 | | llvm::Value *&numElements, |
665 | 2.09k | llvm::Value *&sizeWithoutCookie) { |
666 | 2.09k | QualType type = e->getAllocatedType(); |
667 | 2.09k | |
668 | 2.09k | if (!e->isArray()2.09k ) { |
669 | 1.54k | CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type); |
670 | 1.54k | sizeWithoutCookie |
671 | 1.54k | = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity()); |
672 | 1.54k | return sizeWithoutCookie; |
673 | 1.54k | } |
674 | 552 | |
675 | 552 | // The width of size_t. |
676 | 552 | unsigned sizeWidth = CGF.SizeTy->getBitWidth(); |
677 | 552 | |
678 | 552 | // Figure out the cookie size. |
679 | 552 | llvm::APInt cookieSize(sizeWidth, |
680 | 552 | CalculateCookiePadding(CGF, e).getQuantity()); |
681 | 552 | |
682 | 552 | // Emit the array size expression. |
683 | 552 | // We multiply the size of all dimensions for NumElements. |
684 | 552 | // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6. |
685 | 552 | numElements = |
686 | 552 | ConstantEmitter(CGF).tryEmitAbstract(e->getArraySize(), e->getType()); |
687 | 552 | if (!numElements) |
688 | 434 | numElements = CGF.EmitScalarExpr(e->getArraySize()); |
689 | 552 | assert(isa<llvm::IntegerType>(numElements->getType())); |
690 | 552 | |
691 | 552 | // The number of elements can be have an arbitrary integer type; |
692 | 552 | // essentially, we need to multiply it by a constant factor, add a |
693 | 552 | // cookie size, and verify that the result is representable as a |
694 | 552 | // size_t. That's just a gloss, though, and it's wrong in one |
695 | 552 | // important way: if the count is negative, it's an error even if |
696 | 552 | // the cookie size would bring the total size >= 0. |
697 | 552 | bool isSigned |
698 | 552 | = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType(); |
699 | 552 | llvm::IntegerType *numElementsType |
700 | 552 | = cast<llvm::IntegerType>(numElements->getType()); |
701 | 552 | unsigned numElementsWidth = numElementsType->getBitWidth(); |
702 | 552 | |
703 | 552 | // Compute the constant factor. |
704 | 552 | llvm::APInt arraySizeMultiplier(sizeWidth, 1); |
705 | 576 | while (const ConstantArrayType *CAT |
706 | 24 | = CGF.getContext().getAsConstantArrayType(type)) { |
707 | 24 | type = CAT->getElementType(); |
708 | 24 | arraySizeMultiplier *= CAT->getSize(); |
709 | 24 | } |
710 | 552 | |
711 | 552 | CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type); |
712 | 552 | llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity()); |
713 | 552 | typeSizeMultiplier *= arraySizeMultiplier; |
714 | 552 | |
715 | 552 | // This will be a size_t. |
716 | 552 | llvm::Value *size; |
717 | 552 | |
718 | 552 | // If someone is doing 'new int[42]' there is no need to do a dynamic check. |
719 | 552 | // Don't bloat the -O0 code. |
720 | 552 | if (llvm::ConstantInt *numElementsC = |
721 | 118 | dyn_cast<llvm::ConstantInt>(numElements)) { |
722 | 118 | const llvm::APInt &count = numElementsC->getValue(); |
723 | 118 | |
724 | 118 | bool hasAnyOverflow = false; |
725 | 118 | |
726 | 118 | // If 'count' was a negative number, it's an overflow. |
727 | 118 | if (isSigned && 118 count.isNegative()98 ) |
728 | 0 | hasAnyOverflow = true; |
729 | 118 | |
730 | 118 | // We want to do all this arithmetic in size_t. If numElements is |
731 | 118 | // wider than that, check whether it's already too big, and if so, |
732 | 118 | // overflow. |
733 | 118 | else if (118 numElementsWidth > sizeWidth && |
734 | 0 | numElementsWidth - sizeWidth > count.countLeadingZeros()) |
735 | 0 | hasAnyOverflow = true; |
736 | 118 | |
737 | 118 | // Okay, compute a count at the right width. |
738 | 118 | llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth); |
739 | 118 | |
740 | 118 | // If there is a brace-initializer, we cannot allocate fewer elements than |
741 | 118 | // there are initializers. If we do, that's treated like an overflow. |
742 | 118 | if (adjustedCount.ult(minElements)) |
743 | 0 | hasAnyOverflow = true; |
744 | 118 | |
745 | 118 | // Scale numElements by that. This might overflow, but we don't |
746 | 118 | // care because it only overflows if allocationSize does, too, and |
747 | 118 | // if that overflows then we shouldn't use this. |
748 | 118 | numElements = llvm::ConstantInt::get(CGF.SizeTy, |
749 | 118 | adjustedCount * arraySizeMultiplier); |
750 | 118 | |
751 | 118 | // Compute the size before cookie, and track whether it overflowed. |
752 | 118 | bool overflow; |
753 | 118 | llvm::APInt allocationSize |
754 | 118 | = adjustedCount.umul_ov(typeSizeMultiplier, overflow); |
755 | 118 | hasAnyOverflow |= overflow; |
756 | 118 | |
757 | 118 | // Add in the cookie, and check whether it's overflowed. |
758 | 118 | if (cookieSize != 0118 ) { |
759 | 35 | // Save the current size without a cookie. This shouldn't be |
760 | 35 | // used if there was overflow. |
761 | 35 | sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize); |
762 | 35 | |
763 | 35 | allocationSize = allocationSize.uadd_ov(cookieSize, overflow); |
764 | 35 | hasAnyOverflow |= overflow; |
765 | 35 | } |
766 | 118 | |
767 | 118 | // On overflow, produce a -1 so operator new will fail. |
768 | 118 | if (hasAnyOverflow118 ) { |
769 | 0 | size = llvm::Constant::getAllOnesValue(CGF.SizeTy); |
770 | 118 | } else { |
771 | 118 | size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize); |
772 | 118 | } |
773 | 118 | |
774 | 118 | // Otherwise, we might need to use the overflow intrinsics. |
775 | 552 | } else { |
776 | 434 | // There are up to five conditions we need to test for: |
777 | 434 | // 1) if isSigned, we need to check whether numElements is negative; |
778 | 434 | // 2) if numElementsWidth > sizeWidth, we need to check whether |
779 | 434 | // numElements is larger than something representable in size_t; |
780 | 434 | // 3) if minElements > 0, we need to check whether numElements is smaller |
781 | 434 | // than that. |
782 | 434 | // 4) we need to compute |
783 | 434 | // sizeWithoutCookie := numElements * typeSizeMultiplier |
784 | 434 | // and check whether it overflows; and |
785 | 434 | // 5) if we need a cookie, we need to compute |
786 | 434 | // size := sizeWithoutCookie + cookieSize |
787 | 434 | // and check whether it overflows. |
788 | 434 | |
789 | 434 | llvm::Value *hasOverflow = nullptr; |
790 | 434 | |
791 | 434 | // If numElementsWidth > sizeWidth, then one way or another, we're |
792 | 434 | // going to have to do a comparison for (2), and this happens to |
793 | 434 | // take care of (1), too. |
794 | 434 | if (numElementsWidth > sizeWidth434 ) { |
795 | 2 | llvm::APInt threshold(numElementsWidth, 1); |
796 | 2 | threshold <<= sizeWidth; |
797 | 2 | |
798 | 2 | llvm::Value *thresholdV |
799 | 2 | = llvm::ConstantInt::get(numElementsType, threshold); |
800 | 2 | |
801 | 2 | hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV); |
802 | 2 | numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy); |
803 | 2 | |
804 | 2 | // Otherwise, if we're signed, we want to sext up to size_t. |
805 | 434 | } else if (432 isSigned432 ) { |
806 | 270 | if (numElementsWidth < sizeWidth) |
807 | 246 | numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy); |
808 | 270 | |
809 | 270 | // If there's a non-1 type size multiplier, then we can do the |
810 | 270 | // signedness check at the same time as we do the multiply |
811 | 270 | // because a negative number times anything will cause an |
812 | 270 | // unsigned overflow. Otherwise, we have to do it here. But at least |
813 | 270 | // in this case, we can subsume the >= minElements check. |
814 | 270 | if (typeSizeMultiplier == 1) |
815 | 100 | hasOverflow = CGF.Builder.CreateICmpSLT(numElements, |
816 | 100 | llvm::ConstantInt::get(CGF.SizeTy, minElements)); |
817 | 270 | |
818 | 270 | // Otherwise, zext up to size_t if necessary. |
819 | 432 | } else if (162 numElementsWidth < sizeWidth162 ) { |
820 | 44 | numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy); |
821 | 44 | } |
822 | 434 | |
823 | 434 | assert(numElements->getType() == CGF.SizeTy); |
824 | 434 | |
825 | 434 | if (minElements434 ) { |
826 | 8 | // Don't allow allocation of fewer elements than we have initializers. |
827 | 8 | if (!hasOverflow8 ) { |
828 | 6 | hasOverflow = CGF.Builder.CreateICmpULT(numElements, |
829 | 6 | llvm::ConstantInt::get(CGF.SizeTy, minElements)); |
830 | 8 | } else if (2 numElementsWidth > sizeWidth2 ) { |
831 | 0 | // The other existing overflow subsumes this check. |
832 | 0 | // We do an unsigned comparison, since any signed value < -1 is |
833 | 0 | // taken care of either above or below. |
834 | 0 | hasOverflow = CGF.Builder.CreateOr(hasOverflow, |
835 | 0 | CGF.Builder.CreateICmpULT(numElements, |
836 | 0 | llvm::ConstantInt::get(CGF.SizeTy, minElements))); |
837 | 0 | } |
838 | 8 | } |
839 | 434 | |
840 | 434 | size = numElements; |
841 | 434 | |
842 | 434 | // Multiply by the type size if necessary. This multiplier |
843 | 434 | // includes all the factors for nested arrays. |
844 | 434 | // |
845 | 434 | // This step also causes numElements to be scaled up by the |
846 | 434 | // nested-array factor if necessary. Overflow on this computation |
847 | 434 | // can be ignored because the result shouldn't be used if |
848 | 434 | // allocation fails. |
849 | 434 | if (typeSizeMultiplier != 1434 ) { |
850 | 228 | llvm::Value *umul_with_overflow |
851 | 228 | = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy); |
852 | 228 | |
853 | 228 | llvm::Value *tsmV = |
854 | 228 | llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier); |
855 | 228 | llvm::Value *result = |
856 | 228 | CGF.Builder.CreateCall(umul_with_overflow, {size, tsmV}); |
857 | 228 | |
858 | 228 | llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1); |
859 | 228 | if (hasOverflow) |
860 | 8 | hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed); |
861 | 228 | else |
862 | 220 | hasOverflow = overflowed; |
863 | 228 | |
864 | 228 | size = CGF.Builder.CreateExtractValue(result, 0); |
865 | 228 | |
866 | 228 | // Also scale up numElements by the array size multiplier. |
867 | 228 | if (arraySizeMultiplier != 1228 ) { |
868 | 10 | // If the base element type size is 1, then we can re-use the |
869 | 10 | // multiply we just did. |
870 | 10 | if (typeSize.isOne()10 ) { |
871 | 0 | assert(arraySizeMultiplier == typeSizeMultiplier); |
872 | 0 | numElements = size; |
873 | 0 |
|
874 | 0 | // Otherwise we need a separate multiply. |
875 | 10 | } else { |
876 | 10 | llvm::Value *asmV = |
877 | 10 | llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier); |
878 | 10 | numElements = CGF.Builder.CreateMul(numElements, asmV); |
879 | 10 | } |
880 | 10 | } |
881 | 434 | } else { |
882 | 206 | // numElements doesn't need to be scaled. |
883 | 206 | assert(arraySizeMultiplier == 1); |
884 | 206 | } |
885 | 434 | |
886 | 434 | // Add in the cookie size if necessary. |
887 | 434 | if (cookieSize != 0434 ) { |
888 | 34 | sizeWithoutCookie = size; |
889 | 34 | |
890 | 34 | llvm::Value *uadd_with_overflow |
891 | 34 | = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy); |
892 | 34 | |
893 | 34 | llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize); |
894 | 34 | llvm::Value *result = |
895 | 34 | CGF.Builder.CreateCall(uadd_with_overflow, {size, cookieSizeV}); |
896 | 34 | |
897 | 34 | llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1); |
898 | 34 | if (hasOverflow) |
899 | 34 | hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed); |
900 | 34 | else |
901 | 0 | hasOverflow = overflowed; |
902 | 34 | |
903 | 34 | size = CGF.Builder.CreateExtractValue(result, 0); |
904 | 34 | } |
905 | 434 | |
906 | 434 | // If we had any possibility of dynamic overflow, make a select to |
907 | 434 | // overwrite 'size' with an all-ones value, which should cause |
908 | 434 | // operator new to throw. |
909 | 434 | if (hasOverflow) |
910 | 328 | size = CGF.Builder.CreateSelect(hasOverflow, |
911 | 328 | llvm::Constant::getAllOnesValue(CGF.SizeTy), |
912 | 328 | size); |
913 | 434 | } |
914 | 552 | |
915 | 552 | if (cookieSize == 0) |
916 | 483 | sizeWithoutCookie = size; |
917 | 2.09k | else |
918 | 2.09k | assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?"); |
919 | 2.09k | |
920 | 2.09k | return size; |
921 | 2.09k | } |
922 | | |
923 | | static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init, |
924 | 1.54k | QualType AllocType, Address NewPtr) { |
925 | 1.54k | // FIXME: Refactor with EmitExprAsInit. |
926 | 1.54k | switch (CGF.getEvaluationKind(AllocType)) { |
927 | 48 | case TEK_Scalar: |
928 | 48 | CGF.EmitScalarInit(Init, nullptr, |
929 | 48 | CGF.MakeAddrLValue(NewPtr, AllocType), false); |
930 | 48 | return; |
931 | 1 | case TEK_Complex: |
932 | 1 | CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType), |
933 | 1 | /*isInit*/ true); |
934 | 1 | return; |
935 | 1.49k | case TEK_Aggregate: { |
936 | 1.49k | AggValueSlot Slot |
937 | 1.49k | = AggValueSlot::forAddr(NewPtr, AllocType.getQualifiers(), |
938 | 1.49k | AggValueSlot::IsDestructed, |
939 | 1.49k | AggValueSlot::DoesNotNeedGCBarriers, |
940 | 1.49k | AggValueSlot::IsNotAliased); |
941 | 1.49k | CGF.EmitAggExpr(Init, Slot); |
942 | 1.49k | return; |
943 | 0 | } |
944 | 0 | } |
945 | 0 | llvm_unreachable0 ("bad evaluation kind"); |
946 | 0 | } |
947 | | |
948 | | void CodeGenFunction::EmitNewArrayInitializer( |
949 | | const CXXNewExpr *E, QualType ElementType, llvm::Type *ElementTy, |
950 | | Address BeginPtr, llvm::Value *NumElements, |
951 | 552 | llvm::Value *AllocSizeWithoutCookie) { |
952 | 552 | // If we have a type with trivial initialization and no initializer, |
953 | 552 | // there's nothing to do. |
954 | 552 | if (!E->hasInitializer()) |
955 | 396 | return; |
956 | 156 | |
957 | 156 | Address CurPtr = BeginPtr; |
958 | 156 | |
959 | 156 | unsigned InitListElements = 0; |
960 | 156 | |
961 | 156 | const Expr *Init = E->getInitializer(); |
962 | 156 | Address EndOfInit = Address::invalid(); |
963 | 156 | QualType::DestructionKind DtorKind = ElementType.isDestructedType(); |
964 | 156 | EHScopeStack::stable_iterator Cleanup; |
965 | 156 | llvm::Instruction *CleanupDominator = nullptr; |
966 | 156 | |
967 | 156 | CharUnits ElementSize = getContext().getTypeSizeInChars(ElementType); |
968 | 156 | CharUnits ElementAlign = |
969 | 156 | BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize); |
970 | 156 | |
971 | 156 | // Attempt to perform zero-initialization using memset. |
972 | 22 | auto TryMemsetInitialization = [&]() -> bool { |
973 | 22 | // FIXME: If the type is a pointer-to-data-member under the Itanium ABI, |
974 | 22 | // we can initialize with a memset to -1. |
975 | 22 | if (!CGM.getTypes().isZeroInitializable(ElementType)) |
976 | 4 | return false; |
977 | 18 | |
978 | 18 | // Optimization: since zero initialization will just set the memory |
979 | 18 | // to all zeroes, generate a single memset to do it in one shot. |
980 | 18 | |
981 | 18 | // Subtract out the size of any elements we've already initialized. |
982 | 18 | auto *RemainingSize = AllocSizeWithoutCookie; |
983 | 18 | if (InitListElements18 ) { |
984 | 9 | // We know this can't overflow; we check this when doing the allocation. |
985 | 9 | auto *InitializedSize = llvm::ConstantInt::get( |
986 | 9 | RemainingSize->getType(), |
987 | 9 | getContext().getTypeSizeInChars(ElementType).getQuantity() * |
988 | 9 | InitListElements); |
989 | 9 | RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize); |
990 | 9 | } |
991 | 22 | |
992 | 22 | // Create the memset. |
993 | 22 | Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false); |
994 | 22 | return true; |
995 | 22 | }; |
996 | 156 | |
997 | 156 | // If the initializer is an initializer list, first do the explicit elements. |
998 | 156 | if (const InitListExpr *ILE156 = dyn_cast<InitListExpr>(Init)) { |
999 | 21 | // Initializing from a (braced) string literal is a special case; the init |
1000 | 21 | // list element does not initialize a (single) array element. |
1001 | 21 | if (ILE->isStringLiteralInit()21 ) { |
1002 | 6 | // Initialize the initial portion of length equal to that of the string |
1003 | 6 | // literal. The allocation must be for at least this much; we emitted a |
1004 | 6 | // check for that earlier. |
1005 | 6 | AggValueSlot Slot = |
1006 | 6 | AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(), |
1007 | 6 | AggValueSlot::IsDestructed, |
1008 | 6 | AggValueSlot::DoesNotNeedGCBarriers, |
1009 | 6 | AggValueSlot::IsNotAliased); |
1010 | 6 | EmitAggExpr(ILE->getInit(0), Slot); |
1011 | 6 | |
1012 | 6 | // Move past these elements. |
1013 | 6 | InitListElements = |
1014 | 6 | cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe()) |
1015 | 6 | ->getSize().getZExtValue(); |
1016 | 6 | CurPtr = |
1017 | 6 | Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(), |
1018 | 6 | Builder.getSize(InitListElements), |
1019 | 6 | "string.init.end"), |
1020 | 6 | CurPtr.getAlignment().alignmentAtOffset(InitListElements * |
1021 | 6 | ElementSize)); |
1022 | 6 | |
1023 | 6 | // Zero out the rest, if any remain. |
1024 | 6 | llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements); |
1025 | 6 | if (!ConstNum || 6 !ConstNum->equalsInt(InitListElements)4 ) { |
1026 | 2 | bool OK = TryMemsetInitialization(); |
1027 | 2 | (void)OK; |
1028 | 2 | assert(OK && "couldn't memset character type?"); |
1029 | 2 | } |
1030 | 6 | return; |
1031 | 6 | } |
1032 | 15 | |
1033 | 15 | InitListElements = ILE->getNumInits(); |
1034 | 15 | |
1035 | 15 | // If this is a multi-dimensional array new, we will initialize multiple |
1036 | 15 | // elements with each init list element. |
1037 | 15 | QualType AllocType = E->getAllocatedType(); |
1038 | 15 | if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>( |
1039 | 3 | AllocType->getAsArrayTypeUnsafe())) { |
1040 | 3 | ElementTy = ConvertTypeForMem(AllocType); |
1041 | 3 | CurPtr = Builder.CreateElementBitCast(CurPtr, ElementTy); |
1042 | 3 | InitListElements *= getContext().getConstantArrayElementCount(CAT); |
1043 | 3 | } |
1044 | 15 | |
1045 | 15 | // Enter a partial-destruction Cleanup if necessary. |
1046 | 15 | if (needsEHCleanup(DtorKind)15 ) { |
1047 | 0 | // In principle we could tell the Cleanup where we are more |
1048 | 0 | // directly, but the control flow can get so varied here that it |
1049 | 0 | // would actually be quite complex. Therefore we go through an |
1050 | 0 | // alloca. |
1051 | 0 | EndOfInit = CreateTempAlloca(BeginPtr.getType(), getPointerAlign(), |
1052 | 0 | "array.init.end"); |
1053 | 0 | CleanupDominator = Builder.CreateStore(BeginPtr.getPointer(), EndOfInit); |
1054 | 0 | pushIrregularPartialArrayCleanup(BeginPtr.getPointer(), EndOfInit, |
1055 | 0 | ElementType, ElementAlign, |
1056 | 0 | getDestroyer(DtorKind)); |
1057 | 0 | Cleanup = EHStack.stable_begin(); |
1058 | 0 | } |
1059 | 15 | |
1060 | 15 | CharUnits StartAlign = CurPtr.getAlignment(); |
1061 | 49 | for (unsigned i = 0, e = ILE->getNumInits(); i != e49 ; ++i34 ) { |
1062 | 34 | // Tell the cleanup that it needs to destroy up to this |
1063 | 34 | // element. TODO: some of these stores can be trivially |
1064 | 34 | // observed to be unnecessary. |
1065 | 34 | if (EndOfInit.isValid()34 ) { |
1066 | 0 | auto FinishedPtr = |
1067 | 0 | Builder.CreateBitCast(CurPtr.getPointer(), BeginPtr.getType()); |
1068 | 0 | Builder.CreateStore(FinishedPtr, EndOfInit); |
1069 | 0 | } |
1070 | 34 | // FIXME: If the last initializer is an incomplete initializer list for |
1071 | 34 | // an array, and we have an array filler, we can fold together the two |
1072 | 34 | // initialization loops. |
1073 | 34 | StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), |
1074 | 34 | ILE->getInit(i)->getType(), CurPtr); |
1075 | 34 | CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(), |
1076 | 34 | Builder.getSize(1), |
1077 | 34 | "array.exp.next"), |
1078 | 34 | StartAlign.alignmentAtOffset((i + 1) * ElementSize)); |
1079 | 34 | } |
1080 | 15 | |
1081 | 15 | // The remaining elements are filled with the array filler expression. |
1082 | 15 | Init = ILE->getArrayFiller(); |
1083 | 15 | |
1084 | 15 | // Extract the initializer for the individual array elements by pulling |
1085 | 15 | // out the array filler from all the nested initializer lists. This avoids |
1086 | 15 | // generating a nested loop for the initialization. |
1087 | 17 | while (Init && 17 Init->getType()->isConstantArrayType()10 ) { |
1088 | 2 | auto *SubILE = dyn_cast<InitListExpr>(Init); |
1089 | 2 | if (!SubILE) |
1090 | 0 | break; |
1091 | 2 | assert(SubILE->getNumInits() == 0 && "explicit inits in array filler?"); |
1092 | 2 | Init = SubILE->getArrayFiller(); |
1093 | 2 | } |
1094 | 21 | |
1095 | 21 | // Switch back to initializing one base element at a time. |
1096 | 21 | CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType()); |
1097 | 21 | } |
1098 | 156 | |
1099 | 156 | // If all elements have already been initialized, skip any further |
1100 | 156 | // initialization. |
1101 | 150 | llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements); |
1102 | 150 | if (ConstNum && 150 ConstNum->getZExtValue() <= InitListElements71 ) { |
1103 | 7 | // If there was a Cleanup, deactivate it. |
1104 | 7 | if (CleanupDominator) |
1105 | 0 | DeactivateCleanupBlock(Cleanup, CleanupDominator); |
1106 | 7 | return; |
1107 | 7 | } |
1108 | 143 | |
1109 | 150 | assert(Init && "have trailing elements to initialize but no initializer"); |
1110 | 143 | |
1111 | 143 | // If this is a constructor call, try to optimize it out, and failing that |
1112 | 143 | // emit a single loop to initialize all remaining elements. |
1113 | 143 | if (const CXXConstructExpr *CCE143 = dyn_cast<CXXConstructExpr>(Init)) { |
1114 | 125 | CXXConstructorDecl *Ctor = CCE->getConstructor(); |
1115 | 125 | if (Ctor->isTrivial()125 ) { |
1116 | 56 | // If new expression did not specify value-initialization, then there |
1117 | 56 | // is no initialization. |
1118 | 56 | if (!CCE->requiresZeroInitialization() || 56 Ctor->getParent()->isEmpty()3 ) |
1119 | 54 | return; |
1120 | 2 | |
1121 | 2 | if (2 TryMemsetInitialization()2 ) |
1122 | 1 | return; |
1123 | 70 | } |
1124 | 70 | |
1125 | 70 | // Store the new Cleanup position for irregular Cleanups. |
1126 | 70 | // |
1127 | 70 | // FIXME: Share this cleanup with the constructor call emission rather than |
1128 | 70 | // having it create a cleanup of its own. |
1129 | 70 | if (70 EndOfInit.isValid()70 ) |
1130 | 0 | Builder.CreateStore(CurPtr.getPointer(), EndOfInit); |
1131 | 70 | |
1132 | 70 | // Emit a constructor call loop to initialize the remaining elements. |
1133 | 70 | if (InitListElements) |
1134 | 1 | NumElements = Builder.CreateSub( |
1135 | 1 | NumElements, |
1136 | 1 | llvm::ConstantInt::get(NumElements->getType(), InitListElements)); |
1137 | 125 | EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE, |
1138 | 125 | CCE->requiresZeroInitialization()); |
1139 | 125 | return; |
1140 | 125 | } |
1141 | 18 | |
1142 | 18 | // If this is value-initialization, we can usually use memset. |
1143 | 18 | ImplicitValueInitExpr IVIE(ElementType); |
1144 | 18 | if (isa<ImplicitValueInitExpr>(Init)18 ) { |
1145 | 15 | if (TryMemsetInitialization()) |
1146 | 12 | return; |
1147 | 3 | |
1148 | 3 | // Switch to an ImplicitValueInitExpr for the element type. This handles |
1149 | 3 | // only one case: multidimensional array new of pointers to members. In |
1150 | 3 | // all other cases, we already have an initializer for the array element. |
1151 | 3 | Init = &IVIE; |
1152 | 3 | } |
1153 | 18 | |
1154 | 18 | // At this point we should have found an initializer for the individual |
1155 | 18 | // elements of the array. |
1156 | 6 | assert(getContext().hasSameUnqualifiedType(ElementType, Init->getType()) && |
1157 | 6 | "got wrong type of element to initialize"); |
1158 | 6 | |
1159 | 6 | // If we have an empty initializer list, we can usually use memset. |
1160 | 6 | if (auto *ILE = dyn_cast<InitListExpr>(Init)) |
1161 | 3 | if (3 ILE->getNumInits() == 0 && 3 TryMemsetInitialization()0 ) |
1162 | 0 | return; |
1163 | 6 | |
1164 | 6 | // If we have a struct whose every field is value-initialized, we can |
1165 | 6 | // usually use memset. |
1166 | 6 | if (auto *6 ILE6 = dyn_cast<InitListExpr>(Init)) { |
1167 | 3 | if (const RecordType *RType3 = ILE->getType()->getAs<RecordType>()) { |
1168 | 3 | if (RType->getDecl()->isStruct()3 ) { |
1169 | 3 | unsigned NumElements = 0; |
1170 | 3 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RType->getDecl())) |
1171 | 3 | NumElements = CXXRD->getNumBases(); |
1172 | 3 | for (auto *Field : RType->getDecl()->fields()) |
1173 | 5 | if (5 !Field->isUnnamedBitfield()5 ) |
1174 | 5 | ++NumElements; |
1175 | 3 | // FIXME: Recurse into nested InitListExprs. |
1176 | 3 | if (ILE->getNumInits() == NumElements) |
1177 | 8 | for (unsigned i = 0, e = ILE->getNumInits(); 3 i != e8 ; ++i5 ) |
1178 | 5 | if (5 !isa<ImplicitValueInitExpr>(ILE->getInit(i))5 ) |
1179 | 0 | --NumElements; |
1180 | 3 | if (ILE->getNumInits() == NumElements && 3 TryMemsetInitialization()3 ) |
1181 | 3 | return; |
1182 | 3 | } |
1183 | 3 | } |
1184 | 3 | } |
1185 | 3 | |
1186 | 3 | // Create the loop blocks. |
1187 | 3 | llvm::BasicBlock *EntryBB = Builder.GetInsertBlock(); |
1188 | 3 | llvm::BasicBlock *LoopBB = createBasicBlock("new.loop"); |
1189 | 3 | llvm::BasicBlock *ContBB = createBasicBlock("new.loop.end"); |
1190 | 3 | |
1191 | 3 | // Find the end of the array, hoisted out of the loop. |
1192 | 3 | llvm::Value *EndPtr = |
1193 | 3 | Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end"); |
1194 | 3 | |
1195 | 3 | // If the number of elements isn't constant, we have to now check if there is |
1196 | 3 | // anything left to initialize. |
1197 | 3 | if (!ConstNum3 ) { |
1198 | 0 | llvm::Value *IsEmpty = |
1199 | 0 | Builder.CreateICmpEQ(CurPtr.getPointer(), EndPtr, "array.isempty"); |
1200 | 0 | Builder.CreateCondBr(IsEmpty, ContBB, LoopBB); |
1201 | 0 | } |
1202 | 3 | |
1203 | 3 | // Enter the loop. |
1204 | 3 | EmitBlock(LoopBB); |
1205 | 3 | |
1206 | 3 | // Set up the current-element phi. |
1207 | 3 | llvm::PHINode *CurPtrPhi = |
1208 | 3 | Builder.CreatePHI(CurPtr.getType(), 2, "array.cur"); |
1209 | 3 | CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB); |
1210 | 3 | |
1211 | 3 | CurPtr = Address(CurPtrPhi, ElementAlign); |
1212 | 3 | |
1213 | 3 | // Store the new Cleanup position for irregular Cleanups. |
1214 | 3 | if (EndOfInit.isValid()) |
1215 | 0 | Builder.CreateStore(CurPtr.getPointer(), EndOfInit); |
1216 | 3 | |
1217 | 3 | // Enter a partial-destruction Cleanup if necessary. |
1218 | 3 | if (!CleanupDominator && 3 needsEHCleanup(DtorKind)3 ) { |
1219 | 0 | pushRegularPartialArrayCleanup(BeginPtr.getPointer(), CurPtr.getPointer(), |
1220 | 0 | ElementType, ElementAlign, |
1221 | 0 | getDestroyer(DtorKind)); |
1222 | 0 | Cleanup = EHStack.stable_begin(); |
1223 | 0 | CleanupDominator = Builder.CreateUnreachable(); |
1224 | 0 | } |
1225 | 3 | |
1226 | 3 | // Emit the initializer into this element. |
1227 | 3 | StoreAnyExprIntoOneUnit(*this, Init, Init->getType(), CurPtr); |
1228 | 3 | |
1229 | 3 | // Leave the Cleanup if we entered one. |
1230 | 3 | if (CleanupDominator3 ) { |
1231 | 0 | DeactivateCleanupBlock(Cleanup, CleanupDominator); |
1232 | 0 | CleanupDominator->eraseFromParent(); |
1233 | 0 | } |
1234 | 552 | |
1235 | 552 | // Advance to the next element by adjusting the pointer type as necessary. |
1236 | 552 | llvm::Value *NextPtr = |
1237 | 552 | Builder.CreateConstInBoundsGEP1_32(ElementTy, CurPtr.getPointer(), 1, |
1238 | 552 | "array.next"); |
1239 | 552 | |
1240 | 552 | // Check whether we've gotten to the end of the array and, if so, |
1241 | 552 | // exit the loop. |
1242 | 552 | llvm::Value *IsEnd = Builder.CreateICmpEQ(NextPtr, EndPtr, "array.atend"); |
1243 | 552 | Builder.CreateCondBr(IsEnd, ContBB, LoopBB); |
1244 | 552 | CurPtrPhi->addIncoming(NextPtr, Builder.GetInsertBlock()); |
1245 | 552 | |
1246 | 552 | EmitBlock(ContBB); |
1247 | 552 | } |
1248 | | |
1249 | | static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E, |
1250 | | QualType ElementType, llvm::Type *ElementTy, |
1251 | | Address NewPtr, llvm::Value *NumElements, |
1252 | 2.09k | llvm::Value *AllocSizeWithoutCookie) { |
1253 | 2.09k | ApplyDebugLocation DL(CGF, E); |
1254 | 2.09k | if (E->isArray()) |
1255 | 552 | CGF.EmitNewArrayInitializer(E, ElementType, ElementTy, NewPtr, NumElements, |
1256 | 552 | AllocSizeWithoutCookie); |
1257 | 1.54k | else if (const Expr *1.54k Init1.54k = E->getInitializer()) |
1258 | 1.51k | StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr); |
1259 | 2.09k | } |
1260 | | |
1261 | | /// Emit a call to an operator new or operator delete function, as implicitly |
1262 | | /// created by new-expressions and delete-expressions. |
1263 | | static RValue EmitNewDeleteCall(CodeGenFunction &CGF, |
1264 | | const FunctionDecl *CalleeDecl, |
1265 | | const FunctionProtoType *CalleeType, |
1266 | 6.37k | const CallArgList &Args) { |
1267 | 6.37k | llvm::Instruction *CallOrInvoke; |
1268 | 6.37k | llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl); |
1269 | 6.37k | CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl); |
1270 | 6.37k | RValue RV = |
1271 | 6.37k | CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall( |
1272 | 6.37k | Args, CalleeType, /*chainCall=*/false), |
1273 | 6.37k | Callee, ReturnValueSlot(), Args, &CallOrInvoke); |
1274 | 6.37k | |
1275 | 6.37k | /// C++1y [expr.new]p10: |
1276 | 6.37k | /// [In a new-expression,] an implementation is allowed to omit a call |
1277 | 6.37k | /// to a replaceable global allocation function. |
1278 | 6.37k | /// |
1279 | 6.37k | /// We model such elidable calls with the 'builtin' attribute. |
1280 | 6.37k | llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr); |
1281 | 6.37k | if (CalleeDecl->isReplaceableGlobalAllocationFunction() && |
1282 | 6.37k | Fn5.94k && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)5.93k ) { |
1283 | 5.93k | // FIXME: Add addAttribute to CallSite. |
1284 | 5.93k | if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke)) |
1285 | 5.65k | CI->addAttribute(llvm::AttributeList::FunctionIndex, |
1286 | 5.65k | llvm::Attribute::Builtin); |
1287 | 282 | else if (llvm::InvokeInst *282 II282 = dyn_cast<llvm::InvokeInst>(CallOrInvoke)) |
1288 | 282 | II->addAttribute(llvm::AttributeList::FunctionIndex, |
1289 | 282 | llvm::Attribute::Builtin); |
1290 | 282 | else |
1291 | 0 | llvm_unreachable("unexpected kind of call instruction"); |
1292 | 5.93k | } |
1293 | 6.37k | |
1294 | 6.37k | return RV; |
1295 | 6.37k | } |
1296 | | |
1297 | | RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, |
1298 | | const Expr *Arg, |
1299 | 4 | bool IsDelete) { |
1300 | 4 | CallArgList Args; |
1301 | 4 | const Stmt *ArgS = Arg; |
1302 | 4 | EmitCallArgs(Args, *Type->param_type_begin(), llvm::makeArrayRef(ArgS)); |
1303 | 4 | // Find the allocation or deallocation function that we're calling. |
1304 | 4 | ASTContext &Ctx = getContext(); |
1305 | 4 | DeclarationName Name = Ctx.DeclarationNames |
1306 | 4 | .getCXXOperatorName(IsDelete ? OO_Delete2 : OO_New2 ); |
1307 | 4 | for (auto *Decl : Ctx.getTranslationUnitDecl()->lookup(Name)) |
1308 | 4 | if (auto *4 FD4 = dyn_cast<FunctionDecl>(Decl)) |
1309 | 4 | if (4 Ctx.hasSameType(FD->getType(), QualType(Type, 0))4 ) |
1310 | 4 | return EmitNewDeleteCall(*this, cast<FunctionDecl>(Decl), Type, Args); |
1311 | 0 | llvm_unreachable0 ("predeclared global operator new/delete is missing"); |
1312 | 0 | } |
1313 | | |
1314 | | static std::pair<bool, bool> |
1315 | 4.33k | shouldPassSizeAndAlignToUsualDelete(const FunctionProtoType *FPT) { |
1316 | 4.33k | auto AI = FPT->param_type_begin(), AE = FPT->param_type_end(); |
1317 | 4.33k | |
1318 | 4.33k | // The first argument is always a void*. |
1319 | 4.33k | ++AI; |
1320 | 4.33k | |
1321 | 4.33k | // Figure out what other parameters we should be implicitly passing. |
1322 | 4.33k | bool PassSize = false; |
1323 | 4.33k | bool PassAlignment = false; |
1324 | 4.33k | |
1325 | 4.33k | if (AI != AE && 4.33k (*AI)->isIntegerType()84 ) { |
1326 | 60 | PassSize = true; |
1327 | 60 | ++AI; |
1328 | 60 | } |
1329 | 4.33k | |
1330 | 4.33k | if (AI != AE && 4.33k (*AI)->isAlignValT()30 ) { |
1331 | 30 | PassAlignment = true; |
1332 | 30 | ++AI; |
1333 | 30 | } |
1334 | 4.33k | |
1335 | 4.33k | assert(AI == AE && "unexpected usual deallocation function parameter"); |
1336 | 4.33k | return {PassSize, PassAlignment}; |
1337 | 4.33k | } |
1338 | | |
1339 | | namespace { |
1340 | | /// A cleanup to call the given 'operator delete' function upon abnormal |
1341 | | /// exit from a new expression. Templated on a traits type that deals with |
1342 | | /// ensuring that the arguments dominate the cleanup if necessary. |
1343 | | template<typename Traits> |
1344 | | class CallDeleteDuringNew final : public EHScopeStack::Cleanup { |
1345 | | /// Type used to hold llvm::Value*s. |
1346 | | typedef typename Traits::ValueTy ValueTy; |
1347 | | /// Type used to hold RValues. |
1348 | | typedef typename Traits::RValueTy RValueTy; |
1349 | | struct PlacementArg { |
1350 | | RValueTy ArgValue; |
1351 | | QualType ArgType; |
1352 | | }; |
1353 | | |
1354 | | unsigned NumPlacementArgs : 31; |
1355 | | unsigned PassAlignmentToPlacementDelete : 1; |
1356 | | const FunctionDecl *OperatorDelete; |
1357 | | ValueTy Ptr; |
1358 | | ValueTy AllocSize; |
1359 | | CharUnits AllocAlign; |
1360 | | |
1361 | 68 | PlacementArg *getPlacementArgs() { |
1362 | 68 | return reinterpret_cast<PlacementArg *>(this + 1); |
1363 | 68 | } CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::getPlacementArgs() Line | Count | Source | 1361 | 60 | PlacementArg *getPlacementArgs() { | 1362 | 60 | return reinterpret_cast<PlacementArg *>(this + 1); | 1363 | 60 | } |
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::getPlacementArgs() Line | Count | Source | 1361 | 8 | PlacementArg *getPlacementArgs() { | 1362 | 8 | return reinterpret_cast<PlacementArg *>(this + 1); | 1363 | 8 | } |
|
1364 | | |
1365 | | public: |
1366 | 1.47k | static size_t getExtraSize(size_t NumPlacementArgs) { |
1367 | 1.47k | return NumPlacementArgs * sizeof(PlacementArg); |
1368 | 1.47k | } CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::getExtraSize(unsigned long) Line | Count | Source | 1366 | 16 | static size_t getExtraSize(size_t NumPlacementArgs) { | 1367 | 16 | return NumPlacementArgs * sizeof(PlacementArg); | 1368 | 16 | } |
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::getExtraSize(unsigned long) Line | Count | Source | 1366 | 1.45k | static size_t getExtraSize(size_t NumPlacementArgs) { | 1367 | 1.45k | return NumPlacementArgs * sizeof(PlacementArg); | 1368 | 1.45k | } |
|
1369 | | |
1370 | | CallDeleteDuringNew(size_t NumPlacementArgs, |
1371 | | const FunctionDecl *OperatorDelete, ValueTy Ptr, |
1372 | | ValueTy AllocSize, bool PassAlignmentToPlacementDelete, |
1373 | | CharUnits AllocAlign) |
1374 | | : NumPlacementArgs(NumPlacementArgs), |
1375 | | PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete), |
1376 | | OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize), |
1377 | 1.47k | AllocAlign(AllocAlign) {} CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::CallDeleteDuringNew(unsigned long, clang::FunctionDecl const*, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, bool, clang::CharUnits) Line | Count | Source | 1377 | 16 | AllocAlign(AllocAlign) {} |
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::CallDeleteDuringNew(unsigned long, clang::FunctionDecl const*, llvm::Value*, llvm::Value*, bool, clang::CharUnits) Line | Count | Source | 1377 | 1.45k | AllocAlign(AllocAlign) {} |
|
1378 | | |
1379 | 34 | void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) { |
1380 | 34 | assert(I < NumPlacementArgs && "index out of range"); |
1381 | 34 | getPlacementArgs()[I] = {Arg, Type}; |
1382 | 34 | } CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::setPlacementArg(unsigned int, clang::CodeGen::RValue, clang::QualType) Line | Count | Source | 1379 | 30 | void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) { | 1380 | 30 | assert(I < NumPlacementArgs && "index out of range"); | 1381 | 30 | getPlacementArgs()[I] = {Arg, Type}; | 1382 | 30 | } |
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::setPlacementArg(unsigned int, clang::CodeGen::DominatingValue<clang::CodeGen::RValue>::saved_type, clang::QualType) Line | Count | Source | 1379 | 4 | void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) { | 1380 | 4 | assert(I < NumPlacementArgs && "index out of range"); | 1381 | 4 | getPlacementArgs()[I] = {Arg, Type}; | 1382 | 4 | } |
|
1383 | | |
1384 | 1.01k | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1385 | 1.01k | const FunctionProtoType *FPT = |
1386 | 1.01k | OperatorDelete->getType()->getAs<FunctionProtoType>(); |
1387 | 1.01k | CallArgList DeleteArgs; |
1388 | 1.01k | |
1389 | 1.01k | // The first argument is always a void*. |
1390 | 1.01k | DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0)); |
1391 | 1.01k | |
1392 | 1.01k | // Figure out what other parameters we should be implicitly passing. |
1393 | 1.01k | bool PassSize = false; |
1394 | 1.01k | bool PassAlignment = false; |
1395 | 1.01k | if (NumPlacementArgs1.01k ) { |
1396 | 30 | // A placement deallocation function is implicitly passed an alignment |
1397 | 30 | // if the placement allocation function was, but is never passed a size. |
1398 | 30 | PassAlignment = PassAlignmentToPlacementDelete; |
1399 | 1.01k | } else { |
1400 | 982 | // For a non-placement new-expression, 'operator delete' can take a |
1401 | 982 | // size and/or an alignment if it has the right parameters. |
1402 | 982 | std::tie(PassSize, PassAlignment) = |
1403 | 982 | shouldPassSizeAndAlignToUsualDelete(FPT); |
1404 | 982 | } |
1405 | 1.01k | |
1406 | 1.01k | // The second argument can be a std::size_t (for non-placement delete). |
1407 | 1.01k | if (PassSize) |
1408 | 6 | DeleteArgs.add(Traits::get(CGF, AllocSize), |
1409 | 6 | CGF.getContext().getSizeType()); |
1410 | 1.01k | |
1411 | 1.01k | // The next (second or third) argument can be a std::align_val_t, which |
1412 | 1.01k | // is an enum whose underlying type is std::size_t. |
1413 | 1.01k | // FIXME: Use the right type as the parameter type. Note that in a call |
1414 | 1.01k | // to operator delete(size_t, ...), we may not have it available. |
1415 | 1.01k | if (PassAlignment) |
1416 | 27 | DeleteArgs.add(RValue::get(llvm::ConstantInt::get( |
1417 | 27 | CGF.SizeTy, AllocAlign.getQuantity())), |
1418 | 27 | CGF.getContext().getSizeType()); |
1419 | 1.01k | |
1420 | 1.01k | // Pass the rest of the arguments, which must match exactly. |
1421 | 1.04k | for (unsigned I = 0; I != NumPlacementArgs1.04k ; ++I34 ) { |
1422 | 34 | auto Arg = getPlacementArgs()[I]; |
1423 | 34 | DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType); |
1424 | 34 | } |
1425 | 1.01k | |
1426 | 1.01k | // Call 'operator delete'. |
1427 | 1.01k | EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); |
1428 | 1.01k | } CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::DirectCleanupTraits>::Emit(clang::CodeGen::CodeGenFunction&, clang::CodeGen::EHScopeStack::Cleanup::Flags) Line | Count | Source | 1384 | 1.00k | void Emit(CodeGenFunction &CGF, Flags flags) override { | 1385 | 1.00k | const FunctionProtoType *FPT = | 1386 | 1.00k | OperatorDelete->getType()->getAs<FunctionProtoType>(); | 1387 | 1.00k | CallArgList DeleteArgs; | 1388 | 1.00k | | 1389 | 1.00k | // The first argument is always a void*. | 1390 | 1.00k | DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0)); | 1391 | 1.00k | | 1392 | 1.00k | // Figure out what other parameters we should be implicitly passing. | 1393 | 1.00k | bool PassSize = false; | 1394 | 1.00k | bool PassAlignment = false; | 1395 | 1.00k | if (NumPlacementArgs1.00k ) { | 1396 | 28 | // A placement deallocation function is implicitly passed an alignment | 1397 | 28 | // if the placement allocation function was, but is never passed a size. | 1398 | 28 | PassAlignment = PassAlignmentToPlacementDelete; | 1399 | 1.00k | } else { | 1400 | 978 | // For a non-placement new-expression, 'operator delete' can take a | 1401 | 978 | // size and/or an alignment if it has the right parameters. | 1402 | 978 | std::tie(PassSize, PassAlignment) = | 1403 | 978 | shouldPassSizeAndAlignToUsualDelete(FPT); | 1404 | 978 | } | 1405 | 1.00k | | 1406 | 1.00k | // The second argument can be a std::size_t (for non-placement delete). | 1407 | 1.00k | if (PassSize) | 1408 | 6 | DeleteArgs.add(Traits::get(CGF, AllocSize), | 1409 | 6 | CGF.getContext().getSizeType()); | 1410 | 1.00k | | 1411 | 1.00k | // The next (second or third) argument can be a std::align_val_t, which | 1412 | 1.00k | // is an enum whose underlying type is std::size_t. 
| 1413 | 1.00k | // FIXME: Use the right type as the parameter type. Note that in a call | 1414 | 1.00k | // to operator delete(size_t, ...), we may not have it available. | 1415 | 1.00k | if (PassAlignment) | 1416 | 27 | DeleteArgs.add(RValue::get(llvm::ConstantInt::get( | 1417 | 27 | CGF.SizeTy, AllocAlign.getQuantity())), | 1418 | 27 | CGF.getContext().getSizeType()); | 1419 | 1.00k | | 1420 | 1.00k | // Pass the rest of the arguments, which must match exactly. | 1421 | 1.03k | for (unsigned I = 0; I != NumPlacementArgs1.03k ; ++I30 ) { | 1422 | 30 | auto Arg = getPlacementArgs()[I]; | 1423 | 30 | DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType); | 1424 | 30 | } | 1425 | 1.00k | | 1426 | 1.00k | // Call 'operator delete'. | 1427 | 1.00k | EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); | 1428 | 1.00k | } |
CGExprCXX.cpp:(anonymous namespace)::CallDeleteDuringNew<EnterNewDeleteCleanup(clang::CodeGen::CodeGenFunction&, clang::CXXNewExpr const*, clang::CodeGen::Address, llvm::Value*, clang::CharUnits, clang::CodeGen::CallArgList const&)::ConditionalCleanupTraits>::Emit(clang::CodeGen::CodeGenFunction&, clang::CodeGen::EHScopeStack::Cleanup::Flags) Line | Count | Source | 1384 | 6 | void Emit(CodeGenFunction &CGF, Flags flags) override { | 1385 | 6 | const FunctionProtoType *FPT = | 1386 | 6 | OperatorDelete->getType()->getAs<FunctionProtoType>(); | 1387 | 6 | CallArgList DeleteArgs; | 1388 | 6 | | 1389 | 6 | // The first argument is always a void*. | 1390 | 6 | DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0)); | 1391 | 6 | | 1392 | 6 | // Figure out what other parameters we should be implicitly passing. | 1393 | 6 | bool PassSize = false; | 1394 | 6 | bool PassAlignment = false; | 1395 | 6 | if (NumPlacementArgs6 ) { | 1396 | 2 | // A placement deallocation function is implicitly passed an alignment | 1397 | 2 | // if the placement allocation function was, but is never passed a size. | 1398 | 2 | PassAlignment = PassAlignmentToPlacementDelete; | 1399 | 6 | } else { | 1400 | 4 | // For a non-placement new-expression, 'operator delete' can take a | 1401 | 4 | // size and/or an alignment if it has the right parameters. | 1402 | 4 | std::tie(PassSize, PassAlignment) = | 1403 | 4 | shouldPassSizeAndAlignToUsualDelete(FPT); | 1404 | 4 | } | 1405 | 6 | | 1406 | 6 | // The second argument can be a std::size_t (for non-placement delete). | 1407 | 6 | if (PassSize) | 1408 | 0 | DeleteArgs.add(Traits::get(CGF, AllocSize), | 1409 | 0 | CGF.getContext().getSizeType()); | 1410 | 6 | | 1411 | 6 | // The next (second or third) argument can be a std::align_val_t, which | 1412 | 6 | // is an enum whose underlying type is std::size_t. | 1413 | 6 | // FIXME: Use the right type as the parameter type. 
Note that in a call | 1414 | 6 | // to operator delete(size_t, ...), we may not have it available. | 1415 | 6 | if (PassAlignment) | 1416 | 0 | DeleteArgs.add(RValue::get(llvm::ConstantInt::get( | 1417 | 0 | CGF.SizeTy, AllocAlign.getQuantity())), | 1418 | 0 | CGF.getContext().getSizeType()); | 1419 | 6 | | 1420 | 6 | // Pass the rest of the arguments, which must match exactly. | 1421 | 10 | for (unsigned I = 0; I != NumPlacementArgs10 ; ++I4 ) { | 1422 | 4 | auto Arg = getPlacementArgs()[I]; | 1423 | 4 | DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType); | 1424 | 4 | } | 1425 | 6 | | 1426 | 6 | // Call 'operator delete'. | 1427 | 6 | EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs); | 1428 | 6 | } |
|
1429 | | }; |
1430 | | } |
1431 | | |
1432 | | /// Enter a cleanup to call 'operator delete' if the initializer in a |
1433 | | /// new-expression throws. |
1434 | | static void EnterNewDeleteCleanup(CodeGenFunction &CGF, |
1435 | | const CXXNewExpr *E, |
1436 | | Address NewPtr, |
1437 | | llvm::Value *AllocSize, |
1438 | | CharUnits AllocAlign, |
1439 | 1.47k | const CallArgList &NewArgs) { |
1440 | 1.47k | unsigned NumNonPlacementArgs = E->passAlignment() ? 230 : 11.44k ; |
1441 | 1.47k | |
1442 | 1.47k | // If we're not inside a conditional branch, then the cleanup will |
1443 | 1.47k | // dominate and we can do the easier (and more efficient) thing. |
1444 | 1.47k | if (!CGF.isInConditionalBranch()1.47k ) { |
1445 | 1.45k | struct DirectCleanupTraits { |
1446 | 1.45k | typedef llvm::Value *ValueTy; |
1447 | 1.45k | typedef RValue RValueTy; |
1448 | 1.01k | static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); } |
1449 | 30 | static RValue get(CodeGenFunction &, RValueTy V) { return V; } |
1450 | 1.45k | }; |
1451 | 1.45k | |
1452 | 1.45k | typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup; |
1453 | 1.45k | |
1454 | 1.45k | DirectCleanup *Cleanup = CGF.EHStack |
1455 | 1.45k | .pushCleanupWithExtra<DirectCleanup>(EHCleanup, |
1456 | 1.45k | E->getNumPlacementArgs(), |
1457 | 1.45k | E->getOperatorDelete(), |
1458 | 1.45k | NewPtr.getPointer(), |
1459 | 1.45k | AllocSize, |
1460 | 1.45k | E->passAlignment(), |
1461 | 1.45k | AllocAlign); |
1462 | 1.48k | for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N1.48k ; ++I30 ) { |
1463 | 30 | auto &Arg = NewArgs[I + NumNonPlacementArgs]; |
1464 | 30 | Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty); |
1465 | 30 | } |
1466 | 1.45k | |
1467 | 1.45k | return; |
1468 | 1.45k | } |
1469 | 16 | |
1470 | 16 | // Otherwise, we need to save all this stuff. |
1471 | 16 | DominatingValue<RValue>::saved_type SavedNewPtr = |
1472 | 16 | DominatingValue<RValue>::save(CGF, RValue::get(NewPtr.getPointer())); |
1473 | 16 | DominatingValue<RValue>::saved_type SavedAllocSize = |
1474 | 16 | DominatingValue<RValue>::save(CGF, RValue::get(AllocSize)); |
1475 | 16 | |
1476 | 16 | struct ConditionalCleanupTraits { |
1477 | 16 | typedef DominatingValue<RValue>::saved_type ValueTy; |
1478 | 16 | typedef DominatingValue<RValue>::saved_type RValueTy; |
1479 | 10 | static RValue get(CodeGenFunction &CGF, ValueTy V) { |
1480 | 10 | return V.restore(CGF); |
1481 | 10 | } |
1482 | 16 | }; |
1483 | 16 | typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup; |
1484 | 16 | |
1485 | 16 | ConditionalCleanup *Cleanup = CGF.EHStack |
1486 | 16 | .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup, |
1487 | 16 | E->getNumPlacementArgs(), |
1488 | 16 | E->getOperatorDelete(), |
1489 | 16 | SavedNewPtr, |
1490 | 16 | SavedAllocSize, |
1491 | 16 | E->passAlignment(), |
1492 | 16 | AllocAlign); |
1493 | 20 | for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N20 ; ++I4 ) { |
1494 | 4 | auto &Arg = NewArgs[I + NumNonPlacementArgs]; |
1495 | 4 | Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV), |
1496 | 4 | Arg.Ty); |
1497 | 4 | } |
1498 | 1.47k | |
1499 | 1.47k | CGF.initFullExprCleanup(); |
1500 | 1.47k | } |
1501 | | |
1502 | 2.09k | llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) { |
1503 | 2.09k | // The element type being allocated. |
1504 | 2.09k | QualType allocType = getContext().getBaseElementType(E->getAllocatedType()); |
1505 | 2.09k | |
1506 | 2.09k | // 1. Build a call to the allocation function. |
1507 | 2.09k | FunctionDecl *allocator = E->getOperatorNew(); |
1508 | 2.09k | |
1509 | 2.09k | // If there is a brace-initializer, cannot allocate fewer elements than inits. |
1510 | 2.09k | unsigned minElements = 0; |
1511 | 2.09k | if (E->isArray() && 2.09k E->hasInitializer()552 ) { |
1512 | 156 | const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()); |
1513 | 156 | if (ILE && 156 ILE->isStringLiteralInit()21 ) |
1514 | 6 | minElements = |
1515 | 6 | cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe()) |
1516 | 6 | ->getSize().getZExtValue(); |
1517 | 150 | else if (150 ILE150 ) |
1518 | 15 | minElements = ILE->getNumInits(); |
1519 | 156 | } |
1520 | 2.09k | |
1521 | 2.09k | llvm::Value *numElements = nullptr; |
1522 | 2.09k | llvm::Value *allocSizeWithoutCookie = nullptr; |
1523 | 2.09k | llvm::Value *allocSize = |
1524 | 2.09k | EmitCXXNewAllocSize(*this, E, minElements, numElements, |
1525 | 2.09k | allocSizeWithoutCookie); |
1526 | 2.09k | CharUnits allocAlign = getContext().getTypeAlignInChars(allocType); |
1527 | 2.09k | |
1528 | 2.09k | // Emit the allocation call. If the allocator is a global placement |
1529 | 2.09k | // operator, just "inline" it directly. |
1530 | 2.09k | Address allocation = Address::invalid(); |
1531 | 2.09k | CallArgList allocatorArgs; |
1532 | 2.09k | if (allocator->isReservedGlobalPlacementOperator()2.09k ) { |
1533 | 78 | assert(E->getNumPlacementArgs() == 1); |
1534 | 78 | const Expr *arg = *E->placement_arguments().begin(); |
1535 | 78 | |
1536 | 78 | LValueBaseInfo BaseInfo; |
1537 | 78 | allocation = EmitPointerWithAlignment(arg, &BaseInfo); |
1538 | 78 | |
1539 | 78 | // The pointer expression will, in many cases, be an opaque void*. |
1540 | 78 | // In these cases, discard the computed alignment and use the |
1541 | 78 | // formal alignment of the allocated type. |
1542 | 78 | if (BaseInfo.getAlignmentSource() != AlignmentSource::Decl) |
1543 | 75 | allocation = Address(allocation.getPointer(), allocAlign); |
1544 | 78 | |
1545 | 78 | // Set up allocatorArgs for the call to operator delete if it's not |
1546 | 78 | // the reserved global operator. |
1547 | 78 | if (E->getOperatorDelete() && |
1548 | 78 | !E->getOperatorDelete()->isReservedGlobalPlacementOperator()2 ) { |
1549 | 2 | allocatorArgs.add(RValue::get(allocSize), getContext().getSizeType()); |
1550 | 2 | allocatorArgs.add(RValue::get(allocation.getPointer()), arg->getType()); |
1551 | 2 | } |
1552 | 78 | |
1553 | 2.09k | } else { |
1554 | 2.01k | const FunctionProtoType *allocatorType = |
1555 | 2.01k | allocator->getType()->castAs<FunctionProtoType>(); |
1556 | 2.01k | unsigned ParamsToSkip = 0; |
1557 | 2.01k | |
1558 | 2.01k | // The allocation size is the first argument. |
1559 | 2.01k | QualType sizeType = getContext().getSizeType(); |
1560 | 2.01k | allocatorArgs.add(RValue::get(allocSize), sizeType); |
1561 | 2.01k | ++ParamsToSkip; |
1562 | 2.01k | |
1563 | 2.01k | if (allocSize != allocSizeWithoutCookie2.01k ) { |
1564 | 69 | CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI. |
1565 | 69 | allocAlign = std::max(allocAlign, cookieAlign); |
1566 | 69 | } |
1567 | 2.01k | |
1568 | 2.01k | // The allocation alignment may be passed as the second argument. |
1569 | 2.01k | if (E->passAlignment()2.01k ) { |
1570 | 33 | QualType AlignValT = sizeType; |
1571 | 33 | if (allocatorType->getNumParams() > 133 ) { |
1572 | 27 | AlignValT = allocatorType->getParamType(1); |
1573 | 27 | assert(getContext().hasSameUnqualifiedType( |
1574 | 27 | AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(), |
1575 | 27 | sizeType) && |
1576 | 27 | "wrong type for alignment parameter"); |
1577 | 27 | ++ParamsToSkip; |
1578 | 33 | } else { |
1579 | 6 | // Corner case, passing alignment to 'operator new(size_t, ...)'. |
1580 | 6 | assert(allocator->isVariadic() && "can't pass alignment to allocator"); |
1581 | 6 | } |
1582 | 33 | allocatorArgs.add( |
1583 | 33 | RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())), |
1584 | 33 | AlignValT); |
1585 | 33 | } |
1586 | 2.01k | |
1587 | 2.01k | // FIXME: Why do we not pass a CalleeDecl here? |
1588 | 2.01k | EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(), |
1589 | 2.01k | /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip); |
1590 | 2.01k | |
1591 | 2.01k | RValue RV = |
1592 | 2.01k | EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs); |
1593 | 2.01k | |
1594 | 2.01k | // If this was a call to a global replaceable allocation function that does |
1595 | 2.01k | // not take an alignment argument, the allocator is known to produce |
1596 | 2.01k | // storage that's suitably aligned for any object that fits, up to a known |
1597 | 2.01k | // threshold. Otherwise assume it's suitably aligned for the allocated type. |
1598 | 2.01k | CharUnits allocationAlign = allocAlign; |
1599 | 2.01k | if (!E->passAlignment() && |
1600 | 2.01k | allocator->isReplaceableGlobalAllocationFunction()1.98k ) { |
1601 | 1.67k | unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>( |
1602 | 1.67k | Target.getNewAlign(), getContext().getTypeSize(allocType))); |
1603 | 1.67k | allocationAlign = std::max( |
1604 | 1.67k | allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign)); |
1605 | 1.67k | } |
1606 | 2.01k | |
1607 | 2.01k | allocation = Address(RV.getScalarVal(), allocationAlign); |
1608 | 2.01k | } |
1609 | 2.09k | |
1610 | 2.09k | // Emit a null check on the allocation result if the allocation |
1611 | 2.09k | // function is allowed to return null (because it has a non-throwing |
1612 | 2.09k | // exception spec or is the reserved placement new) and we have an |
1613 | 2.09k | // interesting initializer. |
1614 | 2.09k | bool nullCheck = E->shouldNullCheckAllocation(getContext()) && |
1615 | 10 | (!allocType.isPODType(getContext()) || 10 E->hasInitializer()0 ); |
1616 | 2.09k | |
1617 | 2.09k | llvm::BasicBlock *nullCheckBB = nullptr; |
1618 | 2.09k | llvm::BasicBlock *contBB = nullptr; |
1619 | 2.09k | |
1620 | 2.09k | // The null-check means that the initializer is conditionally |
1621 | 2.09k | // evaluated. |
1622 | 2.09k | ConditionalEvaluation conditional(*this); |
1623 | 2.09k | |
1624 | 2.09k | if (nullCheck2.09k ) { |
1625 | 10 | conditional.begin(*this); |
1626 | 10 | |
1627 | 10 | nullCheckBB = Builder.GetInsertBlock(); |
1628 | 10 | llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull"); |
1629 | 10 | contBB = createBasicBlock("new.cont"); |
1630 | 10 | |
1631 | 10 | llvm::Value *isNull = |
1632 | 10 | Builder.CreateIsNull(allocation.getPointer(), "new.isnull"); |
1633 | 10 | Builder.CreateCondBr(isNull, contBB, notNullBB); |
1634 | 10 | EmitBlock(notNullBB); |
1635 | 10 | } |
1636 | 2.09k | |
1637 | 2.09k | // If there's an operator delete, enter a cleanup to call it if an |
1638 | 2.09k | // exception is thrown. |
1639 | 2.09k | EHScopeStack::stable_iterator operatorDeleteCleanup; |
1640 | 2.09k | llvm::Instruction *cleanupDominator = nullptr; |
1641 | 2.09k | if (E->getOperatorDelete() && |
1642 | 2.09k | !E->getOperatorDelete()->isReservedGlobalPlacementOperator()1.47k ) { |
1643 | 1.47k | EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign, |
1644 | 1.47k | allocatorArgs); |
1645 | 1.47k | operatorDeleteCleanup = EHStack.stable_begin(); |
1646 | 1.47k | cleanupDominator = Builder.CreateUnreachable(); |
1647 | 1.47k | } |
1648 | 2.09k | |
1649 | 2.09k | assert((allocSize == allocSizeWithoutCookie) == |
1650 | 2.09k | CalculateCookiePadding(*this, E).isZero()); |
1651 | 2.09k | if (allocSize != allocSizeWithoutCookie2.09k ) { |
1652 | 69 | assert(E->isArray()); |
1653 | 69 | allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation, |
1654 | 69 | numElements, |
1655 | 69 | E, allocType); |
1656 | 69 | } |
1657 | 2.09k | |
1658 | 2.09k | llvm::Type *elementTy = ConvertTypeForMem(allocType); |
1659 | 2.09k | Address result = Builder.CreateElementBitCast(allocation, elementTy); |
1660 | 2.09k | |
1661 | 2.09k | // Passing pointer through invariant.group.barrier to avoid propagation of |
1662 | 2.09k | // vptrs information which may be included in previous type. |
1663 | 2.09k | // To not break LTO with different optimizations levels, we do it regardless |
1664 | 2.09k | // of optimization level. |
1665 | 2.09k | if (CGM.getCodeGenOpts().StrictVTablePointers && |
1666 | 16 | allocator->isReservedGlobalPlacementOperator()) |
1667 | 3 | result = Address(Builder.CreateInvariantGroupBarrier(result.getPointer()), |
1668 | 3 | result.getAlignment()); |
1669 | 2.09k | |
1670 | 2.09k | EmitNewInitializer(*this, E, allocType, elementTy, result, numElements, |
1671 | 2.09k | allocSizeWithoutCookie); |
1672 | 2.09k | if (E->isArray()2.09k ) { |
1673 | 552 | // NewPtr is a pointer to the base element type. If we're |
1674 | 552 | // allocating an array of arrays, we'll need to cast back to the |
1675 | 552 | // array pointer type. |
1676 | 552 | llvm::Type *resultType = ConvertTypeForMem(E->getType()); |
1677 | 552 | if (result.getType() != resultType) |
1678 | 20 | result = Builder.CreateBitCast(result, resultType); |
1679 | 552 | } |
1680 | 2.09k | |
1681 | 2.09k | // Deactivate the 'operator delete' cleanup if we finished |
1682 | 2.09k | // initialization. |
1683 | 2.09k | if (operatorDeleteCleanup.isValid()2.09k ) { |
1684 | 1.47k | DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator); |
1685 | 1.47k | cleanupDominator->eraseFromParent(); |
1686 | 1.47k | } |
1687 | 2.09k | |
1688 | 2.09k | llvm::Value *resultPtr = result.getPointer(); |
1689 | 2.09k | if (nullCheck2.09k ) { |
1690 | 10 | conditional.end(*this); |
1691 | 10 | |
1692 | 10 | llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); |
1693 | 10 | EmitBlock(contBB); |
1694 | 10 | |
1695 | 10 | llvm::PHINode *PHI = Builder.CreatePHI(resultPtr->getType(), 2); |
1696 | 10 | PHI->addIncoming(resultPtr, notNullBB); |
1697 | 10 | PHI->addIncoming(llvm::Constant::getNullValue(resultPtr->getType()), |
1698 | 10 | nullCheckBB); |
1699 | 10 | |
1700 | 10 | resultPtr = PHI; |
1701 | 10 | } |
1702 | 2.09k | |
1703 | 2.09k | return resultPtr; |
1704 | 2.09k | } |
1705 | | |
1706 | | void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD, |
1707 | | llvm::Value *Ptr, QualType DeleteTy, |
1708 | | llvm::Value *NumElements, |
1709 | 3.34k | CharUnits CookieSize) { |
1710 | 3.34k | assert((!NumElements && CookieSize.isZero()) || |
1711 | 3.34k | DeleteFD->getOverloadedOperator() == OO_Array_Delete); |
1712 | 3.34k | |
1713 | 3.34k | const FunctionProtoType *DeleteFTy = |
1714 | 3.34k | DeleteFD->getType()->getAs<FunctionProtoType>(); |
1715 | 3.34k | |
1716 | 3.34k | CallArgList DeleteArgs; |
1717 | 3.34k | |
1718 | 3.34k | std::pair<bool, bool> PassSizeAndAlign = |
1719 | 3.34k | shouldPassSizeAndAlignToUsualDelete(DeleteFTy); |
1720 | 3.34k | |
1721 | 3.34k | auto ParamTypeIt = DeleteFTy->param_type_begin(); |
1722 | 3.34k | |
1723 | 3.34k | // Pass the pointer itself. |
1724 | 3.34k | QualType ArgTy = *ParamTypeIt++; |
1725 | 3.34k | llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy)); |
1726 | 3.34k | DeleteArgs.add(RValue::get(DeletePtr), ArgTy); |
1727 | 3.34k | |
1728 | 3.34k | // Pass the size if the delete function has a size_t parameter. |
1729 | 3.34k | if (PassSizeAndAlign.first3.34k ) { |
1730 | 54 | QualType SizeType = *ParamTypeIt++; |
1731 | 54 | CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy); |
1732 | 54 | llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType), |
1733 | 54 | DeleteTypeSize.getQuantity()); |
1734 | 54 | |
1735 | 54 | // For array new, multiply by the number of elements. |
1736 | 54 | if (NumElements) |
1737 | 23 | Size = Builder.CreateMul(Size, NumElements); |
1738 | 54 | |
1739 | 54 | // If there is a cookie, add the cookie size. |
1740 | 54 | if (!CookieSize.isZero()) |
1741 | 23 | Size = Builder.CreateAdd( |
1742 | 23 | Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity())); |
1743 | 54 | |
1744 | 54 | DeleteArgs.add(RValue::get(Size), SizeType); |
1745 | 54 | } |
1746 | 3.34k | |
1747 | 3.34k | // Pass the alignment if the delete function has an align_val_t parameter. |
1748 | 3.34k | if (PassSizeAndAlign.second3.34k ) { |
1749 | 15 | QualType AlignValType = *ParamTypeIt++; |
1750 | 15 | CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits( |
1751 | 15 | getContext().getTypeAlignIfKnown(DeleteTy)); |
1752 | 15 | llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType), |
1753 | 15 | DeleteTypeAlign.getQuantity()); |
1754 | 15 | DeleteArgs.add(RValue::get(Align), AlignValType); |
1755 | 15 | } |
1756 | 3.34k | |
1757 | 3.34k | assert(ParamTypeIt == DeleteFTy->param_type_end() && |
1758 | 3.34k | "unknown parameter to usual delete function"); |
1759 | 3.34k | |
1760 | 3.34k | // Emit the call to delete. |
1761 | 3.34k | EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs); |
1762 | 3.34k | } |
1763 | | |
1764 | | namespace { |
1765 | | /// Calls the given 'operator delete' on a single object. |
1766 | | struct CallObjectDelete final : EHScopeStack::Cleanup { |
1767 | | llvm::Value *Ptr; |
1768 | | const FunctionDecl *OperatorDelete; |
1769 | | QualType ElementType; |
1770 | | |
1771 | | CallObjectDelete(llvm::Value *Ptr, |
1772 | | const FunctionDecl *OperatorDelete, |
1773 | | QualType ElementType) |
1774 | 387 | : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {} |
1775 | | |
1776 | 621 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1777 | 621 | CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType); |
1778 | 621 | } |
1779 | | }; |
1780 | | } |
1781 | | |
/// Push a cleanup that calls the given 'operator delete' on CompletePtr
/// (the pointer to the complete object) on both the normal and EH paths.
void
CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
                                             llvm::Value *CompletePtr,
                                             QualType ElementType) {
  EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
                                        OperatorDelete, ElementType);
}
1789 | | |
/// Emit the code for deleting a single object: run its destructor (or the
/// appropriate ARC release), then call the selected 'operator delete'.
static void EmitObjectDelete(CodeGenFunction &CGF,
                             const CXXDeleteExpr *DE,
                             Address Ptr,
                             QualType ElementType) {
  // C++11 [expr.delete]p3:
  //   If the static type of the object to be deleted is different from its
  //   dynamic type, the static type shall be a base class of the dynamic type
  //   of the object to be deleted and the static type shall have a virtual
  //   destructor or the behavior is undefined.
  CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
                    DE->getExprLoc(), Ptr.getPointer(),
                    ElementType);

  // Find the destructor for the type, if applicable.  If the
  // destructor is virtual, we'll just emit the vcall and return:
  // the ABI handles both the virtual destructor dispatch and the
  // operator delete call in that case.
  const CXXDestructorDecl *Dtor = nullptr;
  if (const RecordType *RT = ElementType->getAs<RecordType>()) {
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
      Dtor = RD->getDestructor();

      if (Dtor->isVirtual()) {
        CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
                                                    Dtor);
        return;
      }
    }
  }

  // Make sure that we call delete even if the dtor throws.
  // This doesn't have to be a conditional cleanup because we're going
  // to pop it off in a second.
  const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
  CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
                                            Ptr.getPointer(),
                                            OperatorDelete, ElementType);

  if (Dtor)
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                              /*ForVirtualBase=*/false,
                              /*Delegating=*/false,
                              Ptr);
  else if (auto Lifetime = ElementType.getObjCLifetime()) {
    // No C++ destructor, but the element has ARC ownership: releasing
    // the reference plays the role of destruction.
    switch (Lifetime) {
    case Qualifiers::OCL_None:
    case Qualifiers::OCL_ExplicitNone:
    case Qualifiers::OCL_Autoreleasing:
      break;

    case Qualifiers::OCL_Strong:
      CGF.EmitARCDestroyStrong(Ptr, ARCPreciseLifetime);
      break;

    case Qualifiers::OCL_Weak:
      CGF.EmitARCDestroyWeak(Ptr);
      break;
    }
  }

  // Pop the cleanup, emitting the operator delete call on the normal path.
  CGF.PopCleanupBlock();
}
1852 | | |
1853 | | namespace { |
1854 | | /// Calls the given 'operator delete' on an array of objects. |
1855 | | struct CallArrayDelete final : EHScopeStack::Cleanup { |
1856 | | llvm::Value *Ptr; |
1857 | | const FunctionDecl *OperatorDelete; |
1858 | | llvm::Value *NumElements; |
1859 | | QualType ElementType; |
1860 | | CharUnits CookieSize; |
1861 | | |
1862 | | CallArrayDelete(llvm::Value *Ptr, |
1863 | | const FunctionDecl *OperatorDelete, |
1864 | | llvm::Value *NumElements, |
1865 | | QualType ElementType, |
1866 | | CharUnits CookieSize) |
1867 | | : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements), |
1868 | 699 | ElementType(ElementType), CookieSize(CookieSize) {} |
1869 | | |
1870 | 722 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
1871 | 722 | CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements, |
1872 | 722 | CookieSize); |
1873 | 722 | } |
1874 | | }; |
1875 | | } |
1876 | | |
1877 | | /// Emit the code for deleting an array of objects. |
1878 | | static void EmitArrayDelete(CodeGenFunction &CGF, |
1879 | | const CXXDeleteExpr *E, |
1880 | | Address deletedPtr, |
1881 | 699 | QualType elementType) { |
1882 | 699 | llvm::Value *numElements = nullptr; |
1883 | 699 | llvm::Value *allocatedPtr = nullptr; |
1884 | 699 | CharUnits cookieSize; |
1885 | 699 | CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType, |
1886 | 699 | numElements, allocatedPtr, cookieSize); |
1887 | 699 | |
1888 | 699 | assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer"); |
1889 | 699 | |
1890 | 699 | // Make sure that we call delete even if one of the dtors throws. |
1891 | 699 | const FunctionDecl *operatorDelete = E->getOperatorDelete(); |
1892 | 699 | CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup, |
1893 | 699 | allocatedPtr, operatorDelete, |
1894 | 699 | numElements, elementType, |
1895 | 699 | cookieSize); |
1896 | 699 | |
1897 | 699 | // Destroy the elements. |
1898 | 699 | if (QualType::DestructionKind dtorKind699 = elementType.isDestructedType()) { |
1899 | 58 | assert(numElements && "no element count for a type with a destructor!"); |
1900 | 58 | |
1901 | 58 | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); |
1902 | 58 | CharUnits elementAlign = |
1903 | 58 | deletedPtr.getAlignment().alignmentOfArrayElement(elementSize); |
1904 | 58 | |
1905 | 58 | llvm::Value *arrayBegin = deletedPtr.getPointer(); |
1906 | 58 | llvm::Value *arrayEnd = |
1907 | 58 | CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end"); |
1908 | 58 | |
1909 | 58 | // Note that it is legal to allocate a zero-length array, and we |
1910 | 58 | // can never fold the check away because the length should always |
1911 | 58 | // come from a cookie. |
1912 | 58 | CGF.emitArrayDestroy(arrayBegin, arrayEnd, elementType, elementAlign, |
1913 | 58 | CGF.getDestroyer(dtorKind), |
1914 | 58 | /*checkZeroLength*/ true, |
1915 | 58 | CGF.needsEHCleanup(dtorKind)); |
1916 | 58 | } |
1917 | 699 | |
1918 | 699 | // Pop the cleanup block. |
1919 | 699 | CGF.PopCleanupBlock(); |
1920 | 699 | } |
1921 | | |
// Emit a delete-expression: null-check the operand, normalize a
// pointer-to-array operand, then dispatch to the array or single-object
// deletion helper.
void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
  const Expr *Arg = E->getArgument();
  Address Ptr = EmitPointerWithAlignment(Arg);

  // Null check the pointer.
  //
  // C++ [expr.delete]p2: deleting a null pointer has no effect, so
  // branch straight to the end block in that case.
  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");

  llvm::Value *IsNull = Builder.CreateIsNull(Ptr.getPointer(), "isnull");

  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
  EmitBlock(DeleteNotNull);

  // We might be deleting a pointer to array.  If so, GEP down to the
  // first non-array element.
  // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
  if (DeleteTy->isConstantArrayType()) {
    llvm::Value *Zero = Builder.getInt32(0);
    SmallVector<llvm::Value*,8> GEP;

    GEP.push_back(Zero); // point at the outermost array

    // For each layer of array type we're pointing at:
    while (const ConstantArrayType *Arr
             = getContext().getAsConstantArrayType(DeleteTy)) {
      // 1. Unpeel the array type.
      DeleteTy = Arr->getElementType();

      // 2. GEP to the first element of the array.
      GEP.push_back(Zero);
    }

    Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
                  Ptr.getAlignment());
  }

  assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());

  // Each helper also emits the destructor calls and the matching
  // 'operator delete' invocation.
  if (E->isArrayForm()) {
    EmitArrayDelete(*this, E, Ptr, DeleteTy);
  } else {
    EmitObjectDelete(*this, E, Ptr, DeleteTy);
  }

  EmitBlock(DeleteEnd);
}
1969 | | |
1970 | 49 | static bool isGLValueFromPointerDeref(const Expr *E) { |
1971 | 49 | E = E->IgnoreParens(); |
1972 | 49 | |
1973 | 49 | if (const auto *CE49 = dyn_cast<CastExpr>(E)) { |
1974 | 6 | if (!CE->getSubExpr()->isGLValue()) |
1975 | 0 | return false; |
1976 | 6 | return isGLValueFromPointerDeref(CE->getSubExpr()); |
1977 | 6 | } |
1978 | 43 | |
1979 | 43 | if (const auto *43 OVE43 = dyn_cast<OpaqueValueExpr>(E)) |
1980 | 4 | return isGLValueFromPointerDeref(OVE->getSourceExpr()); |
1981 | 39 | |
1982 | 39 | if (const auto *39 BO39 = dyn_cast<BinaryOperator>(E)) |
1983 | 1 | if (1 BO->getOpcode() == BO_Comma1 ) |
1984 | 1 | return isGLValueFromPointerDeref(BO->getRHS()); |
1985 | 38 | |
1986 | 38 | if (const auto *38 ACO38 = dyn_cast<AbstractConditionalOperator>(E)) |
1987 | 7 | return isGLValueFromPointerDeref(ACO->getTrueExpr()) || |
1988 | 3 | isGLValueFromPointerDeref(ACO->getFalseExpr()); |
1989 | 31 | |
1990 | 31 | // C++11 [expr.sub]p1: |
1991 | 31 | // The expression E1[E2] is identical (by definition) to *((E1)+(E2)) |
1992 | 31 | if (31 isa<ArraySubscriptExpr>(E)31 ) |
1993 | 2 | return true; |
1994 | 29 | |
1995 | 29 | if (const auto *29 UO29 = dyn_cast<UnaryOperator>(E)) |
1996 | 17 | if (17 UO->getOpcode() == UO_Deref17 ) |
1997 | 17 | return true; |
1998 | 12 | |
1999 | 12 | return false; |
2000 | 12 | } |
2001 | | |
2002 | | static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E, |
2003 | 28 | llvm::Type *StdTypeInfoPtrTy) { |
2004 | 28 | // Get the vtable pointer. |
2005 | 28 | Address ThisPtr = CGF.EmitLValue(E).getAddress(); |
2006 | 28 | |
2007 | 28 | // C++ [expr.typeid]p2: |
2008 | 28 | // If the glvalue expression is obtained by applying the unary * operator to |
2009 | 28 | // a pointer and the pointer is a null pointer value, the typeid expression |
2010 | 28 | // throws the std::bad_typeid exception. |
2011 | 28 | // |
2012 | 28 | // However, this paragraph's intent is not clear. We choose a very generous |
2013 | 28 | // interpretation which implores us to consider comma operators, conditional |
2014 | 28 | // operators, parentheses and other such constructs. |
2015 | 28 | QualType SrcRecordTy = E->getType(); |
2016 | 28 | if (CGF.CGM.getCXXABI().shouldTypeidBeNullChecked( |
2017 | 28 | isGLValueFromPointerDeref(E), SrcRecordTy)) { |
2018 | 18 | llvm::BasicBlock *BadTypeidBlock = |
2019 | 18 | CGF.createBasicBlock("typeid.bad_typeid"); |
2020 | 18 | llvm::BasicBlock *EndBlock = CGF.createBasicBlock("typeid.end"); |
2021 | 18 | |
2022 | 18 | llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr.getPointer()); |
2023 | 18 | CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock); |
2024 | 18 | |
2025 | 18 | CGF.EmitBlock(BadTypeidBlock); |
2026 | 18 | CGF.CGM.getCXXABI().EmitBadTypeidCall(CGF); |
2027 | 18 | CGF.EmitBlock(EndBlock); |
2028 | 18 | } |
2029 | 28 | |
2030 | 28 | return CGF.CGM.getCXXABI().EmitTypeid(CGF, SrcRecordTy, ThisPtr, |
2031 | 28 | StdTypeInfoPtrTy); |
2032 | 28 | } |
2033 | | |
2034 | 231 | llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) { |
2035 | 231 | llvm::Type *StdTypeInfoPtrTy = |
2036 | 231 | ConvertType(E->getType())->getPointerTo(); |
2037 | 231 | |
2038 | 231 | if (E->isTypeOperand()231 ) { |
2039 | 180 | llvm::Constant *TypeInfo = |
2040 | 180 | CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext())); |
2041 | 180 | return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy); |
2042 | 180 | } |
2043 | 51 | |
2044 | 51 | // C++ [expr.typeid]p2: |
2045 | 51 | // When typeid is applied to a glvalue expression whose type is a |
2046 | 51 | // polymorphic class type, the result refers to a std::type_info object |
2047 | 51 | // representing the type of the most derived object (that is, the dynamic |
2048 | 51 | // type) to which the glvalue refers. |
2049 | 51 | if (51 E->isPotentiallyEvaluated()51 ) |
2050 | 28 | return EmitTypeidFromVTable(*this, E->getExprOperand(), |
2051 | 28 | StdTypeInfoPtrTy); |
2052 | 23 | |
2053 | 23 | QualType OperandTy = E->getExprOperand()->getType(); |
2054 | 23 | return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy), |
2055 | 23 | StdTypeInfoPtrTy); |
2056 | 23 | } |
2057 | | |
2058 | | static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF, |
2059 | 2 | QualType DestTy) { |
2060 | 2 | llvm::Type *DestLTy = CGF.ConvertType(DestTy); |
2061 | 2 | if (DestTy->isPointerType()) |
2062 | 1 | return llvm::Constant::getNullValue(DestLTy); |
2063 | 1 | |
2064 | 1 | /// C++ [expr.dynamic.cast]p9: |
2065 | 1 | /// A failed cast to reference type throws std::bad_cast |
2066 | 1 | if (1 !CGF.CGM.getCXXABI().EmitBadCastCall(CGF)1 ) |
2067 | 0 | return nullptr; |
2068 | 1 | |
2069 | 1 | CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end")); |
2070 | 1 | return llvm::UndefValue::get(DestLTy); |
2071 | 1 | } |
2072 | | |
// Emit a dynamic_cast expression: optional source null check, ABI call to
// perform the cast, and a PHI merging the null and not-null results.
llvm::Value *CodeGenFunction::EmitDynamicCast(Address ThisAddr,
                                              const CXXDynamicCastExpr *DCE) {
  CGM.EmitExplicitCastExprType(DCE, this);
  QualType DestTy = DCE->getTypeAsWritten();

  // If Sema determined the cast always fails, emit the constant failure
  // value (or the bad_cast throw) directly.
  if (DCE->isAlwaysNull())
    if (llvm::Value *T = EmitDynamicCastToNull(*this, DestTy))
      return T;

  QualType SrcTy = DCE->getSubExpr()->getType();

  // C++ [expr.dynamic.cast]p7:
  //   If T is "pointer to cv void," then the result is a pointer to the most
  //   derived object pointed to by v.
  const PointerType *DestPTy = DestTy->getAs<PointerType>();

  bool isDynamicCastToVoid;
  QualType SrcRecordTy;
  QualType DestRecordTy;
  if (DestPTy) {
    isDynamicCastToVoid = DestPTy->getPointeeType()->isVoidType();
    SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
    DestRecordTy = DestPTy->getPointeeType();
  } else {
    // Reference cast: the operand's own type is the source record type.
    isDynamicCastToVoid = false;
    SrcRecordTy = SrcTy;
    DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
  }

  assert(SrcRecordTy->isRecordType() && "source type must be a record type!");

  // C++ [expr.dynamic.cast]p4:
  //   If the value of v is a null pointer value in the pointer case, the result
  //   is the null pointer value of type T.
  bool ShouldNullCheckSrcValue =
      CGM.getCXXABI().shouldDynamicCastCallBeNullChecked(SrcTy->isPointerType(),
                                                         SrcRecordTy);

  llvm::BasicBlock *CastNull = nullptr;
  llvm::BasicBlock *CastNotNull = nullptr;
  llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");

  if (ShouldNullCheckSrcValue) {
    CastNull = createBasicBlock("dynamic_cast.null");
    CastNotNull = createBasicBlock("dynamic_cast.notnull");

    llvm::Value *IsNull = Builder.CreateIsNull(ThisAddr.getPointer());
    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
    EmitBlock(CastNotNull);
  }

  llvm::Value *Value;
  if (isDynamicCastToVoid) {
    Value = CGM.getCXXABI().EmitDynamicCastToVoid(*this, ThisAddr, SrcRecordTy,
                                                  DestTy);
  } else {
    assert(DestRecordTy->isRecordType() &&
           "destination type must be a record type!");
    Value = CGM.getCXXABI().EmitDynamicCastCall(*this, ThisAddr, SrcRecordTy,
                                                DestTy, DestRecordTy, CastEnd);
    // The ABI call may have emitted additional blocks; record the block
    // that actually produces Value for the PHI incoming edge below.
    CastNotNull = Builder.GetInsertBlock();
  }

  if (ShouldNullCheckSrcValue) {
    EmitBranch(CastEnd);

    // The null path falls through to the end block with no work.
    EmitBlock(CastNull);
    EmitBranch(CastEnd);
  }

  EmitBlock(CastEnd);

  if (ShouldNullCheckSrcValue) {
    // Merge the cast result with the constant null from the null path.
    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
    PHI->addIncoming(Value, CastNotNull);
    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);

    Value = PHI;
  }

  return Value;
}
2155 | | |
2156 | 390 | void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) { |
2157 | 390 | RunCleanupsScope Scope(*this); |
2158 | 390 | LValue SlotLV = MakeAddrLValue(Slot.getAddress(), E->getType()); |
2159 | 390 | |
2160 | 390 | CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); |
2161 | 390 | for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), |
2162 | 390 | e = E->capture_init_end(); |
2163 | 967 | i != e967 ; ++i, ++CurField577 ) { |
2164 | 577 | // Emit initialization |
2165 | 577 | LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField); |
2166 | 577 | if (CurField->hasCapturedVLAType()577 ) { |
2167 | 24 | auto VAT = CurField->getCapturedVLAType(); |
2168 | 24 | EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV); |
2169 | 577 | } else { |
2170 | 553 | EmitInitializerForField(*CurField, LV, *i); |
2171 | 553 | } |
2172 | 577 | } |
2173 | 390 | } |