/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
Line | Count | Source |
1 | | //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Aggregate Expr nodes as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGCXXABI.h" |
14 | | #include "CGObjCRuntime.h" |
15 | | #include "CodeGenFunction.h" |
16 | | #include "CodeGenModule.h" |
17 | | #include "ConstantEmitter.h" |
18 | | #include "TargetInfo.h" |
19 | | #include "clang/AST/ASTContext.h" |
20 | | #include "clang/AST/Attr.h" |
21 | | #include "clang/AST/DeclCXX.h" |
22 | | #include "clang/AST/DeclTemplate.h" |
23 | | #include "clang/AST/StmtVisitor.h" |
24 | | #include "llvm/IR/Constants.h" |
25 | | #include "llvm/IR/Function.h" |
26 | | #include "llvm/IR/GlobalVariable.h" |
27 | | #include "llvm/IR/IntrinsicInst.h" |
28 | | #include "llvm/IR/Intrinsics.h" |
29 | | using namespace clang; |
30 | | using namespace CodeGen; |
31 | | |
32 | | //===----------------------------------------------------------------------===// |
33 | | // Aggregate Expression Emitter |
34 | | //===----------------------------------------------------------------------===// |
35 | | |
36 | | namespace { |
37 | | class AggExprEmitter : public StmtVisitor<AggExprEmitter> { |
38 | | CodeGenFunction &CGF; |
39 | | CGBuilderTy &Builder; |
40 | | AggValueSlot Dest; |
41 | | bool IsResultUnused; |
42 | | |
43 | 72.4k | AggValueSlot EnsureSlot(QualType T) { |
44 | 72.4k | if (!Dest.isIgnored()) return Dest;
45 | 75 | return CGF.CreateAggTemp(T, "agg.tmp.ensured"); |
46 | 72.4k | } |
47 | 7.80k | void EnsureDest(QualType T) { |
48 | 7.80k | if (!Dest.isIgnored()) return;
49 | 164 | Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured"); |
50 | 164 | } |
51 | | |
52 | | // Calls `Fn` with a valid return value slot, potentially creating a temporary |
53 | | // to do so. If a temporary is created, an appropriate copy into `Dest` will |
54 | | // be emitted, as will lifetime markers. |
55 | | // |
56 | | // The given function should take a ReturnValueSlot, and return an RValue that |
57 | | // points to said slot. |
58 | | void withReturnValueSlot(const Expr *E, |
59 | | llvm::function_ref<RValue(ReturnValueSlot)> Fn); |
60 | | |
61 | | public: |
62 | | AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused) |
63 | | : CGF(cgf), Builder(CGF.Builder), Dest(Dest), |
64 | 84.4k | IsResultUnused(IsResultUnused) { } |
65 | | |
66 | | //===--------------------------------------------------------------------===// |
67 | | // Utilities |
68 | | //===--------------------------------------------------------------------===// |
69 | | |
70 | | /// EmitAggLoadOfLValue - Given an expression with aggregate type that |
71 | | /// represents a value lvalue, this method emits the address of the lvalue, |
72 | | /// then loads the result into DestPtr. |
73 | | void EmitAggLoadOfLValue(const Expr *E); |
74 | | |
75 | | enum ExprValueKind { |
76 | | EVK_RValue, |
77 | | EVK_NonRValue |
78 | | }; |
79 | | |
80 | | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
81 | | /// SrcIsRValue is true if source comes from an RValue. |
82 | | void EmitFinalDestCopy(QualType type, const LValue &src, |
83 | | ExprValueKind SrcValueKind = EVK_NonRValue); |
84 | | void EmitFinalDestCopy(QualType type, RValue src); |
85 | | void EmitCopy(QualType type, const AggValueSlot &dest, |
86 | | const AggValueSlot &src); |
87 | | |
88 | | void EmitMoveFromReturnSlot(const Expr *E, RValue Src); |
89 | | |
90 | | void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, |
91 | | QualType ArrayQTy, InitListExpr *E); |
92 | | |
93 | 3.66k | AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) { |
94 | 3.66k | if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
95 | 20 | return AggValueSlot::NeedsGCBarriers; |
96 | 3.64k | return AggValueSlot::DoesNotNeedGCBarriers; |
97 | 3.66k | } |
98 | | |
99 | | bool TypeRequiresGCollection(QualType T); |
100 | | |
101 | | //===--------------------------------------------------------------------===// |
102 | | // Visitor Methods |
103 | | //===--------------------------------------------------------------------===// |
104 | | |
105 | 128k | void Visit(Expr *E) { |
106 | 128k | ApplyDebugLocation DL(CGF, E); |
107 | 128k | StmtVisitor<AggExprEmitter>::Visit(E); |
108 | 128k | } |
109 | | |
110 | 0 | void VisitStmt(Stmt *S) { |
111 | 0 | CGF.ErrorUnsupported(S, "aggregate expression"); |
112 | 0 | } |
113 | 102 | void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); } |
114 | 0 | void VisitGenericSelectionExpr(GenericSelectionExpr *GE) { |
115 | 0 | Visit(GE->getResultExpr()); |
116 | 0 | } |
117 | 10 | void VisitCoawaitExpr(CoawaitExpr *E) { |
118 | 10 | CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused); |
119 | 10 | } |
120 | 0 | void VisitCoyieldExpr(CoyieldExpr *E) { |
121 | 0 | CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused); |
122 | 0 | } |
123 | 0 | void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); } |
124 | 639 | void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); } |
125 | 0 | void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { |
126 | 0 | return Visit(E->getReplacement()); |
127 | 0 | } |
128 | | |
129 | 6 | void VisitConstantExpr(ConstantExpr *E) { |
130 | 6 | EnsureDest(E->getType()); |
131 | | |
132 | 6 | if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) { |
133 | 5 | CGF.EmitAggregateStore(Result, Dest.getAddress(), |
134 | 5 | E->getType().isVolatileQualified()); |
135 | 5 | return; |
136 | 5 | } |
137 | 1 | return Visit(E->getSubExpr()); |
138 | 6 | } |
139 | | |
140 | | // l-values. |
141 | 2.54k | void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); } |
142 | 7 | void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); } |
143 | 98 | void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); } |
144 | 23 | void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); } |
145 | | void VisitCompoundLiteralExpr(CompoundLiteralExpr *E); |
146 | 6 | void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { |
147 | 6 | EmitAggLoadOfLValue(E); |
148 | 6 | } |
149 | 0 | void VisitPredefinedExpr(const PredefinedExpr *E) { |
150 | 0 | EmitAggLoadOfLValue(E); |
151 | 0 | } |
152 | | |
153 | | // Operators. |
154 | | void VisitCastExpr(CastExpr *E); |
155 | | void VisitCallExpr(const CallExpr *E); |
156 | | void VisitStmtExpr(const StmtExpr *E); |
157 | | void VisitBinaryOperator(const BinaryOperator *BO); |
158 | | void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO); |
159 | | void VisitBinAssign(const BinaryOperator *E); |
160 | | void VisitBinComma(const BinaryOperator *E); |
161 | | void VisitBinCmp(const BinaryOperator *E); |
162 | 0 | void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { |
163 | 0 | Visit(E->getSemanticForm()); |
164 | 0 | } |
165 | | |
166 | | void VisitObjCMessageExpr(ObjCMessageExpr *E); |
167 | 5 | void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { |
168 | 5 | EmitAggLoadOfLValue(E); |
169 | 5 | } |
170 | | |
171 | | void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E); |
172 | | void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO); |
173 | | void VisitChooseExpr(const ChooseExpr *CE); |
174 | | void VisitInitListExpr(InitListExpr *E); |
175 | | void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, |
176 | | llvm::Value *outerBegin = nullptr); |
177 | | void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E); |
178 | 0 | void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing. |
179 | 383 | void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { |
180 | 383 | CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE); |
181 | 383 | Visit(DAE->getExpr()); |
182 | 383 | } |
183 | 277 | void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { |
184 | 277 | CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); |
185 | 277 | Visit(DIE->getExpr()); |
186 | 277 | } |
187 | | void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); |
188 | | void VisitCXXConstructExpr(const CXXConstructExpr *E); |
189 | | void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); |
190 | | void VisitLambdaExpr(LambdaExpr *E); |
191 | | void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E); |
192 | | void VisitExprWithCleanups(ExprWithCleanups *E); |
193 | | void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); |
194 | 0 | void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); } |
195 | | void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E); |
196 | | void VisitOpaqueValueExpr(OpaqueValueExpr *E); |
197 | | |
198 | 56 | void VisitPseudoObjectExpr(PseudoObjectExpr *E) { |
199 | 56 | if (E->isGLValue()) { |
200 | 0 | LValue LV = CGF.EmitPseudoObjectLValue(E); |
201 | 0 | return EmitFinalDestCopy(E->getType(), LV); |
202 | 0 | } |
203 | | |
204 | 56 | CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType())); |
205 | 56 | } |
206 | | |
207 | | void VisitVAArgExpr(VAArgExpr *E); |
208 | | |
209 | | void EmitInitializationToLValue(Expr *E, LValue Address); |
210 | | void EmitNullInitializationToLValue(LValue Address); |
211 | | // case Expr::ChooseExprClass: |
212 | 1 | void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); } |
213 | 21 | void VisitAtomicExpr(AtomicExpr *E) { |
214 | 21 | RValue Res = CGF.EmitAtomicExpr(E); |
215 | 21 | EmitFinalDestCopy(E->getType(), Res); |
216 | 21 | } |
217 | | }; |
218 | | } // end anonymous namespace. |
219 | | |
220 | | //===----------------------------------------------------------------------===// |
221 | | // Utilities |
222 | | //===----------------------------------------------------------------------===// |
223 | | |
224 | | /// EmitAggLoadOfLValue - Given an expression with aggregate type that |
225 | | /// represents a value lvalue, this method emits the address of the lvalue, |
226 | | /// then loads the result into DestPtr. |
227 | 2.70k | void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) { |
228 | 2.70k | LValue LV = CGF.EmitLValue(E); |
229 | | |
230 | | // If the type of the l-value is atomic, then do an atomic load. |
231 | 2.70k | if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)) {
232 | 13 | CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest); |
233 | 13 | return; |
234 | 13 | } |
235 | | |
236 | 2.68k | EmitFinalDestCopy(E->getType(), LV); |
237 | 2.68k | } |
238 | | |
239 | | /// True if the given aggregate type requires special GC API calls. |
240 | 24 | bool AggExprEmitter::TypeRequiresGCollection(QualType T) { |
241 | | // Only record types have members that might require garbage collection. |
242 | 24 | const RecordType *RecordTy = T->getAs<RecordType>(); |
243 | 24 | if (!RecordTy) return false;
244 | | |
245 | | // Don't mess with non-trivial C++ types. |
246 | 24 | RecordDecl *Record = RecordTy->getDecl(); |
247 | 24 | if (isa<CXXRecordDecl>(Record) && |
248 | 24 | (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
249 | 5 | !cast<CXXRecordDecl>(Record)->hasTrivialDestructor())) |
250 | 0 | return false; |
251 | | |
252 | | // Check whether the type has an object member. |
253 | 24 | return Record->hasObjectMember(); |
254 | 24 | } |
255 | | |
256 | | void AggExprEmitter::withReturnValueSlot( |
257 | 7.81k | const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) { |
258 | 7.81k | QualType RetTy = E->getType(); |
259 | 7.81k | bool RequiresDestruction = |
260 | 7.81k | !Dest.isExternallyDestructed() && |
261 | 7.81k | RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct;
262 | | |
263 | | // If it makes no observable difference, save a memcpy + temporary. |
264 | | // |
265 | | // We need to always provide our own temporary if destruction is required. |
266 | | // Otherwise, EmitCall will emit its own, notice that it's "unused", and end |
267 | | // its lifetime before we have the chance to emit a proper destructor call. |
268 | 7.81k | bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection() ||
269 | 7.81k | (RequiresDestruction && !Dest.getAddress().isValid());
270 | | |
271 | 7.81k | Address RetAddr = Address::invalid(); |
272 | 7.81k | Address RetAllocaAddr = Address::invalid(); |
273 | | |
274 | 7.81k | EHScopeStack::stable_iterator LifetimeEndBlock; |
275 | 7.81k | llvm::Value *LifetimeSizePtr = nullptr; |
276 | 7.81k | llvm::IntrinsicInst *LifetimeStartInst = nullptr; |
277 | 7.81k | if (!UseTemp) { |
278 | 7.62k | RetAddr = Dest.getAddress(); |
279 | 7.62k | } else { |
280 | 190 | RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr); |
281 | 190 | llvm::TypeSize Size = |
282 | 190 | CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy)); |
283 | 190 | LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer()); |
284 | 190 | if (LifetimeSizePtr) { |
285 | 7 | LifetimeStartInst = |
286 | 7 | cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint())); |
287 | 7 | assert(LifetimeStartInst->getIntrinsicID() == |
288 | 7 | llvm::Intrinsic::lifetime_start && |
289 | 7 | "Last insertion wasn't a lifetime.start?"); |
290 | | |
291 | 0 | CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>( |
292 | 7 | NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr); |
293 | 7 | LifetimeEndBlock = CGF.EHStack.stable_begin(); |
294 | 7 | } |
295 | 190 | } |
296 | | |
297 | 0 | RValue Src = |
298 | 7.81k | EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused, |
299 | 7.81k | Dest.isExternallyDestructed())); |
300 | | |
301 | 7.81k | if (!UseTemp) |
302 | 7.62k | return; |
303 | | |
304 | 190 | assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer()); |
305 | 0 | EmitFinalDestCopy(E->getType(), Src); |
306 | | |
307 | 190 | if (!RequiresDestruction && LifetimeStartInst) {
308 | | // If there's no dtor to run, the copy was the last use of our temporary. |
309 | | // Since we're not guaranteed to be in an ExprWithCleanups, clean up |
310 | | // eagerly. |
311 | 5 | CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst); |
312 | 5 | CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer()); |
313 | 5 | } |
314 | 190 | } |
315 | | |
316 | | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
317 | 215 | void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) { |
318 | 215 | assert(src.isAggregate() && "value must be aggregate value!"); |
319 | 0 | LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type); |
320 | 215 | EmitFinalDestCopy(type, srcLV, EVK_RValue); |
321 | 215 | } |
322 | | |
323 | | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
324 | | void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src, |
325 | 3.77k | ExprValueKind SrcValueKind) { |
326 | | // If Dest is ignored, then we're evaluating an aggregate expression |
327 | | // in a context that doesn't care about the result. Note that loads |
328 | | // from volatile l-values force the existence of a non-ignored |
329 | | // destination. |
330 | 3.77k | if (Dest.isIgnored()) |
331 | 483 | return; |
332 | | |
333 | | // Copy non-trivial C structs here. |
334 | 3.29k | LValue DstLV = CGF.MakeAddrLValue( |
335 | 3.29k | Dest.getAddress(), Dest.isVolatile() ? type.withVolatile() : type);
336 | | |
337 | 3.29k | if (SrcValueKind == EVK_RValue) { |
338 | 203 | if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { |
339 | 8 | if (Dest.isPotentiallyAliased()) |
340 | 8 | CGF.callCStructMoveAssignmentOperator(DstLV, src); |
341 | 0 | else |
342 | 0 | CGF.callCStructMoveConstructor(DstLV, src); |
343 | 8 | return; |
344 | 8 | } |
345 | 3.08k | } else { |
346 | 3.08k | if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
347 | 77 | if (Dest.isPotentiallyAliased()) |
348 | 22 | CGF.callCStructCopyAssignmentOperator(DstLV, src); |
349 | 55 | else |
350 | 55 | CGF.callCStructCopyConstructor(DstLV, src); |
351 | 77 | return; |
352 | 77 | } |
353 | 3.08k | } |
354 | | |
355 | 3.20k | AggValueSlot srcAgg = AggValueSlot::forLValue( |
356 | 3.20k | src, CGF, AggValueSlot::IsDestructed, needsGC(type), |
357 | 3.20k | AggValueSlot::IsAliased, AggValueSlot::MayOverlap); |
358 | 3.20k | EmitCopy(type, Dest, srcAgg); |
359 | 3.20k | } |
360 | | |
361 | | /// Perform a copy from the source into the destination. |
362 | | /// |
363 | | /// \param type - the type of the aggregate being copied; qualifiers are |
364 | | /// ignored |
365 | | void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest, |
366 | 3.21k | const AggValueSlot &src) { |
367 | 3.21k | if (dest.requiresGCollection()) { |
368 | 6 | CharUnits sz = dest.getPreferredSize(CGF.getContext(), type); |
369 | 6 | llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity()); |
370 | 6 | CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, |
371 | 6 | dest.getAddress(), |
372 | 6 | src.getAddress(), |
373 | 6 | size); |
374 | 6 | return; |
375 | 6 | } |
376 | | |
377 | | // If the result of the assignment is used, copy the LHS there also. |
378 | | // It's volatile if either side is. Use the minimum alignment of |
379 | | // the two sides. |
380 | 3.20k | LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type); |
381 | 3.20k | LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type); |
382 | 3.20k | CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), |
383 | 3.20k | dest.isVolatile() || src.isVolatile());
384 | 3.20k | } |
385 | | |
386 | | /// Emit the initializer for a std::initializer_list initialized with a |
387 | | /// real initializer list. |
388 | | void |
389 | 192 | AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { |
390 | | // Emit an array containing the elements. The array is externally destructed |
391 | | // if the std::initializer_list object is. |
392 | 192 | ASTContext &Ctx = CGF.getContext(); |
393 | 192 | LValue Array = CGF.EmitLValue(E->getSubExpr()); |
394 | 192 | assert(Array.isSimple() && "initializer_list array not a simple lvalue"); |
395 | 0 | Address ArrayPtr = Array.getAddress(CGF); |
396 | | |
397 | 192 | const ConstantArrayType *ArrayType = |
398 | 192 | Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); |
399 | 192 | assert(ArrayType && "std::initializer_list constructed from non-array"); |
400 | | |
401 | | // FIXME: Perform the checks on the field types in SemaInit. |
402 | 0 | RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl(); |
403 | 192 | RecordDecl::field_iterator Field = Record->field_begin(); |
404 | 192 | if (Field == Record->field_end()) { |
405 | 1 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
406 | 1 | return; |
407 | 1 | } |
408 | | |
409 | | // Start pointer. |
410 | 191 | if (!Field->getType()->isPointerType() || |
411 | 191 | !Ctx.hasSameType(Field->getType()->getPointeeType(), |
412 | 191 | ArrayType->getElementType())) { |
413 | 0 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
414 | 0 | return; |
415 | 0 | } |
416 | | |
417 | 191 | AggValueSlot Dest = EnsureSlot(E->getType()); |
418 | 191 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
419 | 191 | LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field); |
420 | 191 | llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0); |
421 | 191 | llvm::Value *IdxStart[] = { Zero, Zero }; |
422 | 191 | llvm::Value *ArrayStart = Builder.CreateInBoundsGEP( |
423 | 191 | ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart"); |
424 | 191 | CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start); |
425 | 191 | ++Field; |
426 | | |
427 | 191 | if (Field == Record->field_end()) { |
428 | 0 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
429 | 0 | return; |
430 | 0 | } |
431 | | |
432 | 191 | llvm::Value *Size = Builder.getInt(ArrayType->getSize()); |
433 | 191 | LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field); |
434 | 191 | if (Field->getType()->isPointerType() && |
435 | 191 | Ctx.hasSameType(Field->getType()->getPointeeType(), |
436 | 7 | ArrayType->getElementType())) { |
437 | | // End pointer. |
438 | 7 | llvm::Value *IdxEnd[] = { Zero, Size }; |
439 | 7 | llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP( |
440 | 7 | ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend"); |
441 | 7 | CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); |
442 | 184 | } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { |
443 | | // Length. |
444 | 184 | CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength); |
445 | 184 | } else { |
446 | 0 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
447 | 0 | return; |
448 | 0 | } |
449 | 191 | } |
450 | | |
451 | | /// Determine if E is a trivial array filler, that is, one that is |
452 | | /// equivalent to zero-initialization. |
453 | 1.67k | static bool isTrivialFiller(Expr *E) { |
454 | 1.67k | if (!E) |
455 | 1.60k | return true; |
456 | | |
457 | 72 | if (isa<ImplicitValueInitExpr>(E)) |
458 | 38 | return true; |
459 | | |
460 | 34 | if (auto *ILE = dyn_cast<InitListExpr>(E)) { |
461 | 6 | if (ILE->getNumInits()) |
462 | 5 | return false; |
463 | 1 | return isTrivialFiller(ILE->getArrayFiller()); |
464 | 6 | } |
465 | | |
466 | 28 | if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E)) |
467 | 25 | return Cons->getConstructor()->isDefaultConstructor() && |
468 | 25 | Cons->getConstructor()->isTrivial();
469 | | |
470 | | // FIXME: Are there other cases where we can avoid emitting an initializer? |
471 | 3 | return false; |
472 | 28 | } |
473 | | |
474 | | /// Emit initialization of an array from an initializer list. |
475 | | void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, |
476 | 1.71k | QualType ArrayQTy, InitListExpr *E) { |
477 | 1.71k | uint64_t NumInitElements = E->getNumInits(); |
478 | | |
479 | 1.71k | uint64_t NumArrayElements = AType->getNumElements(); |
480 | 1.71k | assert(NumInitElements <= NumArrayElements); |
481 | | |
482 | 0 | QualType elementType = |
483 | 1.71k | CGF.getContext().getAsArrayType(ArrayQTy)->getElementType(); |
484 | | |
485 | | // DestPtr is an array*. Construct an elementType* by drilling |
486 | | // down a level. |
487 | 1.71k | llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); |
488 | 1.71k | llvm::Value *indices[] = { zero, zero }; |
489 | 1.71k | llvm::Value *begin = Builder.CreateInBoundsGEP( |
490 | 1.71k | DestPtr.getElementType(), DestPtr.getPointer(), indices, |
491 | 1.71k | "arrayinit.begin"); |
492 | | |
493 | 1.71k | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); |
494 | 1.71k | CharUnits elementAlign = |
495 | 1.71k | DestPtr.getAlignment().alignmentOfArrayElement(elementSize); |
496 | 1.71k | llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType); |
497 | | |
498 | | // Consider initializing the array by copying from a global. For this to be |
499 | | // more efficient than per-element initialization, the size of the elements |
500 | | // with explicit initializers should be large enough. |
501 | 1.71k | if (NumInitElements * elementSize.getQuantity() > 16 && |
502 | 1.71k | elementType.isTriviallyCopyableType(CGF.getContext())) {
503 | 93 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
504 | 93 | ConstantEmitter Emitter(CGF); |
505 | 93 | LangAS AS = ArrayQTy.getAddressSpace(); |
506 | 93 | if (llvm::Constant *C = Emitter.tryEmitForInitializer(E, AS, ArrayQTy)) { |
507 | 33 | auto GV = new llvm::GlobalVariable( |
508 | 33 | CGM.getModule(), C->getType(), |
509 | 33 | CGM.isTypeConstant(ArrayQTy, /* ExcludeCtorDtor= */ true), |
510 | 33 | llvm::GlobalValue::PrivateLinkage, C, "constinit", |
511 | 33 | /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal, |
512 | 33 | CGM.getContext().getTargetAddressSpace(AS)); |
513 | 33 | Emitter.finalize(GV); |
514 | 33 | CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy); |
515 | 33 | GV->setAlignment(Align.getAsAlign()); |
516 | 33 | Address GVAddr(GV, GV->getValueType(), Align); |
517 | 33 | EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy)); |
518 | 33 | return; |
519 | 33 | } |
520 | 93 | } |
521 | | |
522 | | // Exception safety requires us to destroy all the |
523 | | // already-constructed members if an initializer throws. |
524 | | // For that, we'll need an EH cleanup. |
525 | 1.67k | QualType::DestructionKind dtorKind = elementType.isDestructedType(); |
526 | 1.67k | Address endOfInit = Address::invalid(); |
527 | 1.67k | EHScopeStack::stable_iterator cleanup; |
528 | 1.67k | llvm::Instruction *cleanupDominator = nullptr; |
529 | 1.67k | if (CGF.needsEHCleanup(dtorKind)) { |
530 | | // In principle we could tell the cleanup where we are more |
531 | | // directly, but the control flow can get so varied here that it |
532 | | // would actually be quite complex. Therefore we go through an |
533 | | // alloca. |
534 | 76 | endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(), |
535 | 76 | "arrayinit.endOfInit"); |
536 | 76 | cleanupDominator = Builder.CreateStore(begin, endOfInit); |
537 | 76 | CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType, |
538 | 76 | elementAlign, |
539 | 76 | CGF.getDestroyer(dtorKind)); |
540 | 76 | cleanup = CGF.EHStack.stable_begin(); |
541 | | |
542 | | // Otherwise, remember that we didn't need a cleanup. |
543 | 1.60k | } else { |
544 | 1.60k | dtorKind = QualType::DK_none; |
545 | 1.60k | } |
546 | | |
547 | 1.67k | llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1); |
548 | | |
549 | | // The 'current element to initialize'. The invariants on this |
550 | | // variable are complicated. Essentially, after each iteration of |
551 | | // the loop, it points to the last initialized element, except |
552 | | // that it points to the beginning of the array before any |
553 | | // elements have been initialized. |
554 | 1.67k | llvm::Value *element = begin; |
555 | | |
556 | | // Emit the explicit initializers. |
557 | 5.09k | for (uint64_t i = 0; i != NumInitElements; ++i) {
558 | | // Advance to the next element. |
559 | 3.41k | if (i > 0) { |
560 | 1.75k | element = Builder.CreateInBoundsGEP( |
561 | 1.75k | llvmElementType, element, one, "arrayinit.element"); |
562 | | |
563 | | // Tell the cleanup that it needs to destroy up to this |
564 | | // element. TODO: some of these stores can be trivially |
565 | | // observed to be unnecessary. |
566 | 1.75k | if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
567 | 1.75k | } |
568 | | |
569 | 3.41k | LValue elementLV = CGF.MakeAddrLValue( |
570 | 3.41k | Address(element, llvmElementType, elementAlign), elementType); |
571 | 3.41k | EmitInitializationToLValue(E->getInit(i), elementLV); |
572 | 3.41k | } |
573 | | |
574 | | // Check whether there's a non-trivial array-fill expression. |
575 | 1.67k | Expr *filler = E->getArrayFiller(); |
576 | 1.67k | bool hasTrivialFiller = isTrivialFiller(filler); |
577 | | |
578 | | // Any remaining elements need to be zero-initialized, possibly |
579 | | // using the filler expression. We can skip this if the we're |
580 | | // emitting to zeroed memory. |
581 | 1.67k | if (NumInitElements != NumArrayElements && |
582 | 1.67k | !(Dest.isZeroed() && hasTrivialFiller &&
583 | 66 | CGF.getTypes().isZeroInitializable(elementType))) {
584 | | |
585 | | // Use an actual loop. This is basically |
586 | | // do { *array++ = filler; } while (array != end); |
587 | | |
588 | | // Advance to the start of the rest of the array. |
589 | 50 | if (NumInitElements) { |
590 | 28 | element = Builder.CreateInBoundsGEP( |
591 | 28 | llvmElementType, element, one, "arrayinit.start"); |
592 | 28 | if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
593 | 28 | } |
594 | | |
595 | | // Compute the end of the array. |
596 | 50 | llvm::Value *end = Builder.CreateInBoundsGEP( |
597 | 50 | llvmElementType, begin, |
598 | 50 | llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end"); |
599 | | |
600 | 50 | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
601 | 50 | llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body"); |
602 | | |
603 | | // Jump into the body. |
604 | 50 | CGF.EmitBlock(bodyBB); |
605 | 50 | llvm::PHINode *currentElement = |
606 | 50 | Builder.CreatePHI(element->getType(), 2, "arrayinit.cur"); |
607 | 50 | currentElement->addIncoming(element, entryBB); |
608 | | |
609 | | // Emit the actual filler expression. |
610 | 50 | { |
611 | | // C++1z [class.temporary]p5: |
612 | | // when a default constructor is called to initialize an element of |
613 | | // an array with no corresponding initializer [...] the destruction of |
614 | | // every temporary created in a default argument is sequenced before |
615 | | // the construction of the next array element, if any |
616 | 50 | CodeGenFunction::RunCleanupsScope CleanupsScope(CGF); |
617 | 50 | LValue elementLV = CGF.MakeAddrLValue( |
618 | 50 | Address(currentElement, llvmElementType, elementAlign), elementType); |
619 | 50 | if (filler) |
620 | 50 | EmitInitializationToLValue(filler, elementLV); |
621 | 0 | else |
622 | 0 | EmitNullInitializationToLValue(elementLV); |
623 | 50 | } |
624 | | |
625 | | // Move on to the next element. |
626 | 50 | llvm::Value *nextElement = Builder.CreateInBoundsGEP( |
627 | 50 | llvmElementType, currentElement, one, "arrayinit.next"); |
628 | | |
629 | | // Tell the EH cleanup that we finished with the last element. |
630 | 50 | if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
631 | | |
632 | | // Leave the loop if we're done. |
633 | 50 | llvm::Value *done = Builder.CreateICmpEQ(nextElement, end, |
634 | 50 | "arrayinit.done"); |
635 | 50 | llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end"); |
636 | 50 | Builder.CreateCondBr(done, endBB, bodyBB); |
637 | 50 | currentElement->addIncoming(nextElement, Builder.GetInsertBlock()); |
638 | | |
639 | 50 | CGF.EmitBlock(endBB); |
640 | 50 | } |
641 | | |
642 | | // Leave the partial-array cleanup if we entered one. |
643 | 1.67k | if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
644 | 1.67k | } |
645 | | |
646 | | //===----------------------------------------------------------------------===// |
647 | | // Visitor Methods |
648 | | //===----------------------------------------------------------------------===// |
649 | | |
650 | 13.8k | void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){ |
651 | 13.8k | Visit(E->getSubExpr()); |
652 | 13.8k | } |
653 | | |
654 | 23 | void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) { |
655 | | // If this is a unique OVE, just visit its source expression. |
656 | 23 | if (e->isUnique()) |
657 | 7 | Visit(e->getSourceExpr()); |
658 | 16 | else |
659 | 16 | EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e)); |
660 | 23 | } |
661 | | |
662 | | void |
663 | 613 | AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { |
664 | 613 | if (Dest.isPotentiallyAliased() && |
665 | 613 | E->getType().isPODType(CGF.getContext())) {
666 | | // For a POD type, just emit a load of the lvalue + a copy, because our |
667 | | // compound literal might alias the destination. |
668 | 20 | EmitAggLoadOfLValue(E); |
669 | 20 | return; |
670 | 20 | } |
671 | | |
672 | 593 | AggValueSlot Slot = EnsureSlot(E->getType()); |
673 | | |
674 | | // Block-scope compound literals are destroyed at the end of the enclosing |
675 | | // scope in C. |
676 | 593 | bool Destruct = |
677 | 593 | !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed();
678 | 593 | if (Destruct) |
679 | 20 | Slot.setExternallyDestructed(); |
680 | | |
681 | 593 | CGF.EmitAggExpr(E->getInitializer(), Slot); |
682 | | |
683 | 593 | if (Destruct) |
684 | 20 | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
685 | 0 | CGF.pushLifetimeExtendedDestroy( |
686 | 0 | CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(), |
687 | 0 | CGF.getDestroyer(DtorKind), DtorKind & EHCleanup); |
688 | 593 | } |
689 | | |
690 | | /// Attempt to look through various unimportant expressions to find a |
691 | | /// cast of the given kind. |
692 | 11 | static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) { |
693 | 11 | op = op->IgnoreParenNoopCasts(ctx); |
694 | 11 | if (auto castE = dyn_cast<CastExpr>(op)) { |
695 | 0 | if (castE->getCastKind() == kind) |
696 | 0 | return castE->getSubExpr(); |
697 | 0 | } |
698 | 11 | return nullptr; |
699 | 11 | } |
700 | | |
701 | 13.5k | void AggExprEmitter::VisitCastExpr(CastExpr *E) { |
702 | 13.5k | if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E)) |
703 | 3.40k | CGF.CGM.EmitExplicitCastExprType(ECE, &CGF); |
704 | 13.5k | switch (E->getCastKind()) { |
705 | 0 | case CK_Dynamic: { |
706 | | // FIXME: Can this actually happen? We have no test coverage for it. |
707 | 0 | assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?"); |
708 | 0 | LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(), |
709 | 0 | CodeGenFunction::TCK_Load); |
710 | | // FIXME: Do we also need to handle property references here? |
711 | 0 | if (LV.isSimple()) |
712 | 0 | CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E)); |
713 | 0 | else |
714 | 0 | CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast"); |
715 | |
716 | 0 | if (!Dest.isIgnored()) |
717 | 0 | CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination"); |
718 | 0 | break; |
719 | 0 | } |
720 | | |
721 | 7 | case CK_ToUnion: { |
722 | | // Evaluate even if the destination is ignored. |
723 | 7 | if (Dest.isIgnored()) { |
724 | 1 | CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), |
725 | 1 | /*ignoreResult=*/true); |
726 | 1 | break; |
727 | 1 | } |
728 | | |
729 | | // GCC union extension |
730 | 6 | QualType Ty = E->getSubExpr()->getType(); |
731 | 6 | Address CastPtr = |
732 | 6 | Builder.CreateElementBitCast(Dest.getAddress(), CGF.ConvertType(Ty)); |
733 | 6 | EmitInitializationToLValue(E->getSubExpr(), |
734 | 6 | CGF.MakeAddrLValue(CastPtr, Ty)); |
735 | 6 | break; |
736 | 7 | } |
737 | | |
738 | 4 | case CK_LValueToRValueBitCast: { |
739 | 4 | if (Dest.isIgnored()) { |
740 | 0 | CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), |
741 | 0 | /*ignoreResult=*/true); |
742 | 0 | break; |
743 | 0 | } |
744 | | |
745 | 4 | LValue SourceLV = CGF.EmitLValue(E->getSubExpr()); |
746 | 4 | Address SourceAddress = |
747 | 4 | Builder.CreateElementBitCast(SourceLV.getAddress(CGF), CGF.Int8Ty); |
748 | 4 | Address DestAddress = |
749 | 4 | Builder.CreateElementBitCast(Dest.getAddress(), CGF.Int8Ty); |
750 | 4 | llvm::Value *SizeVal = llvm::ConstantInt::get( |
751 | 4 | CGF.SizeTy, |
752 | 4 | CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity()); |
753 | 4 | Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal); |
754 | 4 | break; |
755 | 4 | } |
756 | | |
757 | 0 | case CK_DerivedToBase: |
758 | 0 | case CK_BaseToDerived: |
759 | 0 | case CK_UncheckedDerivedToBase: { |
760 | 0 | llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: " |
761 | 0 | "should have been unpacked before we got here"); |
762 | 0 | } |
763 | |
764 | 19 | case CK_NonAtomicToAtomic: |
765 | 29 | case CK_AtomicToNonAtomic: { |
766 | 29 | bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic); |
767 | | |
768 | | // Determine the atomic and value types. |
769 | 29 | QualType atomicType = E->getSubExpr()->getType(); |
770 | 29 | QualType valueType = E->getType(); |
771 | 29 | if (isToAtomic) std::swap(atomicType, valueType);
772 | | |
773 | 29 | assert(atomicType->isAtomicType()); |
774 | 0 | assert(CGF.getContext().hasSameUnqualifiedType(valueType, |
775 | 29 | atomicType->castAs<AtomicType>()->getValueType())); |
776 | | |
777 | | // Just recurse normally if we're ignoring the result or the |
778 | | // atomic type doesn't change representation. |
779 | 29 | if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) { |
780 | 18 | return Visit(E->getSubExpr()); |
781 | 18 | } |
782 | | |
783 | 11 | CastKind peepholeTarget = |
784 | 11 | (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);
785 | | |
786 | | // These two cases are reverses of each other; try to peephole them. |
787 | 11 | if (Expr *op = |
788 | 11 | findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) { |
789 | 0 | assert(CGF.getContext().hasSameUnqualifiedType(op->getType(), |
790 | 0 | E->getType()) && |
791 | 0 | "peephole significantly changed types?"); |
792 | 0 | return Visit(op); |
793 | 0 | } |
794 | | |
795 | | // If we're converting an r-value of non-atomic type to an r-value |
796 | | // of atomic type, just emit directly into the relevant sub-object. |
797 | 11 | if (isToAtomic) { |
798 | 7 | AggValueSlot valueDest = Dest; |
799 | 7 | if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) { |
800 | | // Zero-initialize. (Strictly speaking, we only need to initialize |
801 | | // the padding at the end, but this is simpler.) |
802 | 7 | if (!Dest.isZeroed()) |
803 | 7 | CGF.EmitNullInitialization(Dest.getAddress(), atomicType); |
804 | | |
805 | | // Build a GEP to refer to the subobject. |
806 | 7 | Address valueAddr = |
807 | 7 | CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0); |
808 | 7 | valueDest = AggValueSlot::forAddr(valueAddr, |
809 | 7 | valueDest.getQualifiers(), |
810 | 7 | valueDest.isExternallyDestructed(), |
811 | 7 | valueDest.requiresGCollection(), |
812 | 7 | valueDest.isPotentiallyAliased(), |
813 | 7 | AggValueSlot::DoesNotOverlap, |
814 | 7 | AggValueSlot::IsZeroed); |
815 | 7 | } |
816 | | |
817 | 7 | CGF.EmitAggExpr(E->getSubExpr(), valueDest); |
818 | 7 | return; |
819 | 7 | } |
820 | | |
821 | | // Otherwise, we're converting an atomic type to a non-atomic type. |
822 | | // Make an atomic temporary, emit into that, and then copy the value out. |
823 | 4 | AggValueSlot atomicSlot = |
824 | 4 | CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp"); |
825 | 4 | CGF.EmitAggExpr(E->getSubExpr(), atomicSlot); |
826 | | |
827 | 4 | Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0); |
828 | 4 | RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile()); |
829 | 4 | return EmitFinalDestCopy(valueType, rvalue); |
830 | 11 | } |
831 | 0 | case CK_AddressSpaceConversion: |
832 | 0 | return Visit(E->getSubExpr()); |
833 | | |
834 | 2.84k | case CK_LValueToRValue: |
835 | | // If we're loading from a volatile type, force the destination |
836 | | // into existence. |
837 | 2.84k | if (E->getSubExpr()->getType().isVolatileQualified()) { |
838 | 24 | bool Destruct = |
839 | 24 | !Dest.isExternallyDestructed() && |
840 | 24 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
841 | 24 | if (Destruct) |
842 | 2 | Dest.setExternallyDestructed(); |
843 | 24 | EnsureDest(E->getType()); |
844 | 24 | Visit(E->getSubExpr()); |
845 | | |
846 | 24 | if (Destruct) |
847 | 2 | CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), |
848 | 2 | E->getType()); |
849 | | |
850 | 24 | return; |
851 | 24 | } |
852 | | |
853 | 2.84k | LLVM_FALLTHROUGH;
854 | | |
855 | | |
856 | 7.04k | case CK_NoOp: |
857 | 7.11k | case CK_UserDefinedConversion: |
858 | 13.4k | case CK_ConstructorConversion: |
859 | 13.4k | assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(), |
860 | 13.4k | E->getType()) && |
861 | 13.4k | "Implicit cast types must be compatible"); |
862 | 0 | Visit(E->getSubExpr()); |
863 | 13.4k | break; |
864 | | |
865 | 0 | case CK_LValueBitCast: |
866 | 0 | llvm_unreachable("should not be emitting lvalue bitcast as rvalue"); |
867 | |
868 | 0 | case CK_Dependent: |
869 | 0 | case CK_BitCast: |
870 | 0 | case CK_ArrayToPointerDecay: |
871 | 0 | case CK_FunctionToPointerDecay: |
872 | 0 | case CK_NullToPointer: |
873 | 0 | case CK_NullToMemberPointer: |
874 | 0 | case CK_BaseToDerivedMemberPointer: |
875 | 0 | case CK_DerivedToBaseMemberPointer: |
876 | 0 | case CK_MemberPointerToBoolean: |
877 | 0 | case CK_ReinterpretMemberPointer: |
878 | 0 | case CK_IntegralToPointer: |
879 | 0 | case CK_PointerToIntegral: |
880 | 0 | case CK_PointerToBoolean: |
881 | 0 | case CK_ToVoid: |
882 | 0 | case CK_VectorSplat: |
883 | 0 | case CK_IntegralCast: |
884 | 0 | case CK_BooleanToSignedIntegral: |
885 | 0 | case CK_IntegralToBoolean: |
886 | 0 | case CK_IntegralToFloating: |
887 | 0 | case CK_FloatingToIntegral: |
888 | 0 | case CK_FloatingToBoolean: |
889 | 0 | case CK_FloatingCast: |
890 | 0 | case CK_CPointerToObjCPointerCast: |
891 | 0 | case CK_BlockPointerToObjCPointerCast: |
892 | 0 | case CK_AnyPointerToBlockPointerCast: |
893 | 0 | case CK_ObjCObjectLValueCast: |
894 | 0 | case CK_FloatingRealToComplex: |
895 | 0 | case CK_FloatingComplexToReal: |
896 | 0 | case CK_FloatingComplexToBoolean: |
897 | 0 | case CK_FloatingComplexCast: |
898 | 0 | case CK_FloatingComplexToIntegralComplex: |
899 | 0 | case CK_IntegralRealToComplex: |
900 | 0 | case CK_IntegralComplexToReal: |
901 | 0 | case CK_IntegralComplexToBoolean: |
902 | 0 | case CK_IntegralComplexCast: |
903 | 0 | case CK_IntegralComplexToFloatingComplex: |
904 | 0 | case CK_ARCProduceObject: |
905 | 0 | case CK_ARCConsumeObject: |
906 | 0 | case CK_ARCReclaimReturnedObject: |
907 | 0 | case CK_ARCExtendBlockObject: |
908 | 0 | case CK_CopyAndAutoreleaseBlockObject: |
909 | 0 | case CK_BuiltinFnToFnPtr: |
910 | 0 | case CK_ZeroToOCLOpaqueType: |
911 | 0 | case CK_MatrixCast: |
912 | |
913 | 0 | case CK_IntToOCLSampler: |
914 | 0 | case CK_FloatingToFixedPoint: |
915 | 0 | case CK_FixedPointToFloating: |
916 | 0 | case CK_FixedPointCast: |
917 | 0 | case CK_FixedPointToBoolean: |
918 | 0 | case CK_FixedPointToIntegral: |
919 | 0 | case CK_IntegralToFixedPoint: |
920 | 0 | llvm_unreachable("cast kind invalid for aggregate types"); |
921 | 13.5k | } |
922 | 13.5k | } |
923 | | |
924 | 7.74k | void AggExprEmitter::VisitCallExpr(const CallExpr *E) { |
925 | 7.74k | if (E->getCallReturnType(CGF.getContext())->isReferenceType()) { |
926 | 0 | EmitAggLoadOfLValue(E); |
927 | 0 | return; |
928 | 0 | } |
929 | | |
930 | 7.74k | withReturnValueSlot(E, [&](ReturnValueSlot Slot) { |
931 | 7.74k | return CGF.EmitCallExpr(E, Slot); |
932 | 7.74k | }); |
933 | 7.74k | } |
934 | | |
935 | 73 | void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) { |
936 | 73 | withReturnValueSlot(E, [&](ReturnValueSlot Slot) { |
937 | 73 | return CGF.EmitObjCMessageExpr(E, Slot); |
938 | 73 | }); |
939 | 73 | } |
940 | | |
941 | 44 | void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { |
942 | 44 | CGF.EmitIgnoredExpr(E->getLHS()); |
943 | 44 | Visit(E->getRHS()); |
944 | 44 | } |
945 | | |
946 | 647 | void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) { |
947 | 647 | CodeGenFunction::StmtExprEvaluation eval(CGF); |
948 | 647 | CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest); |
949 | 647 | } |
950 | | |
951 | | enum CompareKind { |
952 | | CK_Less, |
953 | | CK_Greater, |
954 | | CK_Equal, |
955 | | }; |
956 | | |
957 | | static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, |
958 | | const BinaryOperator *E, llvm::Value *LHS, |
959 | | llvm::Value *RHS, CompareKind Kind, |
960 | 31 | const char *NameSuffix = "") { |
961 | 31 | QualType ArgTy = E->getLHS()->getType(); |
962 | 31 | if (const ComplexType *CT = ArgTy->getAs<ComplexType>()) |
963 | 0 | ArgTy = CT->getElementType(); |
964 | | |
965 | 31 | if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) { |
966 | 0 | assert(Kind == CK_Equal && |
967 | 0 | "member pointers may only be compared for equality"); |
968 | 0 | return CGF.CGM.getCXXABI().EmitMemberPointerComparison( |
969 | 0 | CGF, LHS, RHS, MPT, /*IsInequality*/ false); |
970 | 0 | } |
971 | | |
972 | | // Compute the comparison instructions for the specified comparison kind. |
973 | 31 | struct CmpInstInfo { |
974 | 31 | const char *Name; |
975 | 31 | llvm::CmpInst::Predicate FCmp; |
976 | 31 | llvm::CmpInst::Predicate SCmp; |
977 | 31 | llvm::CmpInst::Predicate UCmp; |
978 | 31 | }; |
979 | 31 | CmpInstInfo InstInfo = [&]() -> CmpInstInfo { |
980 | 31 | using FI = llvm::FCmpInst; |
981 | 31 | using II = llvm::ICmpInst; |
982 | 31 | switch (Kind) { |
983 | 15 | case CK_Less: |
984 | 15 | return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT}; |
985 | 1 | case CK_Greater: |
986 | 1 | return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT}; |
987 | 15 | case CK_Equal: |
988 | 15 | return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ}; |
989 | 31 | } |
990 | 0 | llvm_unreachable("Unrecognised CompareKind enum"); |
991 | 0 | }(); |
992 | | |
993 | 31 | if (ArgTy->hasFloatingRepresentation()) |
994 | 3 | return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS, |
995 | 3 | llvm::Twine(InstInfo.Name) + NameSuffix); |
996 | 28 | if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()) {
997 | 28 | auto Inst = |
998 | 28 | ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp : InstInfo.UCmp;
999 | 28 | return Builder.CreateICmp(Inst, LHS, RHS, |
1000 | 28 | llvm::Twine(InstInfo.Name) + NameSuffix); |
1001 | 28 | } |
1002 | | |
1003 | 0 | llvm_unreachable("unsupported aggregate binary expression should have " |
1004 | 0 | "already been handled"); |
1005 | 0 | } |
1006 | | |
1007 | 15 | void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { |
1008 | 15 | using llvm::BasicBlock; |
1009 | 15 | using llvm::PHINode; |
1010 | 15 | using llvm::Value; |
1011 | 15 | assert(CGF.getContext().hasSameType(E->getLHS()->getType(), |
1012 | 15 | E->getRHS()->getType())); |
1013 | 0 | const ComparisonCategoryInfo &CmpInfo = |
1014 | 15 | CGF.getContext().CompCategories.getInfoForType(E->getType()); |
1015 | 15 | assert(CmpInfo.Record->isTriviallyCopyable() && |
1016 | 15 | "cannot copy non-trivially copyable aggregate"); |
1017 | | |
1018 | 0 | QualType ArgTy = E->getLHS()->getType(); |
1019 | | |
1020 | 15 | if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType() &&
1021 | 15 | !ArgTy->isNullPtrType() && !ArgTy->isPointerType() &&
1022 | 15 | !ArgTy->isMemberPointerType() && !ArgTy->isAnyComplexType()) {
1023 | 0 | return CGF.ErrorUnsupported(E, "aggregate three-way comparison"); |
1024 | 0 | } |
1025 | 15 | bool IsComplex = ArgTy->isAnyComplexType(); |
1026 | | |
1027 | | // Evaluate the operands to the expression and extract their values. |
1028 | 30 | auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> { |
1029 | 30 | RValue RV = CGF.EmitAnyExpr(E); |
1030 | 30 | if (RV.isScalar()) |
1031 | 30 | return {RV.getScalarVal(), nullptr}; |
1032 | 0 | if (RV.isAggregate()) |
1033 | 0 | return {RV.getAggregatePointer(), nullptr}; |
1034 | 0 | assert(RV.isComplex()); |
1035 | 0 | return RV.getComplexVal(); |
1036 | 0 | }; |
1037 | 15 | auto LHSValues = EmitOperand(E->getLHS()), |
1038 | 15 | RHSValues = EmitOperand(E->getRHS()); |
1039 | | |
1040 | 31 | auto EmitCmp = [&](CompareKind K) { |
1041 | 31 | Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first, |
1042 | 31 | K, IsComplex ? ".r" : "");
1043 | 31 | if (!IsComplex) |
1044 | 31 | return Cmp; |
1045 | 0 | assert(K == CompareKind::CK_Equal); |
1046 | 0 | Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second, |
1047 | 0 | RHSValues.second, K, ".i"); |
1048 | 0 | return Builder.CreateAnd(Cmp, CmpImag, "and.eq"); |
1049 | 31 | }; |
1050 | 46 | auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) { |
1051 | 46 | return Builder.getInt(VInfo->getIntValue()); |
1052 | 46 | }; |
1053 | | |
1054 | 15 | Value *Select; |
1055 | 15 | if (ArgTy->isNullPtrType()) { |
1056 | 0 | Select = EmitCmpRes(CmpInfo.getEqualOrEquiv()); |
1057 | 15 | } else if (!CmpInfo.isPartial()) { |
1058 | 14 | Value *SelectOne = |
1059 | 14 | Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), |
1060 | 14 | EmitCmpRes(CmpInfo.getGreater()), "sel.lt"); |
1061 | 14 | Select = Builder.CreateSelect(EmitCmp(CK_Equal), |
1062 | 14 | EmitCmpRes(CmpInfo.getEqualOrEquiv()), |
1063 | 14 | SelectOne, "sel.eq"); |
1064 | 14 | } else { |
1065 | 1 | Value *SelectEq = Builder.CreateSelect( |
1066 | 1 | EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()), |
1067 | 1 | EmitCmpRes(CmpInfo.getUnordered()), "sel.eq"); |
1068 | 1 | Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater), |
1069 | 1 | EmitCmpRes(CmpInfo.getGreater()), |
1070 | 1 | SelectEq, "sel.gt"); |
1071 | 1 | Select = Builder.CreateSelect( |
1072 | 1 | EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt"); |
1073 | 1 | } |
1074 | | // Create the return value in the destination slot. |
1075 | 15 | EnsureDest(E->getType()); |
1076 | 15 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
1077 | | |
1078 | | // Emit the address of the first (and only) field in the comparison category |
1079 | | // type, and initialize it from the constant integer value selected above. |
1080 | 15 | LValue FieldLV = CGF.EmitLValueForFieldInitialization( |
1081 | 15 | DestLV, *CmpInfo.Record->field_begin()); |
1082 | 15 | CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true); |
1083 | | |
1084 | | // All done! The result is in the Dest slot. |
1085 | 15 | } |
1086 | | |
1087 | 0 | void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) { |
1088 | 0 | if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) |
1089 | 0 | VisitPointerToDataMemberBinaryOperator(E); |
1090 | 0 | else |
1091 | 0 | CGF.ErrorUnsupported(E, "aggregate binary expression"); |
1092 | 0 | } |
1093 | | |
1094 | | void AggExprEmitter::VisitPointerToDataMemberBinaryOperator( |
1095 | 0 | const BinaryOperator *E) { |
1096 | 0 | LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E); |
1097 | 0 | EmitFinalDestCopy(E->getType(), LV); |
1098 | 0 | } |
1099 | | |
1100 | | /// Is the value of the given expression possibly a reference to or |
1101 | | /// into a __block variable? |
1102 | 706 | static bool isBlockVarRef(const Expr *E) { |
1103 | | // Make sure we look through parens. |
1104 | 706 | E = E->IgnoreParens(); |
1105 | | |
1106 | | // Check for a direct reference to a __block variable. |
1107 | 706 | if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { |
1108 | 242 | const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl()); |
1109 | 242 | return (var && var->hasAttr<BlocksAttr>()); |
1110 | 242 | } |
1111 | | |
1112 | | // More complicated stuff. |
1113 | | |
1114 | | // Binary operators. |
1115 | 464 | if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) { |
1116 | | // For an assignment or pointer-to-member operation, just care |
1117 | | // about the LHS. |
1118 | 0 | if (op->isAssignmentOp() || op->isPtrMemOp()) |
1119 | 0 | return isBlockVarRef(op->getLHS()); |
1120 | | |
1121 | | // For a comma, just care about the RHS. |
1122 | 0 | if (op->getOpcode() == BO_Comma) |
1123 | 0 | return isBlockVarRef(op->getRHS()); |
1124 | | |
1125 | | // FIXME: pointer arithmetic? |
1126 | 0 | return false; |
1127 | | |
1128 | | // Check both sides of a conditional operator. |
1129 | 464 | } else if (const AbstractConditionalOperator *op |
1130 | 464 | = dyn_cast<AbstractConditionalOperator>(E)) { |
1131 | 0 | return isBlockVarRef(op->getTrueExpr()) |
1132 | 0 | || isBlockVarRef(op->getFalseExpr()); |
1133 | | |
1134 | | // OVEs are required to support BinaryConditionalOperators. |
1135 | 464 | } else if (const OpaqueValueExpr *op |
1136 | 464 | = dyn_cast<OpaqueValueExpr>(E)) { |
1137 | 0 | if (const Expr *src = op->getSourceExpr()) |
1138 | 0 | return isBlockVarRef(src); |
1139 | | |
1140 | | // Casts are necessary to get things like (*(int*)&var) = foo(). |
1141 | | // We don't really care about the kind of cast here, except |
1142 | | // we don't want to look through l2r casts, because it's okay |
1143 | | // to get the *value* in a __block variable. |
1144 | 464 | } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) { |
1145 | 233 | if (cast->getCastKind() == CK_LValueToRValue) |
1146 | 219 | return false; |
1147 | 14 | return isBlockVarRef(cast->getSubExpr()); |
1148 | | |
1149 | | // Handle unary operators. Again, just aggressively look through |
1150 | | // it, ignoring the operation. |
1151 | 233 | } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
1152 | 202 | return isBlockVarRef(uop->getSubExpr()); |
1153 | | |
1154 | | // Look into the base of a field access. |
1155 | 202 | } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
1156 | 5 | return isBlockVarRef(mem->getBase()); |
1157 | | |
1158 | | // Look into the base of a subscript. |
1159 | 24 | } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) { |
1160 | 17 | return isBlockVarRef(sub->getBase()); |
1161 | 17 | } |
1162 | | |
1163 | 7 | return false; |
1164 | 464 | } |
1165 | | |
1166 | 468 | void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { |
1167 | | // For an assignment to work, the value on the right has |
1168 | | // to be compatible with the value on the left. |
1169 | 468 | assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), |
1170 | 468 | E->getRHS()->getType()) |
1171 | 468 | && "Invalid assignment"); |
1172 | | |
1173 | | // If the LHS might be a __block variable, and the RHS can |
1174 | | // potentially cause a block copy, we need to evaluate the RHS first |
1175 | | // so that the assignment goes the right place. |
1176 | | // This is pretty semantically fragile. |
1177 | 468 | if (isBlockVarRef(E->getLHS()) && |
1178 | 468 | E->getRHS()->HasSideEffects(CGF.getContext())) {
1179 | | // Ensure that we have a destination, and evaluate the RHS into that. |
1180 | 3 | EnsureDest(E->getRHS()->getType()); |
1181 | 3 | Visit(E->getRHS()); |
1182 | | |
1183 | | // Now emit the LHS and copy into it. |
1184 | 3 | LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); |
1185 | | |
1186 | | // That copy is an atomic copy if the LHS is atomic. |
1187 | 3 | if (LHS.getType()->isAtomicType() || |
1188 | 3 | CGF.LValueIsSuitableForInlineAtomic(LHS)) { |
1189 | 0 | CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); |
1190 | 0 | return; |
1191 | 0 | } |
1192 | | |
1193 | 3 | EmitCopy(E->getLHS()->getType(), |
1194 | 3 | AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed, |
1195 | 3 | needsGC(E->getLHS()->getType()), |
1196 | 3 | AggValueSlot::IsAliased, |
1197 | 3 | AggValueSlot::MayOverlap), |
1198 | 3 | Dest); |
1199 | 3 | return; |
1200 | 3 | } |
1201 | | |
1202 | 465 | LValue LHS = CGF.EmitLValue(E->getLHS()); |
1203 | | |
1204 | | // If we have an atomic type, evaluate into the destination and then |
1205 | | // do an atomic copy. |
1206 | 465 | if (LHS.getType()->isAtomicType() || |
1207 | 465 | CGF.LValueIsSuitableForInlineAtomic(LHS)) {
1208 | 12 | EnsureDest(E->getRHS()->getType()); |
1209 | 12 | Visit(E->getRHS()); |
1210 | 12 | CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); |
1211 | 12 | return; |
1212 | 12 | } |
1213 | | |
1214 | | // Codegen the RHS so that it stores directly into the LHS. |
1215 | 453 | AggValueSlot LHSSlot = AggValueSlot::forLValue( |
1216 | 453 | LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()), |
1217 | 453 | AggValueSlot::IsAliased, AggValueSlot::MayOverlap); |
1218 | | // A non-volatile aggregate destination might have volatile member. |
1219 | 453 | if (!LHSSlot.isVolatile() && |
1220 | 453 | CGF.hasVolatileMember(E->getLHS()->getType()))
1221 | 7 | LHSSlot.setVolatile(true); |
1222 | | |
1223 | 453 | CGF.EmitAggExpr(E->getRHS(), LHSSlot); |
1224 | | |
1225 | | // Copy into the destination if the assignment isn't ignored. |
1226 | 453 | EmitFinalDestCopy(E->getType(), LHS); |
1227 | | |
1228 | 453 | if (!Dest.isIgnored() && !Dest.isExternallyDestructed() &&
1229 | 453 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
1230 | 2 | CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), |
1231 | 2 | E->getType()); |
1232 | 453 | } |
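
To make the __block special case above concrete, here is a minimal sketch of source that would take that path; the struct, the helper makeAgg, and the function names are illustrative only, and the code assumes Clang's -fblocks extension:

    struct Agg { int data[8]; };
    Agg makeAgg();                 // hypothetical helper with side effects

    void blockAssign() {
      __block Agg a;
      // The RHS has side effects and could cause 'a' to be copied to the
      // heap, so the emitter evaluates the RHS into a temporary first and
      // only afterwards emits the LHS address and copies into it.
      a = makeAgg();
    }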
1233 | | |
1234 | | void AggExprEmitter:: |
1235 | 63 | VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { |
1236 | 63 | llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); |
1237 | 63 | llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); |
1238 | 63 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); |
1239 | | |
1240 | | // Bind the common expression if necessary. |
1241 | 63 | CodeGenFunction::OpaqueValueMapping binding(CGF, E); |
1242 | | |
1243 | 63 | CodeGenFunction::ConditionalEvaluation eval(CGF); |
1244 | 63 | CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, |
1245 | 63 | CGF.getProfileCount(E)); |
1246 | | |
1247 | | // Save whether the destination's lifetime is externally managed. |
1248 | 63 | bool isExternallyDestructed = Dest.isExternallyDestructed(); |
1249 | 63 | bool destructNonTrivialCStruct = |
1250 | 63 | !isExternallyDestructed && |
1251 | 63 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct;
1252 | 63 | isExternallyDestructed |= destructNonTrivialCStruct; |
1253 | 63 | Dest.setExternallyDestructed(isExternallyDestructed); |
1254 | | |
1255 | 63 | eval.begin(CGF); |
1256 | 63 | CGF.EmitBlock(LHSBlock); |
1257 | 63 | CGF.incrementProfileCounter(E); |
1258 | 63 | Visit(E->getTrueExpr()); |
1259 | 63 | eval.end(CGF); |
1260 | | |
1261 | 63 | assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!"); |
1262 | 0 | CGF.Builder.CreateBr(ContBlock); |
1263 | | |
1264 | | // If the result of an agg expression is unused, then the emission |
1265 | | // of the LHS might need to create a destination slot. That's fine |
1266 | | // with us, and we can safely emit the RHS into the same slot, but |
1267 | | // we shouldn't claim that it's already being destructed. |
1268 | 63 | Dest.setExternallyDestructed(isExternallyDestructed); |
1269 | | |
1270 | 63 | eval.begin(CGF); |
1271 | 63 | CGF.EmitBlock(RHSBlock); |
1272 | 63 | Visit(E->getFalseExpr()); |
1273 | 63 | eval.end(CGF); |
1274 | | |
1275 | 63 | if (destructNonTrivialCStruct) |
1276 | 2 | CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), |
1277 | 2 | E->getType()); |
1278 | | |
1279 | 63 | CGF.EmitBlock(ContBlock); |
1280 | 63 | } |
1281 | | |
1282 | 0 | void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) { |
1283 | 0 | Visit(CE->getChosenSubExpr()); |
1284 | 0 | } |
1285 | | |
1286 | 371 | void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { |
1287 | 371 | Address ArgValue = Address::invalid(); |
1288 | 371 | Address ArgPtr = CGF.EmitVAArg(VE, ArgValue); |
1289 | | |
1290 | | // If EmitVAArg fails, emit an error. |
1291 | 371 | if (!ArgPtr.isValid()) { |
1292 | 0 | CGF.ErrorUnsupported(VE, "aggregate va_arg expression"); |
1293 | 0 | return; |
1294 | 0 | } |
1295 | | |
1296 | 371 | EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType())); |
1297 | 371 | } |
1298 | | |
1299 | 7.74k | void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { |
1300 | | // Ensure that we have a slot, but if we already do, remember |
1301 | | // whether it was externally destructed. |
1302 | 7.74k | bool wasExternallyDestructed = Dest.isExternallyDestructed(); |
1303 | 7.74k | EnsureDest(E->getType()); |
1304 | | |
1305 | | // We're going to push a destructor if there isn't already one. |
1306 | 7.74k | Dest.setExternallyDestructed(); |
1307 | | |
1308 | 7.74k | Visit(E->getSubExpr()); |
1309 | | |
1310 | | // Push that destructor we promised. |
1311 | 7.74k | if (!wasExternallyDestructed) |
1312 | 813 | CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress()); |
1313 | 7.74k | } |
1314 | | |
1315 | | void |
1316 | 66.0k | AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { |
1317 | 66.0k | AggValueSlot Slot = EnsureSlot(E->getType()); |
1318 | 66.0k | CGF.EmitCXXConstructExpr(E, Slot); |
1319 | 66.0k | } |
1320 | | |
1321 | | void AggExprEmitter::VisitCXXInheritedCtorInitExpr( |
1322 | 200 | const CXXInheritedCtorInitExpr *E) { |
1323 | 200 | AggValueSlot Slot = EnsureSlot(E->getType()); |
1324 | 200 | CGF.EmitInheritedCXXConstructorCall( |
1325 | 200 | E->getConstructor(), E->constructsVBase(), Slot.getAddress(), |
1326 | 200 | E->inheritedFromVBase(), E); |
1327 | 200 | } |
1328 | | |
1329 | | void |
1330 | 1.69k | AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { |
1331 | 1.69k | AggValueSlot Slot = EnsureSlot(E->getType()); |
1332 | 1.69k | LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType()); |
1333 | | |
1334 | | // We'll need to enter cleanup scopes in case any of the element |
1335 | | // initializers throws an exception. |
1336 | 1.69k | SmallVector<EHScopeStack::stable_iterator, 16> Cleanups; |
1337 | 1.69k | llvm::Instruction *CleanupDominator = nullptr; |
1338 | | |
1339 | 1.69k | CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); |
1340 | 1.69k | for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), |
1341 | 1.69k | e = E->capture_init_end(); |
1342 | 4.39k | i != e; ++i, ++CurField) {
1343 | | // Emit initialization |
1344 | 2.70k | LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField); |
1345 | 2.70k | if (CurField->hasCapturedVLAType()) { |
1346 | 24 | CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV); |
1347 | 24 | continue; |
1348 | 24 | } |
1349 | | |
1350 | 2.68k | EmitInitializationToLValue(*i, LV); |
1351 | | |
1352 | | // Push a destructor if necessary. |
1353 | 2.68k | if (QualType::DestructionKind DtorKind = |
1354 | 2.68k | CurField->getType().isDestructedType()) { |
1355 | 17 | assert(LV.isSimple()); |
1356 | 17 | if (CGF.needsEHCleanup(DtorKind)) { |
1357 | 10 | if (!CleanupDominator) |
1358 | 9 | CleanupDominator = CGF.Builder.CreateAlignedLoad( |
1359 | 9 | CGF.Int8Ty, |
1360 | 9 | llvm::Constant::getNullValue(CGF.Int8PtrTy), |
1361 | 9 | CharUnits::One()); // placeholder |
1362 | | |
1363 | 10 | CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(), |
1364 | 10 | CGF.getDestroyer(DtorKind), false); |
1365 | 10 | Cleanups.push_back(CGF.EHStack.stable_begin()); |
1366 | 10 | } |
1367 | 17 | } |
1368 | 2.68k | } |
1369 | | |
1370 | | // Deactivate all the partial cleanups in reverse order, which |
1371 | | // generally means popping them. |
1372 | 1.70k | for (unsigned i = Cleanups.size(); i != 0; --i)
1373 | 10 | CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator); |
1374 | | |
1375 | | // Destroy the placeholder if we made one. |
1376 | 1.69k | if (CleanupDominator) |
1377 | 9 | CleanupDominator->eraseFromParent(); |
1378 | 1.69k | } |
1379 | | |
1380 | 7.23k | void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { |
1381 | 7.23k | CodeGenFunction::RunCleanupsScope cleanups(CGF); |
1382 | 7.23k | Visit(E->getSubExpr()); |
1383 | 7.23k | } |
1384 | | |
1385 | 0 | void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { |
1386 | 0 | QualType T = E->getType(); |
1387 | 0 | AggValueSlot Slot = EnsureSlot(T); |
1388 | 0 | EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T)); |
1389 | 0 | } |
1390 | | |
1391 | 19 | void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { |
1392 | 19 | QualType T = E->getType(); |
1393 | 19 | AggValueSlot Slot = EnsureSlot(T); |
1394 | 19 | EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T)); |
1395 | 19 | } |
1396 | | |
1397 | | /// Determine whether the given cast kind is known to always convert values |
1398 | | /// with all zero bits in their value representation to values with all zero |
1399 | | /// bits in their value representation. |
1400 | 563 | static bool castPreservesZero(const CastExpr *CE) { |
1401 | 563 | switch (CE->getCastKind()) { |
1402 | | // No-ops. |
1403 | 15 | case CK_NoOp: |
1404 | 15 | case CK_UserDefinedConversion: |
1405 | 15 | case CK_ConstructorConversion: |
1406 | 131 | case CK_BitCast: |
1407 | 131 | case CK_ToUnion: |
1408 | 131 | case CK_ToVoid: |
1409 | | // Conversions between (possibly-complex) integral, (possibly-complex) |
1410 | | // floating-point, and bool. |
1411 | 131 | case CK_BooleanToSignedIntegral: |
1412 | 132 | case CK_FloatingCast: |
1413 | 132 | case CK_FloatingComplexCast: |
1414 | 132 | case CK_FloatingComplexToBoolean: |
1415 | 132 | case CK_FloatingComplexToIntegralComplex: |
1416 | 132 | case CK_FloatingComplexToReal: |
1417 | 136 | case CK_FloatingRealToComplex: |
1418 | 136 | case CK_FloatingToBoolean: |
1419 | 143 | case CK_FloatingToIntegral: |
1420 | 341 | case CK_IntegralCast: |
1421 | 341 | case CK_IntegralComplexCast: |
1422 | 341 | case CK_IntegralComplexToBoolean: |
1423 | 341 | case CK_IntegralComplexToFloatingComplex: |
1424 | 345 | case CK_IntegralComplexToReal: |
1425 | 349 | case CK_IntegralRealToComplex: |
1426 | 349 | case CK_IntegralToBoolean: |
1427 | 357 | case CK_IntegralToFloating: |
1428 | | // Reinterpreting integers as pointers and vice versa. |
1429 | 361 | case CK_IntegralToPointer: |
1430 | 364 | case CK_PointerToIntegral: |
1431 | | // Language extensions. |
1432 | 368 | case CK_VectorSplat: |
1433 | 368 | case CK_MatrixCast: |
1434 | 370 | case CK_NonAtomicToAtomic: |
1435 | 372 | case CK_AtomicToNonAtomic: |
1436 | 372 | return true; |
1437 | | |
1438 | 0 | case CK_BaseToDerivedMemberPointer: |
1439 | 0 | case CK_DerivedToBaseMemberPointer: |
1440 | 0 | case CK_MemberPointerToBoolean: |
1441 | 0 | case CK_NullToMemberPointer: |
1442 | 0 | case CK_ReinterpretMemberPointer: |
1443 | | // FIXME: ABI-dependent. |
1444 | 0 | return false; |
1445 | | |
1446 | 0 | case CK_AnyPointerToBlockPointerCast: |
1447 | 0 | case CK_BlockPointerToObjCPointerCast: |
1448 | 0 | case CK_CPointerToObjCPointerCast: |
1449 | 0 | case CK_ObjCObjectLValueCast: |
1450 | 0 | case CK_IntToOCLSampler: |
1451 | 0 | case CK_ZeroToOCLOpaqueType: |
1452 | | // FIXME: Check these. |
1453 | 0 | return false; |
1454 | | |
1455 | 0 | case CK_FixedPointCast: |
1456 | 0 | case CK_FixedPointToBoolean: |
1457 | 0 | case CK_FixedPointToFloating: |
1458 | 0 | case CK_FixedPointToIntegral: |
1459 | 0 | case CK_FloatingToFixedPoint: |
1460 | 0 | case CK_IntegralToFixedPoint: |
1461 | | // FIXME: Do all fixed-point types represent zero as all 0 bits? |
1462 | 0 | return false; |
1463 | | |
1464 | 0 | case CK_AddressSpaceConversion: |
1465 | 0 | case CK_BaseToDerived: |
1466 | 0 | case CK_DerivedToBase: |
1467 | 0 | case CK_Dynamic: |
1468 | 18 | case CK_NullToPointer: |
1469 | 18 | case CK_PointerToBoolean: |
1470 | | // FIXME: Preserves zeroes only if zero pointers and null pointers have the |
1471 | | // same representation in all involved address spaces. |
1472 | 18 | return false; |
1473 | | |
1474 | 0 | case CK_ARCConsumeObject: |
1475 | 0 | case CK_ARCExtendBlockObject: |
1476 | 0 | case CK_ARCProduceObject: |
1477 | 0 | case CK_ARCReclaimReturnedObject: |
1478 | 0 | case CK_CopyAndAutoreleaseBlockObject: |
1479 | 2 | case CK_ArrayToPointerDecay: |
1480 | 2 | case CK_FunctionToPointerDecay: |
1481 | 2 | case CK_BuiltinFnToFnPtr: |
1482 | 2 | case CK_Dependent: |
1483 | 2 | case CK_LValueBitCast: |
1484 | 173 | case CK_LValueToRValue: |
1485 | 173 | case CK_LValueToRValueBitCast: |
1486 | 173 | case CK_UncheckedDerivedToBase: |
1487 | 173 | return false; |
1488 | 563 | } |
1489 | 0 | llvm_unreachable("Unhandled clang::CastKind enum"); |
1490 | 0 | } |
1491 | | |
1492 | | /// isSimpleZero - If emitting this value will obviously just cause a store of |
1493 | | /// zero to memory, return true. This can return false if uncertain, so it just |
1494 | | /// handles simple cases. |
1495 | 5.26k | static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) { |
1496 | 5.26k | E = E->IgnoreParens(); |
1497 | 5.63k | while (auto *CE = dyn_cast<CastExpr>(E)) { |
1498 | 563 | if (!castPreservesZero(CE)) |
1499 | 191 | break; |
1500 | 372 | E = CE->getSubExpr()->IgnoreParens(); |
1501 | 372 | } |
1502 | | |
1503 | | // 0 |
1504 | 5.26k | if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) |
1505 | 454 | return IL->getValue() == 0; |
1506 | | // +0.0 |
1507 | 4.81k | if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E)) |
1508 | 84 | return FL->getValue().isPosZero(); |
1509 | | // int() |
1510 | 4.72k | if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
1511 | 4.72k | CGF.getTypes().isZeroInitializable(E->getType()))
1512 | 329 | return true; |
1513 | | // (int*)0 - Null pointer expressions. |
1514 | 4.39k | if (const CastExpr *ICE = dyn_cast<CastExpr>(E)) |
1515 | 191 | return ICE->getCastKind() == CK_NullToPointer && |
1516 | 191 | CGF.getTypes().isPointerZeroInitializable(E->getType()) &&
1517 | 191 | !E->HasSideEffects(CGF.getContext());
1518 | | // '\0' |
1519 | 4.20k | if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) |
1520 | 10 | return CL->getValue() == 0; |
1521 | | |
1522 | | // Otherwise, hard case: conservatively return false. |
1523 | 4.19k | return false; |
1524 | 4.20k | } |
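
As a rough illustration of what isSimpleZero accepts (assuming any intervening casts preserve zero), every element of the first initializer below would be treated as a store of zero and could be skipped when the destination slot is already zeroed; the struct and names are made up for this sketch:

    struct P { int i; double d; char c; int *p; };
    P zeros = { 0, +0.0, '\0', (int *)0 };   // every element is a "simple zero"
    P mixed = { 1, 0.0 };                    // the leading 1 still needs a store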
1525 | | |
1526 | | |
1527 | | void |
1528 | 9.41k | AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { |
1529 | 9.41k | QualType type = LV.getType(); |
1530 | | // FIXME: Ignore result? |
1531 | | // FIXME: Are initializers affected by volatile? |
1532 | 9.41k | if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
1533 | | // Storing "i32 0" to a zero'd memory location is a noop. |
1534 | 190 | return; |
1535 | 9.22k | } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
1536 | 112 | return EmitNullInitializationToLValue(LV); |
1537 | 9.11k | } else if (isa<NoInitExpr>(E)) { |
1538 | | // Do nothing. |
1539 | 14 | return; |
1540 | 9.10k | } else if (type->isReferenceType()) { |
1541 | 2.41k | RValue RV = CGF.EmitReferenceBindingToExpr(E); |
1542 | 2.41k | return CGF.EmitStoreThroughLValue(RV, LV); |
1543 | 2.41k | } |
1544 | | |
1545 | 6.68k | switch (CGF.getEvaluationKind(type)) { |
1546 | 11 | case TEK_Complex: |
1547 | 11 | CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true); |
1548 | 11 | return; |
1549 | 3.21k | case TEK_Aggregate: |
1550 | 3.21k | CGF.EmitAggExpr( |
1551 | 3.21k | E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed, |
1552 | 3.21k | AggValueSlot::DoesNotNeedGCBarriers, |
1553 | 3.21k | AggValueSlot::IsNotAliased, |
1554 | 3.21k | AggValueSlot::MayOverlap, Dest.isZeroed())); |
1555 | 3.21k | return; |
1556 | 3.45k | case TEK_Scalar: |
1557 | 3.45k | if (LV.isSimple()) { |
1558 | 3.44k | CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false); |
1559 | 3.44k | } else { |
1560 | 13 | CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV); |
1561 | 13 | } |
1562 | 3.45k | return; |
1563 | 6.68k | } |
1564 | 0 | llvm_unreachable("bad evaluation kind"); |
1565 | 0 | } |
1566 | | |
1567 | 147 | void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) { |
1568 | 147 | QualType type = lv.getType(); |
1569 | | |
1570 | | // If the destination slot is already zeroed out before the aggregate is |
1571 | | // copied into it, we don't have to emit any zeros here. |
1572 | 147 | if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
1573 | 11 | return; |
1574 | | |
1575 | 136 | if (CGF.hasScalarEvaluationKind(type)) { |
1576 | | // For non-aggregates, we can store the appropriate null constant. |
1577 | 112 | llvm::Value *null = CGF.CGM.EmitNullConstant(type); |
1578 | | // Note that the following is not equivalent to |
1579 | | // EmitStoreThroughBitfieldLValue for ARC types. |
1580 | 112 | if (lv.isBitField()) { |
1581 | 1 | CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv); |
1582 | 111 | } else { |
1583 | 111 | assert(lv.isSimple()); |
1584 | 0 | CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true); |
1585 | 111 | } |
1586 | 112 | } else { |
1587 | | // There's a potential optimization opportunity in combining |
1588 | | // memsets; that would be easy for arrays, but relatively |
1589 | | // difficult for structures with the current code. |
1590 | 24 | CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType()); |
1591 | 24 | } |
1592 | 136 | } |
1593 | | |
1594 | 3.67k | void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { |
1595 | | #if 0 |
1596 | | // FIXME: Assess perf here? Figure out what cases are worth optimizing here |
1597 | | // (Length of globals? Chunks of zeroed-out space?). |
1598 | | // |
1599 | | // If we can, prefer a copy from a global; this is a lot less code for long |
1600 | | // globals, and it's easier for the current optimizers to analyze. |
1601 | | if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) { |
1602 | | llvm::GlobalVariable* GV = |
1603 | | new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true, |
1604 | | llvm::GlobalValue::InternalLinkage, C, ""); |
1605 | | EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType())); |
1606 | | return; |
1607 | | } |
1608 | | #endif |
1609 | 3.67k | if (E->hadArrayRangeDesignator()) |
1610 | 0 | CGF.ErrorUnsupported(E, "GNU array range designator extension"); |
1611 | | |
1612 | 3.67k | if (E->isTransparent()) |
1613 | 16 | return Visit(E->getInit(0)); |
1614 | | |
1615 | 3.66k | AggValueSlot Dest = EnsureSlot(E->getType()); |
1616 | | |
1617 | 3.66k | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
1618 | | |
1619 | | // Handle initialization of an array. |
1620 | 3.66k | if (E->getType()->isArrayType()) { |
1621 | 1.71k | auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType()); |
1622 | 1.71k | EmitArrayInit(Dest.getAddress(), AType, E->getType(), E); |
1623 | 1.71k | return; |
1624 | 1.71k | } |
1625 | | |
1626 | 1.95k | assert(E->getType()->isRecordType() && "Only support structs/unions here!"); |
1627 | | |
1628 | | // Do struct initialization; this code just sets each individual member |
1629 | | // to the appropriate value. This makes bitfield support automatic;
1630 | | // the disadvantage is that the generated code is more difficult for |
1631 | | // the optimizer, especially with bitfields. |
1632 | 0 | unsigned NumInitElements = E->getNumInits(); |
1633 | 1.95k | RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl(); |
1634 | | |
1635 | | // We'll need to enter cleanup scopes in case any of the element |
1636 | | // initializers throws an exception. |
1637 | 1.95k | SmallVector<EHScopeStack::stable_iterator, 16> cleanups; |
1638 | 1.95k | llvm::Instruction *cleanupDominator = nullptr; |
1639 | 1.95k | auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) { |
1640 | 16 | cleanups.push_back(cleanup); |
1641 | 16 | if (!cleanupDominator) // create placeholder once needed |
1642 | 8 | cleanupDominator = CGF.Builder.CreateAlignedLoad( |
1643 | 8 | CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy), |
1644 | 8 | CharUnits::One()); |
1645 | 16 | }; |
1646 | | |
1647 | 1.95k | unsigned curInitIndex = 0; |
1648 | | |
1649 | | // Emit initialization of base classes. |
1650 | 1.95k | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) { |
1651 | 1.40k | assert(E->getNumInits() >= CXXRD->getNumBases() && |
1652 | 1.40k | "missing initializer for base class"); |
1653 | 16 | for (auto &Base : CXXRD->bases()) { |
1654 | 16 | assert(!Base.isVirtual() && "should not see vbases here"); |
1655 | 0 | auto *BaseRD = Base.getType()->getAsCXXRecordDecl(); |
1656 | 16 | Address V = CGF.GetAddressOfDirectBaseInCompleteClass( |
1657 | 16 | Dest.getAddress(), CXXRD, BaseRD, |
1658 | 16 | /*isBaseVirtual*/ false); |
1659 | 16 | AggValueSlot AggSlot = AggValueSlot::forAddr( |
1660 | 16 | V, Qualifiers(), |
1661 | 16 | AggValueSlot::IsDestructed, |
1662 | 16 | AggValueSlot::DoesNotNeedGCBarriers, |
1663 | 16 | AggValueSlot::IsNotAliased, |
1664 | 16 | CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual())); |
1665 | 16 | CGF.EmitAggExpr(E->getInit(curInitIndex++), AggSlot); |
1666 | | |
1667 | 16 | if (QualType::DestructionKind dtorKind = |
1668 | 16 | Base.getType().isDestructedType()) { |
1669 | 8 | CGF.pushDestroy(dtorKind, V, Base.getType()); |
1670 | 8 | addCleanup(CGF.EHStack.stable_begin()); |
1671 | 8 | } |
1672 | 16 | } |
1673 | 1.40k | } |
1674 | | |
1675 | | // Prepare a 'this' for CXXDefaultInitExprs. |
1676 | 0 | CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress()); |
1677 | | |
1678 | 1.95k | if (record->isUnion()) { |
1679 | | // Only initialize one field of a union. The field itself is |
1680 | | // specified by the initializer list. |
1681 | 95 | if (!E->getInitializedFieldInUnion()) { |
1682 | | // Empty union; we have nothing to do. |
1683 | | |
1684 | 16 | #ifndef NDEBUG |
1685 | | // Make sure that it's really an empty union and not a failure of
1686 | | // semantic analysis. |
1687 | 16 | for (const auto *Field : record->fields()) |
1688 | 0 | assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed"); |
1689 | 16 | #endif |
1690 | 16 | return; |
1691 | 16 | } |
1692 | | |
1693 | | // FIXME: volatility |
1694 | 79 | FieldDecl *Field = E->getInitializedFieldInUnion(); |
1695 | | |
1696 | 79 | LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field); |
1697 | 79 | if (NumInitElements) { |
1698 | | // Store the initializer into the field |
1699 | 63 | EmitInitializationToLValue(E->getInit(0), FieldLoc); |
1700 | 63 | } else { |
1701 | | // Default-initialize to null. |
1702 | 16 | EmitNullInitializationToLValue(FieldLoc); |
1703 | 16 | } |
1704 | | |
1705 | 79 | return; |
1706 | 95 | } |
1707 | | |
1708 | | // Here we iterate over the fields; this makes it simpler to both |
1709 | | // default-initialize fields and skip over unnamed fields. |
1710 | 3.19k | for (const auto *field : record->fields()) {
1711 | | // We're done once we hit the flexible array member. |
1712 | 3.19k | if (field->getType()->isIncompleteArrayType()) |
1713 | 2 | break; |
1714 | | |
1715 | | // Always skip anonymous bitfields. |
1716 | 3.19k | if (field->isUnnamedBitfield()) |
1717 | 32 | continue; |
1718 | | |
1719 | | // We're done if we reach the end of the explicit initializers, we |
1720 | | // have a zeroed object, and the rest of the fields are |
1721 | | // zero-initializable. |
1722 | 3.16k | if (curInitIndex == NumInitElements && Dest.isZeroed() &&
1723 | 3.16k | CGF.getTypes().isZeroInitializable(E->getType()))
1724 | 0 | break; |
1725 | | |
1726 | | |
1727 | 3.16k | LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field); |
1728 | | // We never generate write-barriers for initialized fields.
1729 | 3.16k | LV.setNonGC(true); |
1730 | | |
1731 | 3.16k | if (curInitIndex < NumInitElements) { |
1732 | | // Store the initializer into the field. |
1733 | 3.16k | EmitInitializationToLValue(E->getInit(curInitIndex++), LV); |
1734 | 3.16k | } else { |
1735 | | // We're out of initializers; default-initialize to null |
1736 | 0 | EmitNullInitializationToLValue(LV); |
1737 | 0 | } |
1738 | | |
1739 | | // Push a destructor if necessary. |
1740 | | // FIXME: if we have an array of structures, all explicitly |
1741 | | // initialized, we can end up pushing a linear number of cleanups. |
1742 | 3.16k | bool pushedCleanup = false; |
1743 | 3.16k | if (QualType::DestructionKind dtorKind |
1744 | 3.16k | = field->getType().isDestructedType()) { |
1745 | 64 | assert(LV.isSimple()); |
1746 | 64 | if (CGF.needsEHCleanup(dtorKind)) { |
1747 | 8 | CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(), |
1748 | 8 | CGF.getDestroyer(dtorKind), false); |
1749 | 8 | addCleanup(CGF.EHStack.stable_begin()); |
1750 | 8 | pushedCleanup = true; |
1751 | 8 | } |
1752 | 64 | } |
1753 | | |
1754 | | // If the GEP didn't get used because of a dead zero init or something |
1755 | | // else, clean it up for -O0 builds and general tidiness. |
1756 | 3.16k | if (!pushedCleanup && LV.isSimple())
1757 | 3.11k | if (llvm::GetElementPtrInst *GEP = |
1758 | 3.11k | dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF))) |
1759 | 3.02k | if (GEP->use_empty()) |
1760 | 221 | GEP->eraseFromParent(); |
1761 | 3.16k | } |
1762 | | |
1763 | | // Deactivate all the partial cleanups in reverse order, which |
1764 | | // generally means popping them. |
1765 | 1.85k | assert((cleanupDominator || cleanups.empty()) && |
1766 | 1.85k | "Missing cleanupDominator before deactivating cleanup blocks"); |
1767 | 1.87k | for (unsigned i = cleanups.size(); i != 0; --i)
1768 | 16 | CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator); |
1769 | | |
1770 | | // Destroy the placeholder if we made one. |
1771 | 1.85k | if (cleanupDominator) |
1772 | 8 | cleanupDominator->eraseFromParent(); |
1773 | 1.85k | } |
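
A small sketch of the two record paths handled above; the types are illustrative, and the aggregate-with-base form assumes C++17:

    struct Base { int b; };
    struct Derived : Base { int x, y; };
    Derived d{ {1}, 2, 3 };   // the base subobject is emitted first, then each field

    union U { int i; float f; };
    U u = { 42 };             // only the selected union member is initialized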
1774 | | |
1775 | | void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, |
1776 | 39 | llvm::Value *outerBegin) { |
1777 | | // Emit the common subexpression. |
1778 | 39 | CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr()); |
1779 | | |
1780 | 39 | Address destPtr = EnsureSlot(E->getType()).getAddress(); |
1781 | 39 | uint64_t numElements = E->getArraySize().getZExtValue(); |
1782 | | |
1783 | 39 | if (!numElements) |
1784 | 0 | return; |
1785 | | |
1786 | | // destPtr is an array*. Construct an elementType* by drilling down a level. |
1787 | 39 | llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); |
1788 | 39 | llvm::Value *indices[] = {zero, zero}; |
1789 | 39 | llvm::Value *begin = Builder.CreateInBoundsGEP( |
1790 | 39 | destPtr.getElementType(), destPtr.getPointer(), indices, |
1791 | 39 | "arrayinit.begin"); |
1792 | | |
1793 | | // Prepare to special-case multidimensional array initialization: we avoid |
1794 | | // emitting multiple destructor loops in that case. |
1795 | 39 | if (!outerBegin) |
1796 | 32 | outerBegin = begin; |
1797 | 39 | ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr()); |
1798 | | |
1799 | 39 | QualType elementType = |
1800 | 39 | CGF.getContext().getAsArrayType(E->getType())->getElementType(); |
1801 | 39 | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); |
1802 | 39 | CharUnits elementAlign = |
1803 | 39 | destPtr.getAlignment().alignmentOfArrayElement(elementSize); |
1804 | 39 | llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType); |
1805 | | |
1806 | 39 | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
1807 | 39 | llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body"); |
1808 | | |
1809 | | // Jump into the body. |
1810 | 39 | CGF.EmitBlock(bodyBB); |
1811 | 39 | llvm::PHINode *index = |
1812 | 39 | Builder.CreatePHI(zero->getType(), 2, "arrayinit.index"); |
1813 | 39 | index->addIncoming(zero, entryBB); |
1814 | 39 | llvm::Value *element = |
1815 | 39 | Builder.CreateInBoundsGEP(llvmElementType, begin, index); |
1816 | | |
1817 | | // Prepare for a cleanup. |
1818 | 39 | QualType::DestructionKind dtorKind = elementType.isDestructedType(); |
1819 | 39 | EHScopeStack::stable_iterator cleanup; |
1820 | 39 | if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
1821 | 6 | if (outerBegin->getType() != element->getType()) |
1822 | 1 | outerBegin = Builder.CreateBitCast(outerBegin, element->getType()); |
1823 | 6 | CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType, |
1824 | 6 | elementAlign, |
1825 | 6 | CGF.getDestroyer(dtorKind)); |
1826 | 6 | cleanup = CGF.EHStack.stable_begin(); |
1827 | 33 | } else { |
1828 | 33 | dtorKind = QualType::DK_none; |
1829 | 33 | } |
1830 | | |
1831 | | // Emit the actual filler expression. |
1832 | 39 | { |
1833 | | // Temporaries created in an array initialization loop are destroyed |
1834 | | // at the end of each iteration. |
1835 | 39 | CodeGenFunction::RunCleanupsScope CleanupsScope(CGF); |
1836 | 39 | CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index); |
1837 | 39 | LValue elementLV = CGF.MakeAddrLValue( |
1838 | 39 | Address(element, llvmElementType, elementAlign), elementType); |
1839 | | |
1840 | 39 | if (InnerLoop) { |
1841 | | // If the subexpression is an ArrayInitLoopExpr, share its cleanup. |
1842 | 7 | auto elementSlot = AggValueSlot::forLValue( |
1843 | 7 | elementLV, CGF, AggValueSlot::IsDestructed, |
1844 | 7 | AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, |
1845 | 7 | AggValueSlot::DoesNotOverlap); |
1846 | 7 | AggExprEmitter(CGF, elementSlot, false) |
1847 | 7 | .VisitArrayInitLoopExpr(InnerLoop, outerBegin); |
1848 | 7 | } else |
1849 | 32 | EmitInitializationToLValue(E->getSubExpr(), elementLV); |
1850 | 39 | } |
1851 | | |
1852 | | // Move on to the next element. |
1853 | 39 | llvm::Value *nextIndex = Builder.CreateNUWAdd( |
1854 | 39 | index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next"); |
1855 | 39 | index->addIncoming(nextIndex, Builder.GetInsertBlock()); |
1856 | | |
1857 | | // Leave the loop if we're done. |
1858 | 39 | llvm::Value *done = Builder.CreateICmpEQ( |
1859 | 39 | nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements), |
1860 | 39 | "arrayinit.done"); |
1861 | 39 | llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end"); |
1862 | 39 | Builder.CreateCondBr(done, endBB, bodyBB); |
1863 | | |
1864 | 39 | CGF.EmitBlock(endBB); |
1865 | | |
1866 | | // Leave the partial-array cleanup if we entered one. |
1867 | 39 | if (dtorKind) |
1868 | 6 | CGF.DeactivateCleanupBlock(cleanup, index); |
1869 | 39 | } |
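
One common way an ArrayInitLoopExpr reaches this visitor is a lambda that captures an array by copy; the names below are illustrative:

    void captureArray() {
      int arr[4] = {1, 2, 3, 4};
      // Copying 'arr' into the closure is modeled as an ArrayInitLoopExpr,
      // which the code above lowers to an index PHI plus a per-element copy loop.
      auto f = [arr] { return arr[0]; };
      (void)f;
    }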
1870 | | |
1871 | 8 | void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) { |
1872 | 8 | AggValueSlot Dest = EnsureSlot(E->getType()); |
1873 | | |
1874 | 8 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
1875 | 8 | EmitInitializationToLValue(E->getBase(), DestLV); |
1876 | 8 | VisitInitListExpr(E->getUpdater()); |
1877 | 8 | } |
1878 | | |
1879 | | //===----------------------------------------------------------------------===// |
1880 | | // Entry Points into this File |
1881 | | //===----------------------------------------------------------------------===// |
1882 | | |
1883 | | /// GetNumNonZeroBytesInInit - Get an approximate count of the number of |
1884 | | /// non-zero bytes that will be stored when outputting the initializer for the |
1885 | | /// specified initializer expression. |
1886 | 4.95k | static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) { |
1887 | 4.95k | if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) |
1888 | 71 | E = MTE->getSubExpr(); |
1889 | 4.95k | E = E->IgnoreParenNoopCasts(CGF.getContext()); |
1890 | | |
1891 | | // 0 and 0.0 won't require any non-zero stores! |
1892 | 4.95k | if (isSimpleZero(E, CGF)) return CharUnits::Zero();
1893 | | |
1894 | | // If this is an initlist expr, sum up the sizes of the (present)
1895 | | // elements. If this is something weird, assume the whole thing is non-zero. |
1896 | 4.70k | const InitListExpr *ILE = dyn_cast<InitListExpr>(E); |
1897 | 4.70k | while (ILE && ILE->isTransparent())
1898 | 5 | ILE = dyn_cast<InitListExpr>(ILE->getInit(0)); |
1899 | 4.70k | if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType()))
1900 | 4.38k | return CGF.getContext().getTypeSizeInChars(E->getType()); |
1901 | | |
1902 | | // InitListExprs for structs have to be handled carefully. If there are |
1903 | | // reference members, we need to consider the size of the reference, not the |
1904 | | // referencee. InitListExprs for unions and arrays can't have references. |
1905 | 316 | if (const RecordType *RT = E->getType()->getAs<RecordType>()) { |
1906 | 199 | if (!RT->isUnionType()) { |
1907 | 176 | RecordDecl *SD = RT->getDecl(); |
1908 | 176 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
1909 | | |
1910 | 176 | unsigned ILEElement = 0; |
1911 | 176 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD)) |
1912 | 136 | while (ILEElement != CXXRD->getNumBases()) |
1913 | 0 | NumNonZeroBytes += |
1914 | 0 | GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF); |
1915 | 598 | for (const auto *Field : SD->fields()) { |
1916 | | // We're done once we hit the flexible array member or run out of |
1917 | | // InitListExpr elements. |
1918 | 598 | if (Field->getType()->isIncompleteArrayType() || |
1919 | 598 | ILEElement == ILE->getNumInits()) |
1920 | 0 | break; |
1921 | 598 | if (Field->isUnnamedBitfield()) |
1922 | 0 | continue; |
1923 | | |
1924 | 598 | const Expr *E = ILE->getInit(ILEElement++); |
1925 | | |
1926 | | // Reference values are always non-null and have the width of a pointer. |
1927 | 598 | if (Field->getType()->isReferenceType()) |
1928 | 4 | NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits( |
1929 | 4 | CGF.getTarget().getPointerWidth(0)); |
1930 | 594 | else |
1931 | 594 | NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF); |
1932 | 598 | } |
1933 | | |
1934 | 176 | return NumNonZeroBytes; |
1935 | 176 | } |
1936 | 199 | } |
1937 | | |
1938 | | // FIXME: This overestimates the number of non-zero bytes for bit-fields. |
1939 | 140 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
1940 | 547 | for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
1941 | 407 | NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF); |
1942 | 140 | return NumNonZeroBytes; |
1943 | 316 | } |
1944 | | |
1945 | | /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of |
1946 | | /// zeros in it, emit a memset and avoid storing the individual zeros. |
1947 | | /// |
1948 | | static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, |
1949 | 84.4k | CodeGenFunction &CGF) { |
1950 | | // If the slot is already known to be zeroed, nothing to do. Don't mess with |
1951 | | // volatile stores. |
1952 | 84.4k | if (Slot.isZeroed() || Slot.isVolatile() || !Slot.getAddress().isValid())
1953 | 1.63k | return; |
1954 | | |
1955 | | // C++ objects with a user-declared constructor don't need zero'ing. |
1956 | 82.8k | if (CGF.getLangOpts().CPlusPlus) |
1957 | 77.4k | if (const RecordType *RT = CGF.getContext() |
1958 | 77.4k | .getBaseElementType(E->getType())->getAs<RecordType>()) { |
1959 | 77.1k | const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); |
1960 | 77.1k | if (RD->hasUserDeclaredConstructor()) |
1961 | 55.2k | return; |
1962 | 77.1k | } |
1963 | | |
1964 | | // If the type is 16-bytes or smaller, prefer individual stores over memset. |
1965 | 27.5k | CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType()); |
1966 | 27.5k | if (Size <= CharUnits::fromQuantity(16)) |
1967 | 23.6k | return; |
1968 | | |
1969 | | // Check to see if over 3/4 of the initializer are known to be zero. If so, |
1970 | | // we prefer to emit memset + individual stores for the rest. |
1971 | 3.95k | CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF); |
1972 | 3.95k | if (NumNonZeroBytes*4 > Size) |
1973 | 3.87k | return; |
1974 | | |
1975 | | // Okay, it seems like a good idea to use an initial memset, emit the call. |
1976 | 85 | llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity()); |
1977 | | |
1978 | 85 | Address Loc = Slot.getAddress(); |
1979 | 85 | Loc = CGF.Builder.CreateElementBitCast(Loc, CGF.Int8Ty); |
1980 | 85 | CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false); |
1981 | | |
1982 | | // Tell the AggExprEmitter that the slot is known zero. |
1983 | 85 | Slot.setZeroed(); |
1984 | 85 | } |
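
A hedged sketch of an initializer that satisfies the heuristic above (larger than 16 bytes and more than 3/4 zero); the struct and sizes are illustrative and the exact lowering may differ by target:

    struct Big { int first; int rest[63]; };   // roughly 256 bytes

    void init(int n) {
      // Roughly: memset the whole slot to zero, then emit a single store of
      // 'n' into b.first instead of one store per zero element.
      Big b = { n };
      (void)b;
    }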
1985 | | |
1986 | | |
1987 | | |
1988 | | |
1989 | | /// EmitAggExpr - Emit the computation of the specified expression of aggregate |
1990 | | /// type. The result is computed into DestPtr. Note that if DestPtr is null, |
1991 | | /// the value of the aggregate expression is not needed. If VolatileDest is |
1992 | | /// true, DestPtr cannot be 0. |
1993 | 84.4k | void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) { |
1994 | 84.4k | assert(E && hasAggregateEvaluationKind(E->getType()) && |
1995 | 84.4k | "Invalid aggregate expression to emit"); |
1996 | 0 | assert((Slot.getAddress().isValid() || Slot.isIgnored()) && |
1997 | 84.4k | "slot has bits but no address"); |
1998 | | |
1999 | | // Optimize the slot if possible. |
2000 | 0 | CheckAggExprForMemSetUse(Slot, E, *this); |
2001 | | |
2002 | 84.4k | AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E)); |
2003 | 84.4k | } |
2004 | | |
2005 | 126 | LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) { |
2006 | 126 | assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!"); |
2007 | 0 | Address Temp = CreateMemTemp(E->getType()); |
2008 | 126 | LValue LV = MakeAddrLValue(Temp, E->getType()); |
2009 | 126 | EmitAggExpr(E, AggValueSlot::forLValue( |
2010 | 126 | LV, *this, AggValueSlot::IsNotDestructed, |
2011 | 126 | AggValueSlot::DoesNotNeedGCBarriers, |
2012 | 126 | AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); |
2013 | 126 | return LV; |
2014 | 126 | } |
2015 | | |
2016 | | AggValueSlot::Overlap_t |
2017 | 4.68k | CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) { |
2018 | 4.68k | if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType())
2019 | 4.68k | return AggValueSlot::DoesNotOverlap; |
2020 | | |
2021 | | // If the field lies entirely within the enclosing class's nvsize, its tail |
2022 | | // padding cannot overlap any already-initialized object. (The only subobjects |
2023 | | // with greater addresses that might already be initialized are vbases.) |
2024 | 3 | const RecordDecl *ClassRD = FD->getParent(); |
2025 | 3 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD); |
2026 | 3 | if (Layout.getFieldOffset(FD->getFieldIndex()) + |
2027 | 3 | getContext().getTypeSize(FD->getType()) <= |
2028 | 3 | (uint64_t)getContext().toBits(Layout.getNonVirtualSize())) |
2029 | 1 | return AggValueSlot::DoesNotOverlap; |
2030 | | |
2031 | | // The tail padding may contain values we need to preserve. |
2032 | 2 | return AggValueSlot::MayOverlap; |
2033 | 3 | } |
2034 | | |
2035 | | AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit( |
2036 | 9.81k | const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) { |
2037 | | // If the most-derived object is a field declared with [[no_unique_address]], |
2038 | | // the tail padding of any virtual base could be reused for other subobjects |
2039 | | // of that field's class. |
2040 | 9.81k | if (IsVirtual) |
2041 | 783 | return AggValueSlot::MayOverlap; |
2042 | | |
2043 | | // If the base class is laid out entirely within the nvsize of the derived |
2044 | | // class, its tail padding cannot yet be initialized, so we can issue |
2045 | | // stores at the full width of the base class. |
2046 | 9.03k | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); |
2047 | 9.03k | if (Layout.getBaseClassOffset(BaseRD) + |
2048 | 9.03k | getContext().getASTRecordLayout(BaseRD).getSize() <= |
2049 | 9.03k | Layout.getNonVirtualSize()) |
2050 | 8.73k | return AggValueSlot::DoesNotOverlap; |
2051 | | |
2052 | | // The tail padding may contain values we need to preserve. |
2053 | 294 | return AggValueSlot::MayOverlap; |
2054 | 9.03k | } |
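
As an illustration of the non-virtual case above, assuming an Itanium-style layout in which a non-POD base's tail padding can be reused by the derived class:

    struct B { B() {} int i; char c; };  // sizeof(B) == 8, data size == 5
    struct D : B { char d; };            // 'd' may live in B's tail padding

    // When the B subobject of a D is initialized, 'd' has not been initialized
    // yet, so stores may cover the full width of B (DoesNotOverlap). A virtual
    // base, by contrast, is always treated as MayOverlap.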
2055 | | |
2056 | | void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty, |
2057 | | AggValueSlot::Overlap_t MayOverlap, |
2058 | 14.0k | bool isVolatile) { |
2059 | 14.0k | assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex"); |
2060 | | |
2061 | 0 | Address DestPtr = Dest.getAddress(*this); |
2062 | 14.0k | Address SrcPtr = Src.getAddress(*this); |
2063 | | |
2064 | 14.0k | if (getLangOpts().CPlusPlus) { |
2065 | 11.1k | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
2066 | 9.64k | CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl()); |
2067 | 9.64k | assert((Record->hasTrivialCopyConstructor() || |
2068 | 9.64k | Record->hasTrivialCopyAssignment() || |
2069 | 9.64k | Record->hasTrivialMoveConstructor() || |
2070 | 9.64k | Record->hasTrivialMoveAssignment() || |
2071 | 9.64k | Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) && |
2072 | 9.64k | "Trying to aggregate-copy a type without a trivial copy/move " |
2073 | 9.64k | "constructor or assignment operator"); |
2074 | | // Ignore empty classes in C++. |
2075 | 9.64k | if (Record->isEmpty()) |
2076 | 2.93k | return; |
2077 | 9.64k | } |
2078 | 11.1k | } |
2079 | | |
2080 | 11.1k | if (getLangOpts().CUDAIsDevice) { |
2081 | 0 | if (Ty->isCUDADeviceBuiltinSurfaceType()) { |
2082 | 0 | if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest, |
2083 | 0 | Src)) |
2084 | 0 | return; |
2085 | 0 | } else if (Ty->isCUDADeviceBuiltinTextureType()) { |
2086 | 0 | if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest, |
2087 | 0 | Src)) |
2088 | 0 | return; |
2089 | 0 | } |
2090 | 0 | } |
2091 | | |
2092 | | // Aggregate assignment turns into llvm.memcpy. This is almost valid per |
2093 | | // C99 6.5.16.1p3, which states "If the value being stored in an object is |
2094 | | // read from another object that overlaps in any way the storage of the first
2095 | | // object, then the overlap shall be exact and the two objects shall have |
2096 | | // qualified or unqualified versions of a compatible type." |
2097 | | // |
2098 | | // memcpy is not defined if the source and destination pointers are exactly |
2099 | | // equal, but other compilers do this optimization, and almost every memcpy |
2100 | | // implementation handles this case safely. If there is a libc that does not |
2101 | | // safely handle this, we can add a target hook. |
2102 | | |
2103 | | // Get data size info for this aggregate. Don't copy the tail padding if this |
2104 | | // might be a potentially-overlapping subobject, since the tail padding might |
2105 | | // be occupied by a different object. Otherwise, copying it is fine. |
2106 | 11.1k | TypeInfoChars TypeInfo; |
2107 | 11.1k | if (MayOverlap) |
2108 | 4.83k | TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty); |
2109 | 6.31k | else |
2110 | 6.31k | TypeInfo = getContext().getTypeInfoInChars(Ty); |
2111 | | |
2112 | 11.1k | llvm::Value *SizeVal = nullptr; |
2113 | 11.1k | if (TypeInfo.Width.isZero()) { |
2114 | | // But note that getTypeInfo returns 0 for a VLA. |
2115 | 158 | if (auto *VAT = dyn_cast_or_null<VariableArrayType>( |
2116 | 158 | getContext().getAsArrayType(Ty))) { |
2117 | 100 | QualType BaseEltTy; |
2118 | 100 | SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr); |
2119 | 100 | TypeInfo = getContext().getTypeInfoInChars(BaseEltTy); |
2120 | 100 | assert(!TypeInfo.Width.isZero()); |
2121 | 0 | SizeVal = Builder.CreateNUWMul( |
2122 | 100 | SizeVal, |
2123 | 100 | llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity())); |
2124 | 100 | } |
2125 | 158 | } |
2126 | 11.1k | if (!SizeVal) { |
2127 | 11.0k | SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()); |
2128 | 11.0k | } |
2129 | | |
2130 | | // FIXME: If we have a volatile struct, the optimizer can remove what might |
2131 | | // appear to be `extra' memory ops: |
2132 | | // |
2133 | | // volatile struct { int i; } a, b; |
2134 | | // |
2135 | | // int main() { |
2136 | | // a = b; |
2137 | | // a = b; |
2138 | | // } |
2139 | | // |
2140 | | // we need to use a different call here. We use isVolatile to indicate when |
2141 | | // either the source or the destination is volatile. |
2142 | | |
2143 | 11.1k | DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty); |
2144 | 11.1k | SrcPtr = Builder.CreateElementBitCast(SrcPtr, Int8Ty); |
2145 | | |
2146 | | // Don't do any of the memmove_collectable tests if GC isn't set. |
2147 | 11.1k | if (CGM.getLangOpts().getGC() == LangOptions::NonGC) { |
2148 | | // fall through |
2149 | 11.1k | } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
2150 | 28 | RecordDecl *Record = RecordTy->getDecl(); |
2151 | 28 | if (Record->hasObjectMember()) { |
2152 | 23 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, |
2153 | 23 | SizeVal); |
2154 | 23 | return; |
2155 | 23 | } |
2156 | 28 | } else if (Ty->isArrayType()) {
2157 | 2 | QualType BaseType = getContext().getBaseElementType(Ty); |
2158 | 2 | if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) { |
2159 | 1 | if (RecordTy->getDecl()->hasObjectMember()) { |
2160 | 1 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, |
2161 | 1 | SizeVal); |
2162 | 1 | return; |
2163 | 1 | } |
2164 | 1 | } |
2165 | 2 | } |
2166 | | |
2167 | 11.1k | auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile); |
2168 | | |
2169 | | // Determine the metadata to describe the position of any padding in this |
2170 | | // memcpy, as well as the TBAA tags for the members of the struct, in case |
2171 | | // the optimizer wishes to expand it in to scalar memory operations. |
2172 | 11.1k | if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty)) |
2173 | 310 | Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag); |
2174 | | |
2175 | 11.1k | if (CGM.getCodeGenOpts().NewStructPathTBAA) { |
2176 | 7 | TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer( |
2177 | 7 | Dest.getTBAAInfo(), Src.getTBAAInfo()); |
2178 | 7 | CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo); |
2179 | 7 | } |
2180 | 11.1k | } |
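
For reference, a trivially copyable aggregate assignment like the sketch below is the kind of source this function lowers to a single llvm.memcpy; the type and size are illustrative:

    struct S { int a[16]; };                 // 64 bytes, trivially copyable

    void copy(S *dst, const S *src) {
      *dst = *src;   // emitted as an llvm.memcpy of the data size between the
                     // two addresses, possibly decorated with tbaa.struct metadata
    }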