/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGExprAgg.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Aggregate Expr nodes as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGCXXABI.h" |
14 | | #include "CGObjCRuntime.h" |
15 | | #include "CodeGenFunction.h" |
16 | | #include "CodeGenModule.h" |
17 | | #include "ConstantEmitter.h" |
18 | | #include "TargetInfo.h" |
19 | | #include "clang/AST/ASTContext.h" |
20 | | #include "clang/AST/Attr.h" |
21 | | #include "clang/AST/DeclCXX.h" |
22 | | #include "clang/AST/DeclTemplate.h" |
23 | | #include "clang/AST/StmtVisitor.h" |
24 | | #include "llvm/IR/Constants.h" |
25 | | #include "llvm/IR/Function.h" |
26 | | #include "llvm/IR/GlobalVariable.h" |
27 | | #include "llvm/IR/IntrinsicInst.h" |
28 | | #include "llvm/IR/Intrinsics.h" |
29 | | using namespace clang; |
30 | | using namespace CodeGen; |
31 | | |
32 | | //===----------------------------------------------------------------------===// |
33 | | // Aggregate Expression Emitter |
34 | | //===----------------------------------------------------------------------===// |
35 | | |
36 | | namespace { |
37 | | class AggExprEmitter : public StmtVisitor<AggExprEmitter> { |
38 | | CodeGenFunction &CGF; |
39 | | CGBuilderTy &Builder; |
40 | | AggValueSlot Dest; |
41 | | bool IsResultUnused; |
42 | | |
43 | 73.7k | AggValueSlot EnsureSlot(QualType T) { |
44 | 73.7k | if (!Dest.isIgnored()) return Dest73.6k ; |
45 | 78 | return CGF.CreateAggTemp(T, "agg.tmp.ensured"); |
46 | 73.7k | } |
47 | 7.08k | void EnsureDest(QualType T) { |
48 | 7.08k | if (!Dest.isIgnored()) return6.91k ; |
49 | 174 | Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured"); |
50 | 174 | } |
51 | | |
52 | | // Calls `Fn` with a valid return value slot, potentially creating a temporary |
53 | | // to do so. If a temporary is created, an appropriate copy into `Dest` will |
54 | | // be emitted, as will lifetime markers. |
55 | | // |
56 | | // The given function should take a ReturnValueSlot, and return an RValue that |
57 | | // points to said slot. |
58 | | void withReturnValueSlot(const Expr *E, |
59 | | llvm::function_ref<RValue(ReturnValueSlot)> Fn); |
60 | | |
61 | | public: |
62 | | AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest, bool IsResultUnused) |
63 | 87.5k | : CGF(cgf), Builder(CGF.Builder), Dest(Dest), |
64 | 87.5k | IsResultUnused(IsResultUnused) { } |
65 | | |
66 | | //===--------------------------------------------------------------------===// |
67 | | // Utilities |
68 | | //===--------------------------------------------------------------------===// |
69 | | |
70 | | /// EmitAggLoadOfLValue - Given an expression with aggregate type that |
71 | | /// represents a value lvalue, this method emits the address of the lvalue, |
72 | | /// then loads the result into DestPtr. |
73 | | void EmitAggLoadOfLValue(const Expr *E); |
74 | | |
75 | | enum ExprValueKind { |
76 | | EVK_RValue, |
77 | | EVK_NonRValue |
78 | | }; |
79 | | |
80 | | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
81 | | /// SrcIsRValue is true if source comes from an RValue. |
82 | | void EmitFinalDestCopy(QualType type, const LValue &src, |
83 | | ExprValueKind SrcValueKind = EVK_NonRValue); |
84 | | void EmitFinalDestCopy(QualType type, RValue src); |
85 | | void EmitCopy(QualType type, const AggValueSlot &dest, |
86 | | const AggValueSlot &src); |
87 | | |
88 | | void EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, QualType ArrayQTy, |
89 | | Expr *ExprToVisit, ArrayRef<Expr *> Args, |
90 | | Expr *ArrayFiller); |
91 | | |
92 | 3.76k | AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) { |
93 | 3.76k | if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T)24 ) |
94 | 20 | return AggValueSlot::NeedsGCBarriers; |
95 | 3.74k | return AggValueSlot::DoesNotNeedGCBarriers; |
96 | 3.76k | } |
97 | | |
98 | | bool TypeRequiresGCollection(QualType T); |
99 | | |
100 | | //===--------------------------------------------------------------------===// |
101 | | // Visitor Methods |
102 | | //===--------------------------------------------------------------------===// |
103 | | |
104 | 129k | void Visit(Expr *E) { |
105 | 129k | ApplyDebugLocation DL(CGF, E); |
106 | 129k | StmtVisitor<AggExprEmitter>::Visit(E); |
107 | 129k | } |
108 | | |
109 | 0 | void VisitStmt(Stmt *S) { |
110 | 0 | CGF.ErrorUnsupported(S, "aggregate expression"); |
111 | 0 | } |
112 | 103 | void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); } |
113 | 0 | void VisitGenericSelectionExpr(GenericSelectionExpr *GE) { |
114 | 0 | Visit(GE->getResultExpr()); |
115 | 0 | } |
116 | 5 | void VisitCoawaitExpr(CoawaitExpr *E) { |
117 | 5 | CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused); |
118 | 5 | } |
119 | 0 | void VisitCoyieldExpr(CoyieldExpr *E) { |
120 | 0 | CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused); |
121 | 0 | } |
122 | 0 | void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); } |
123 | 639 | void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); } |
124 | 0 | void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { |
125 | 0 | return Visit(E->getReplacement()); |
126 | 0 | } |
127 | | |
128 | 16 | void VisitConstantExpr(ConstantExpr *E) { |
129 | 16 | EnsureDest(E->getType()); |
130 | | |
131 | 16 | if (llvm::Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) { |
132 | 15 | Address StoreDest = Dest.getAddress(); |
133 | | // The emitted value is guaranteed to have the same size as the |
134 | | // destination but can have a different type. Just do a bitcast in this |
135 | | // case to avoid incorrect GEPs. |
136 | 15 | if (Result->getType() != StoreDest.getType()) |
137 | 15 | StoreDest = StoreDest.withElementType(Result->getType()); |
138 | | |
139 | 15 | CGF.EmitAggregateStore(Result, StoreDest, |
140 | 15 | E->getType().isVolatileQualified()); |
141 | 15 | return; |
142 | 15 | } |
143 | 1 | return Visit(E->getSubExpr()); |
144 | 16 | } |
145 | | |
146 | | // l-values. |
147 | 2.60k | void VisitDeclRefExpr(DeclRefExpr *E) { EmitAggLoadOfLValue(E); } |
148 | 7 | void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); } |
149 | 100 | void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); } |
150 | 23 | void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); } |
151 | | void VisitCompoundLiteralExpr(CompoundLiteralExpr *E); |
152 | 6 | void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { |
153 | 6 | EmitAggLoadOfLValue(E); |
154 | 6 | } |
155 | 0 | void VisitPredefinedExpr(const PredefinedExpr *E) { |
156 | 0 | EmitAggLoadOfLValue(E); |
157 | 0 | } |
158 | | |
159 | | // Operators. |
160 | | void VisitCastExpr(CastExpr *E); |
161 | | void VisitCallExpr(const CallExpr *E); |
162 | | void VisitStmtExpr(const StmtExpr *E); |
163 | | void VisitBinaryOperator(const BinaryOperator *BO); |
164 | | void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO); |
165 | | void VisitBinAssign(const BinaryOperator *E); |
166 | | void VisitBinComma(const BinaryOperator *E); |
167 | | void VisitBinCmp(const BinaryOperator *E); |
168 | 0 | void VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) { |
169 | 0 | Visit(E->getSemanticForm()); |
170 | 0 | } |
171 | | |
172 | | void VisitObjCMessageExpr(ObjCMessageExpr *E); |
173 | 5 | void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { |
174 | 5 | EmitAggLoadOfLValue(E); |
175 | 5 | } |
176 | | |
177 | | void VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E); |
178 | | void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO); |
179 | | void VisitChooseExpr(const ChooseExpr *CE); |
180 | | void VisitInitListExpr(InitListExpr *E); |
181 | | void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args, |
182 | | FieldDecl *InitializedFieldInUnion, |
183 | | Expr *ArrayFiller); |
184 | | void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, |
185 | | llvm::Value *outerBegin = nullptr); |
186 | | void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E); |
187 | 0 | void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing. |
188 | 392 | void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { |
189 | 392 | CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE); |
190 | 392 | Visit(DAE->getExpr()); |
191 | 392 | } |
192 | 287 | void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { |
193 | 287 | CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE); |
194 | 287 | Visit(DIE->getExpr()); |
195 | 287 | } |
196 | | void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E); |
197 | | void VisitCXXConstructExpr(const CXXConstructExpr *E); |
198 | | void VisitCXXInheritedCtorInitExpr(const CXXInheritedCtorInitExpr *E); |
199 | | void VisitLambdaExpr(LambdaExpr *E); |
200 | | void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E); |
201 | | void VisitExprWithCleanups(ExprWithCleanups *E); |
202 | | void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E); |
203 | 0 | void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); } |
204 | | void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E); |
205 | | void VisitOpaqueValueExpr(OpaqueValueExpr *E); |
206 | | |
207 | 57 | void VisitPseudoObjectExpr(PseudoObjectExpr *E) { |
208 | 57 | if (E->isGLValue()) { |
209 | 0 | LValue LV = CGF.EmitPseudoObjectLValue(E); |
210 | 0 | return EmitFinalDestCopy(E->getType(), LV); |
211 | 0 | } |
212 | | |
213 | 57 | AggValueSlot Slot = EnsureSlot(E->getType()); |
214 | 57 | bool NeedsDestruction = |
215 | 57 | !Slot.isExternallyDestructed() && |
216 | 57 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct13 ; |
217 | 57 | if (NeedsDestruction) |
218 | 1 | Slot.setExternallyDestructed(); |
219 | 57 | CGF.EmitPseudoObjectRValue(E, Slot); |
220 | 57 | if (NeedsDestruction) |
221 | 1 | CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Slot.getAddress(), |
222 | 1 | E->getType()); |
223 | 57 | } |
224 | | |
225 | | void VisitVAArgExpr(VAArgExpr *E); |
226 | | void VisitCXXParenListInitExpr(CXXParenListInitExpr *E); |
227 | | void VisitCXXParenListOrInitListExpr(Expr *ExprToVisit, ArrayRef<Expr *> Args, |
228 | | Expr *ArrayFiller); |
229 | | |
230 | | void EmitInitializationToLValue(Expr *E, LValue Address); |
231 | | void EmitNullInitializationToLValue(LValue Address); |
232 | | // case Expr::ChooseExprClass: |
233 | 1 | void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); } |
234 | 25 | void VisitAtomicExpr(AtomicExpr *E) { |
235 | 25 | RValue Res = CGF.EmitAtomicExpr(E); |
236 | 25 | EmitFinalDestCopy(E->getType(), Res); |
237 | 25 | } |
238 | | }; |
239 | | } // end anonymous namespace. |
240 | | |
241 | | //===----------------------------------------------------------------------===// |
242 | | // Utilities |
243 | | //===----------------------------------------------------------------------===// |
244 | | |
245 | | /// EmitAggLoadOfLValue - Given an expression with aggregate type that |
246 | | /// represents a value lvalue, this method emits the address of the lvalue, |
247 | | /// then loads the result into DestPtr. |
248 | 2.76k | void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) { |
249 | 2.76k | LValue LV = CGF.EmitLValue(E); |
250 | | |
251 | | // If the type of the l-value is atomic, then do an atomic load. |
252 | 2.76k | if (LV.getType()->isAtomicType() || CGF.LValueIsSuitableForInlineAtomic(LV)2.75k ) { |
253 | 13 | CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest); |
254 | 13 | return; |
255 | 13 | } |
256 | | |
257 | 2.74k | EmitFinalDestCopy(E->getType(), LV); |
258 | 2.74k | } |
259 | | |
260 | | /// True if the given aggregate type requires special GC API calls. |
261 | 24 | bool AggExprEmitter::TypeRequiresGCollection(QualType T) { |
262 | | // Only record types have members that might require garbage collection. |
263 | 24 | const RecordType *RecordTy = T->getAs<RecordType>(); |
264 | 24 | if (!RecordTy) return false0 ; |
265 | | |
266 | | // Don't mess with non-trivial C++ types. |
267 | 24 | RecordDecl *Record = RecordTy->getDecl(); |
268 | 24 | if (isa<CXXRecordDecl>(Record) && |
269 | 24 | (5 cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor()5 || |
270 | 5 | !cast<CXXRecordDecl>(Record)->hasTrivialDestructor())) |
271 | 0 | return false; |
272 | | |
273 | | // Check whether the type has an object member. |
274 | 24 | return Record->hasObjectMember(); |
275 | 24 | } |
276 | | |
277 | | void AggExprEmitter::withReturnValueSlot( |
278 | 9.48k | const Expr *E, llvm::function_ref<RValue(ReturnValueSlot)> EmitCall) { |
279 | 9.48k | QualType RetTy = E->getType(); |
280 | 9.48k | bool RequiresDestruction = |
281 | 9.48k | !Dest.isExternallyDestructed() && |
282 | 9.48k | RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct1.86k ; |
283 | | |
284 | | // If it makes no observable difference, save a memcpy + temporary. |
285 | | // |
286 | | // We need to always provide our own temporary if destruction is required. |
287 | | // Otherwise, EmitCall will emit its own, notice that it's "unused", and end |
288 | | // its lifetime before we have the chance to emit a proper destructor call. |
289 | 9.48k | bool UseTemp = Dest.isPotentiallyAliased() || Dest.requiresGCollection()9.31k || |
290 | 9.48k | (9.31k RequiresDestruction9.31k && !Dest.getAddress().isValid()12 ); |
291 | | |
292 | 9.48k | Address RetAddr = Address::invalid(); |
293 | 9.48k | Address RetAllocaAddr = Address::invalid(); |
294 | | |
295 | 9.48k | EHScopeStack::stable_iterator LifetimeEndBlock; |
296 | 9.48k | llvm::Value *LifetimeSizePtr = nullptr; |
297 | 9.48k | llvm::IntrinsicInst *LifetimeStartInst = nullptr; |
298 | 9.48k | if (!UseTemp) { |
299 | 9.29k | RetAddr = Dest.getAddress(); |
300 | 9.29k | } else { |
301 | 190 | RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr); |
302 | 190 | llvm::TypeSize Size = |
303 | 190 | CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy)); |
304 | 190 | LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer()); |
305 | 190 | if (LifetimeSizePtr) { |
306 | 7 | LifetimeStartInst = |
307 | 7 | cast<llvm::IntrinsicInst>(std::prev(Builder.GetInsertPoint())); |
308 | 7 | assert(LifetimeStartInst->getIntrinsicID() == |
309 | 7 | llvm::Intrinsic::lifetime_start && |
310 | 7 | "Last insertion wasn't a lifetime.start?"); |
311 | | |
312 | 7 | CGF.pushFullExprCleanup<CodeGenFunction::CallLifetimeEnd>( |
313 | 7 | NormalEHLifetimeMarker, RetAllocaAddr, LifetimeSizePtr); |
314 | 7 | LifetimeEndBlock = CGF.EHStack.stable_begin(); |
315 | 7 | } |
316 | 190 | } |
317 | | |
318 | 9.48k | RValue Src = |
319 | 9.48k | EmitCall(ReturnValueSlot(RetAddr, Dest.isVolatile(), IsResultUnused, |
320 | 9.48k | Dest.isExternallyDestructed())); |
321 | | |
322 | 9.48k | if (!UseTemp) |
323 | 9.29k | return; |
324 | | |
325 | 190 | assert(Dest.isIgnored() || Dest.getPointer() != Src.getAggregatePointer()); |
326 | 190 | EmitFinalDestCopy(E->getType(), Src); |
327 | | |
328 | 190 | if (!RequiresDestruction && LifetimeStartInst178 ) { |
329 | | // If there's no dtor to run, the copy was the last use of our temporary. |
330 | | // Since we're not guaranteed to be in an ExprWithCleanups, clean up |
331 | | // eagerly. |
332 | 5 | CGF.DeactivateCleanupBlock(LifetimeEndBlock, LifetimeStartInst); |
333 | 5 | CGF.EmitLifetimeEnd(LifetimeSizePtr, RetAllocaAddr.getPointer()); |
334 | 5 | } |
335 | 190 | } |
336 | | |
337 | | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
338 | 219 | void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src) { |
339 | 219 | assert(src.isAggregate() && "value must be aggregate value!"); |
340 | 219 | LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddress(), type); |
341 | 219 | EmitFinalDestCopy(type, srcLV, EVK_RValue); |
342 | 219 | } |
343 | | |
344 | | /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired. |
345 | | void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src, |
346 | 3.87k | ExprValueKind SrcValueKind) { |
347 | | // If Dest is ignored, then we're evaluating an aggregate expression |
348 | | // in a context that doesn't care about the result. Note that loads |
349 | | // from volatile l-values force the existence of a non-ignored |
350 | | // destination. |
351 | 3.87k | if (Dest.isIgnored()) |
352 | 515 | return; |
353 | | |
354 | | // Copy non-trivial C structs here. |
355 | 3.36k | LValue DstLV = CGF.MakeAddrLValue( |
356 | 3.36k | Dest.getAddress(), Dest.isVolatile() ? type.withVolatile()25 : type3.33k ); |
357 | | |
358 | 3.36k | if (SrcValueKind == EVK_RValue) { |
359 | 207 | if (type.isNonTrivialToPrimitiveDestructiveMove() == QualType::PCK_Struct) { |
360 | 8 | if (Dest.isPotentiallyAliased()) |
361 | 8 | CGF.callCStructMoveAssignmentOperator(DstLV, src); |
362 | 0 | else |
363 | 0 | CGF.callCStructMoveConstructor(DstLV, src); |
364 | 8 | return; |
365 | 8 | } |
366 | 3.15k | } else { |
367 | 3.15k | if (type.isNonTrivialToPrimitiveCopy() == QualType::PCK_Struct) { |
368 | 81 | if (Dest.isPotentiallyAliased()) |
369 | 23 | CGF.callCStructCopyAssignmentOperator(DstLV, src); |
370 | 58 | else |
371 | 58 | CGF.callCStructCopyConstructor(DstLV, src); |
372 | 81 | return; |
373 | 81 | } |
374 | 3.15k | } |
375 | | |
376 | 3.27k | AggValueSlot srcAgg = AggValueSlot::forLValue( |
377 | 3.27k | src, CGF, AggValueSlot::IsDestructed, needsGC(type), |
378 | 3.27k | AggValueSlot::IsAliased, AggValueSlot::MayOverlap); |
379 | 3.27k | EmitCopy(type, Dest, srcAgg); |
380 | 3.27k | } |
381 | | |
382 | | /// Perform a copy from the source into the destination. |
383 | | /// |
384 | | /// \param type - the type of the aggregate being copied; qualifiers are |
385 | | /// ignored |
386 | | void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest, |
387 | 3.27k | const AggValueSlot &src) { |
388 | 3.27k | if (dest.requiresGCollection()) { |
389 | 6 | CharUnits sz = dest.getPreferredSize(CGF.getContext(), type); |
390 | 6 | llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity()); |
391 | 6 | CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, |
392 | 6 | dest.getAddress(), |
393 | 6 | src.getAddress(), |
394 | 6 | size); |
395 | 6 | return; |
396 | 6 | } |
397 | | |
398 | | // If the result of the assignment is used, copy the LHS there also. |
399 | | // It's volatile if either side is. Use the minimum alignment of |
400 | | // the two sides. |
401 | 3.27k | LValue DestLV = CGF.MakeAddrLValue(dest.getAddress(), type); |
402 | 3.27k | LValue SrcLV = CGF.MakeAddrLValue(src.getAddress(), type); |
403 | 3.27k | CGF.EmitAggregateCopy(DestLV, SrcLV, type, dest.mayOverlap(), |
404 | 3.27k | dest.isVolatile() || src.isVolatile()3.25k ); |
405 | 3.27k | } |
406 | | |
407 | | /// Emit the initializer for a std::initializer_list initialized with a |
408 | | /// real initializer list. |
409 | | void |
410 | 140 | AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) { |
411 | | // Emit an array containing the elements. The array is externally destructed |
412 | | // if the std::initializer_list object is. |
413 | 140 | ASTContext &Ctx = CGF.getContext(); |
414 | 140 | LValue Array = CGF.EmitLValue(E->getSubExpr()); |
415 | 140 | assert(Array.isSimple() && "initializer_list array not a simple lvalue"); |
416 | 140 | Address ArrayPtr = Array.getAddress(CGF); |
417 | | |
418 | 140 | const ConstantArrayType *ArrayType = |
419 | 140 | Ctx.getAsConstantArrayType(E->getSubExpr()->getType()); |
420 | 140 | assert(ArrayType && "std::initializer_list constructed from non-array"); |
421 | | |
422 | | // FIXME: Perform the checks on the field types in SemaInit. |
423 | 140 | RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl(); |
424 | 140 | RecordDecl::field_iterator Field = Record->field_begin(); |
425 | 140 | if (Field == Record->field_end()) { |
426 | 1 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
427 | 1 | return; |
428 | 1 | } |
429 | | |
430 | | // Start pointer. |
431 | 139 | if (!Field->getType()->isPointerType() || |
432 | 139 | !Ctx.hasSameType(Field->getType()->getPointeeType(), |
433 | 139 | ArrayType->getElementType())) { |
434 | 0 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
435 | 0 | return; |
436 | 0 | } |
437 | | |
438 | 139 | AggValueSlot Dest = EnsureSlot(E->getType()); |
439 | 139 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
440 | 139 | LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field); |
441 | 139 | llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0); |
442 | 139 | llvm::Value *IdxStart[] = { Zero, Zero }; |
443 | 139 | llvm::Value *ArrayStart = Builder.CreateInBoundsGEP( |
444 | 139 | ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart"); |
445 | 139 | CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start); |
446 | 139 | ++Field; |
447 | | |
448 | 139 | if (Field == Record->field_end()) { |
449 | 0 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
450 | 0 | return; |
451 | 0 | } |
452 | | |
453 | 139 | llvm::Value *Size = Builder.getInt(ArrayType->getSize()); |
454 | 139 | LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field); |
455 | 139 | if (Field->getType()->isPointerType() && |
456 | 139 | Ctx.hasSameType(Field->getType()->getPointeeType(), |
457 | 7 | ArrayType->getElementType())) { |
458 | | // End pointer. |
459 | 7 | llvm::Value *IdxEnd[] = { Zero, Size }; |
460 | 7 | llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP( |
461 | 7 | ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend"); |
462 | 7 | CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength); |
463 | 132 | } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) { |
464 | | // Length. |
465 | 132 | CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength); |
466 | 132 | } else { |
467 | 0 | CGF.ErrorUnsupported(E, "weird std::initializer_list"); |
468 | 0 | return; |
469 | 0 | } |
470 | 139 | } |
471 | | |
472 | | /// Determine if E is a trivial array filler, that is, one that is |
473 | | /// equivalent to zero-initialization. |
474 | 1.85k | static bool isTrivialFiller(Expr *E) { |
475 | 1.85k | if (!E) |
476 | 1.78k | return true; |
477 | | |
478 | 69 | if (isa<ImplicitValueInitExpr>(E)) |
479 | 34 | return true; |
480 | | |
481 | 35 | if (auto *ILE = dyn_cast<InitListExpr>(E)) { |
482 | 7 | if (ILE->getNumInits()) |
483 | 6 | return false; |
484 | 1 | return isTrivialFiller(ILE->getArrayFiller()); |
485 | 7 | } |
486 | | |
487 | 28 | if (auto *Cons = dyn_cast_or_null<CXXConstructExpr>(E)) |
488 | 25 | return Cons->getConstructor()->isDefaultConstructor() && |
489 | 25 | Cons->getConstructor()->isTrivial()23 ; |
490 | | |
491 | | // FIXME: Are there other cases where we can avoid emitting an initializer? |
492 | 3 | return false; |
493 | 28 | } |
494 | | |
495 | | /// Emit initialization of an array from an initializer list. ExprToVisit must |
496 | | /// be either an InitListEpxr a CXXParenInitListExpr. |
497 | | void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType, |
498 | | QualType ArrayQTy, Expr *ExprToVisit, |
499 | 1.87k | ArrayRef<Expr *> Args, Expr *ArrayFiller) { |
500 | 1.87k | uint64_t NumInitElements = Args.size(); |
501 | | |
502 | 1.87k | uint64_t NumArrayElements = AType->getNumElements(); |
503 | 1.87k | assert(NumInitElements <= NumArrayElements); |
504 | | |
505 | 1.87k | QualType elementType = |
506 | 1.87k | CGF.getContext().getAsArrayType(ArrayQTy)->getElementType(); |
507 | | |
508 | | // DestPtr is an array*. Construct an elementType* by drilling |
509 | | // down a level. |
510 | 1.87k | llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); |
511 | 1.87k | llvm::Value *indices[] = { zero, zero }; |
512 | 1.87k | llvm::Value *begin = Builder.CreateInBoundsGEP( |
513 | 1.87k | DestPtr.getElementType(), DestPtr.getPointer(), indices, |
514 | 1.87k | "arrayinit.begin"); |
515 | | |
516 | 1.87k | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); |
517 | 1.87k | CharUnits elementAlign = |
518 | 1.87k | DestPtr.getAlignment().alignmentOfArrayElement(elementSize); |
519 | 1.87k | llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType); |
520 | | |
521 | | // Consider initializing the array by copying from a global. For this to be |
522 | | // more efficient than per-element initialization, the size of the elements |
523 | | // with explicit initializers should be large enough. |
524 | 1.87k | if (NumInitElements * elementSize.getQuantity() > 16 && |
525 | 1.87k | elementType.isTriviallyCopyableType(CGF.getContext())116 ) { |
526 | 69 | CodeGen::CodeGenModule &CGM = CGF.CGM; |
527 | 69 | ConstantEmitter Emitter(CGF); |
528 | 69 | LangAS AS = ArrayQTy.getAddressSpace(); |
529 | 69 | if (llvm::Constant *C = |
530 | 69 | Emitter.tryEmitForInitializer(ExprToVisit, AS, ArrayQTy)) { |
531 | 22 | auto GV = new llvm::GlobalVariable( |
532 | 22 | CGM.getModule(), C->getType(), |
533 | 22 | /* isConstant= */ true, llvm::GlobalValue::PrivateLinkage, C, |
534 | 22 | "constinit", |
535 | 22 | /* InsertBefore= */ nullptr, llvm::GlobalVariable::NotThreadLocal, |
536 | 22 | CGM.getContext().getTargetAddressSpace(AS)); |
537 | 22 | Emitter.finalize(GV); |
538 | 22 | CharUnits Align = CGM.getContext().getTypeAlignInChars(ArrayQTy); |
539 | 22 | GV->setAlignment(Align.getAsAlign()); |
540 | 22 | Address GVAddr(GV, GV->getValueType(), Align); |
541 | 22 | EmitFinalDestCopy(ArrayQTy, CGF.MakeAddrLValue(GVAddr, ArrayQTy)); |
542 | 22 | return; |
543 | 22 | } |
544 | 69 | } |
545 | | |
546 | | // Exception safety requires us to destroy all the |
547 | | // already-constructed members if an initializer throws. |
548 | | // For that, we'll need an EH cleanup. |
549 | 1.85k | QualType::DestructionKind dtorKind = elementType.isDestructedType(); |
550 | 1.85k | Address endOfInit = Address::invalid(); |
551 | 1.85k | EHScopeStack::stable_iterator cleanup; |
552 | 1.85k | llvm::Instruction *cleanupDominator = nullptr; |
553 | 1.85k | if (CGF.needsEHCleanup(dtorKind)) { |
554 | | // In principle we could tell the cleanup where we are more |
555 | | // directly, but the control flow can get so varied here that it |
556 | | // would actually be quite complex. Therefore we go through an |
557 | | // alloca. |
558 | 74 | endOfInit = CGF.CreateTempAlloca(begin->getType(), CGF.getPointerAlign(), |
559 | 74 | "arrayinit.endOfInit"); |
560 | 74 | cleanupDominator = Builder.CreateStore(begin, endOfInit); |
561 | 74 | CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType, |
562 | 74 | elementAlign, |
563 | 74 | CGF.getDestroyer(dtorKind)); |
564 | 74 | cleanup = CGF.EHStack.stable_begin(); |
565 | | |
566 | | // Otherwise, remember that we didn't need a cleanup. |
567 | 1.77k | } else { |
568 | 1.77k | dtorKind = QualType::DK_none; |
569 | 1.77k | } |
570 | | |
571 | 1.85k | llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1); |
572 | | |
573 | | // The 'current element to initialize'. The invariants on this |
574 | | // variable are complicated. Essentially, after each iteration of |
575 | | // the loop, it points to the last initialized element, except |
576 | | // that it points to the beginning of the array before any |
577 | | // elements have been initialized. |
578 | 1.85k | llvm::Value *element = begin; |
579 | | |
580 | | // Emit the explicit initializers. |
581 | 5.45k | for (uint64_t i = 0; i != NumInitElements; ++i3.60k ) { |
582 | | // Advance to the next element. |
583 | 3.60k | if (i > 0) { |
584 | 1.77k | element = Builder.CreateInBoundsGEP( |
585 | 1.77k | llvmElementType, element, one, "arrayinit.element"); |
586 | | |
587 | | // Tell the cleanup that it needs to destroy up to this |
588 | | // element. TODO: some of these stores can be trivially |
589 | | // observed to be unnecessary. |
590 | 1.77k | if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit)123 ; |
591 | 1.77k | } |
592 | | |
593 | 3.60k | LValue elementLV = CGF.MakeAddrLValue( |
594 | 3.60k | Address(element, llvmElementType, elementAlign), elementType); |
595 | 3.60k | EmitInitializationToLValue(Args[i], elementLV); |
596 | 3.60k | } |
597 | | |
598 | | // Check whether there's a non-trivial array-fill expression. |
599 | 1.85k | bool hasTrivialFiller = isTrivialFiller(ArrayFiller); |
600 | | |
601 | | // Any remaining elements need to be zero-initialized, possibly |
602 | | // using the filler expression. We can skip this if the we're |
603 | | // emitting to zeroed memory. |
604 | 1.85k | if (NumInitElements != NumArrayElements && |
605 | 1.85k | !(63 Dest.isZeroed()63 && hasTrivialFiller21 && |
606 | 63 | CGF.getTypes().isZeroInitializable(elementType)12 )) { |
607 | | |
608 | | // Use an actual loop. This is basically |
609 | | // do { *array++ = filler; } while (array != end); |
610 | | |
611 | | // Advance to the start of the rest of the array. |
612 | 51 | if (NumInitElements) { |
613 | 28 | element = Builder.CreateInBoundsGEP( |
614 | 28 | llvmElementType, element, one, "arrayinit.start"); |
615 | 28 | if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit)2 ; |
616 | 28 | } |
617 | | |
618 | | // Compute the end of the array. |
619 | 51 | llvm::Value *end = Builder.CreateInBoundsGEP( |
620 | 51 | llvmElementType, begin, |
621 | 51 | llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end"); |
622 | | |
623 | 51 | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
624 | 51 | llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body"); |
625 | | |
626 | | // Jump into the body. |
627 | 51 | CGF.EmitBlock(bodyBB); |
628 | 51 | llvm::PHINode *currentElement = |
629 | 51 | Builder.CreatePHI(element->getType(), 2, "arrayinit.cur"); |
630 | 51 | currentElement->addIncoming(element, entryBB); |
631 | | |
632 | | // Emit the actual filler expression. |
633 | 51 | { |
634 | | // C++1z [class.temporary]p5: |
635 | | // when a default constructor is called to initialize an element of |
636 | | // an array with no corresponding initializer [...] the destruction of |
637 | | // every temporary created in a default argument is sequenced before |
638 | | // the construction of the next array element, if any |
639 | 51 | CodeGenFunction::RunCleanupsScope CleanupsScope(CGF); |
640 | 51 | LValue elementLV = CGF.MakeAddrLValue( |
641 | 51 | Address(currentElement, llvmElementType, elementAlign), elementType); |
642 | 51 | if (ArrayFiller) |
643 | 51 | EmitInitializationToLValue(ArrayFiller, elementLV); |
644 | 0 | else |
645 | 0 | EmitNullInitializationToLValue(elementLV); |
646 | 51 | } |
647 | | |
648 | | // Move on to the next element. |
649 | 51 | llvm::Value *nextElement = Builder.CreateInBoundsGEP( |
650 | 51 | llvmElementType, currentElement, one, "arrayinit.next"); |
651 | | |
652 | | // Tell the EH cleanup that we finished with the last element. |
653 | 51 | if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit)2 ; |
654 | | |
655 | | // Leave the loop if we're done. |
656 | 51 | llvm::Value *done = Builder.CreateICmpEQ(nextElement, end, |
657 | 51 | "arrayinit.done"); |
658 | 51 | llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end"); |
659 | 51 | Builder.CreateCondBr(done, endBB, bodyBB); |
660 | 51 | currentElement->addIncoming(nextElement, Builder.GetInsertBlock()); |
661 | | |
662 | 51 | CGF.EmitBlock(endBB); |
663 | 51 | } |
664 | | |
665 | | // Leave the partial-array cleanup if we entered one. |
666 | 1.85k | if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator)74 ; |
667 | 1.85k | } |
668 | | |
669 | | //===----------------------------------------------------------------------===// |
670 | | // Visitor Methods |
671 | | //===----------------------------------------------------------------------===// |
672 | | |
673 | 13.0k | void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){ |
674 | 13.0k | Visit(E->getSubExpr()); |
675 | 13.0k | } |
676 | | |
677 | 25 | void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) { |
678 | | // If this is a unique OVE, just visit its source expression. |
679 | 25 | if (e->isUnique()) |
680 | 8 | Visit(e->getSourceExpr()); |
681 | 17 | else |
682 | 17 | EmitFinalDestCopy(e->getType(), CGF.getOrCreateOpaqueLValueMapping(e)); |
683 | 25 | } |
684 | | |
685 | | void |
686 | 766 | AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { |
687 | 766 | if (Dest.isPotentiallyAliased() && |
688 | 766 | E->getType().isPODType(CGF.getContext())20 ) { |
689 | | // For a POD type, just emit a load of the lvalue + a copy, because our |
690 | | // compound literal might alias the destination. |
691 | 20 | EmitAggLoadOfLValue(E); |
692 | 20 | return; |
693 | 20 | } |
694 | | |
695 | 746 | AggValueSlot Slot = EnsureSlot(E->getType()); |
696 | | |
697 | | // Block-scope compound literals are destroyed at the end of the enclosing |
698 | | // scope in C. |
699 | 746 | bool Destruct = |
700 | 746 | !CGF.getLangOpts().CPlusPlus && !Slot.isExternallyDestructed()532 ; |
701 | 746 | if (Destruct) |
702 | 20 | Slot.setExternallyDestructed(); |
703 | | |
704 | 746 | CGF.EmitAggExpr(E->getInitializer(), Slot); |
705 | | |
706 | 746 | if (Destruct) |
707 | 20 | if (QualType::DestructionKind DtorKind = E->getType().isDestructedType()) |
708 | 0 | CGF.pushLifetimeExtendedDestroy( |
709 | 0 | CGF.getCleanupKind(DtorKind), Slot.getAddress(), E->getType(), |
710 | 0 | CGF.getDestroyer(DtorKind), DtorKind & EHCleanup); |
711 | 746 | } |
712 | | |
713 | | /// Attempt to look through various unimportant expressions to find a |
714 | | /// cast of the given kind. |
715 | 11 | static Expr *findPeephole(Expr *op, CastKind kind, const ASTContext &ctx) { |
716 | 11 | op = op->IgnoreParenNoopCasts(ctx); |
717 | 11 | if (auto castE = dyn_cast<CastExpr>(op)) { |
718 | 0 | if (castE->getCastKind() == kind) |
719 | 0 | return castE->getSubExpr(); |
720 | 0 | } |
721 | 11 | return nullptr; |
722 | 11 | } |
723 | | |
724 | 13.7k | void AggExprEmitter::VisitCastExpr(CastExpr *E) { |
725 | 13.7k | if (const auto *ECE = dyn_cast<ExplicitCastExpr>(E)) |
726 | 4.40k | CGF.CGM.EmitExplicitCastExprType(ECE, &CGF); |
727 | 13.7k | switch (E->getCastKind()) { |
728 | 0 | case CK_Dynamic: { |
729 | | // FIXME: Can this actually happen? We have no test coverage for it. |
730 | 0 | assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?"); |
731 | 0 | LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(), |
732 | 0 | CodeGenFunction::TCK_Load); |
733 | | // FIXME: Do we also need to handle property references here? |
734 | 0 | if (LV.isSimple()) |
735 | 0 | CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E)); |
736 | 0 | else |
737 | 0 | CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast"); |
738 | |
|
739 | 0 | if (!Dest.isIgnored()) |
740 | 0 | CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination"); |
741 | 0 | break; |
742 | 0 | } |
743 | | |
744 | 7 | case CK_ToUnion: { |
745 | | // Evaluate even if the destination is ignored. |
746 | 7 | if (Dest.isIgnored()) { |
747 | 1 | CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), |
748 | 1 | /*ignoreResult=*/true); |
749 | 1 | break; |
750 | 1 | } |
751 | | |
752 | | // GCC union extension |
753 | 6 | QualType Ty = E->getSubExpr()->getType(); |
754 | 6 | Address CastPtr = Dest.getAddress().withElementType(CGF.ConvertType(Ty)); |
755 | 6 | EmitInitializationToLValue(E->getSubExpr(), |
756 | 6 | CGF.MakeAddrLValue(CastPtr, Ty)); |
757 | 6 | break; |
758 | 7 | } |
759 | | |
760 | 4 | case CK_LValueToRValueBitCast: { |
761 | 4 | if (Dest.isIgnored()) { |
762 | 0 | CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(), |
763 | 0 | /*ignoreResult=*/true); |
764 | 0 | break; |
765 | 0 | } |
766 | | |
767 | 4 | LValue SourceLV = CGF.EmitLValue(E->getSubExpr()); |
768 | 4 | Address SourceAddress = |
769 | 4 | SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty); |
770 | 4 | Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty); |
771 | 4 | llvm::Value *SizeVal = llvm::ConstantInt::get( |
772 | 4 | CGF.SizeTy, |
773 | 4 | CGF.getContext().getTypeSizeInChars(E->getType()).getQuantity()); |
774 | 4 | Builder.CreateMemCpy(DestAddress, SourceAddress, SizeVal); |
775 | 4 | break; |
776 | 4 | } |
777 | | |
778 | 0 | case CK_DerivedToBase: |
779 | 0 | case CK_BaseToDerived: |
780 | 0 | case CK_UncheckedDerivedToBase: { |
781 | 0 | llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: " |
782 | 0 | "should have been unpacked before we got here"); |
783 | 0 | } |
784 | |
|
785 | 20 | case CK_NonAtomicToAtomic: |
786 | 30 | case CK_AtomicToNonAtomic: { |
787 | 30 | bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic); |
788 | | |
789 | | // Determine the atomic and value types. |
790 | 30 | QualType atomicType = E->getSubExpr()->getType(); |
791 | 30 | QualType valueType = E->getType(); |
792 | 30 | if (isToAtomic) std::swap(atomicType, valueType)20 ; |
793 | | |
794 | 30 | assert(atomicType->isAtomicType()); |
795 | 30 | assert(CGF.getContext().hasSameUnqualifiedType(valueType, |
796 | 30 | atomicType->castAs<AtomicType>()->getValueType())); |
797 | | |
798 | | // Just recurse normally if we're ignoring the result or the |
799 | | // atomic type doesn't change representation. |
800 | 30 | if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) { |
801 | 19 | return Visit(E->getSubExpr()); |
802 | 19 | } |
803 | | |
804 | 11 | CastKind peepholeTarget = |
805 | 11 | (isToAtomic ? CK_AtomicToNonAtomic7 : CK_NonAtomicToAtomic4 ); |
806 | | |
807 | | // These two cases are reverses of each other; try to peephole them. |
808 | 11 | if (Expr *op = |
809 | 11 | findPeephole(E->getSubExpr(), peepholeTarget, CGF.getContext())) { |
810 | 0 | assert(CGF.getContext().hasSameUnqualifiedType(op->getType(), |
811 | 0 | E->getType()) && |
812 | 0 | "peephole significantly changed types?"); |
813 | 0 | return Visit(op); |
814 | 0 | } |
815 | | |
816 | | // If we're converting an r-value of non-atomic type to an r-value |
817 | | // of atomic type, just emit directly into the relevant sub-object. |
818 | 11 | if (isToAtomic) { |
819 | 7 | AggValueSlot valueDest = Dest; |
820 | 7 | if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) { |
821 | | // Zero-initialize. (Strictly speaking, we only need to initialize |
822 | | // the padding at the end, but this is simpler.) |
823 | 7 | if (!Dest.isZeroed()) |
824 | 7 | CGF.EmitNullInitialization(Dest.getAddress(), atomicType); |
825 | | |
826 | | // Build a GEP to refer to the subobject. |
827 | 7 | Address valueAddr = |
828 | 7 | CGF.Builder.CreateStructGEP(valueDest.getAddress(), 0); |
829 | 7 | valueDest = AggValueSlot::forAddr(valueAddr, |
830 | 7 | valueDest.getQualifiers(), |
831 | 7 | valueDest.isExternallyDestructed(), |
832 | 7 | valueDest.requiresGCollection(), |
833 | 7 | valueDest.isPotentiallyAliased(), |
834 | 7 | AggValueSlot::DoesNotOverlap, |
835 | 7 | AggValueSlot::IsZeroed); |
836 | 7 | } |
837 | | |
838 | 7 | CGF.EmitAggExpr(E->getSubExpr(), valueDest); |
839 | 7 | return; |
840 | 7 | } |
841 | | |
842 | | // Otherwise, we're converting an atomic type to a non-atomic type. |
843 | | // Make an atomic temporary, emit into that, and then copy the value out. |
844 | 4 | AggValueSlot atomicSlot = |
845 | 4 | CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp"); |
846 | 4 | CGF.EmitAggExpr(E->getSubExpr(), atomicSlot); |
847 | | |
848 | 4 | Address valueAddr = Builder.CreateStructGEP(atomicSlot.getAddress(), 0); |
849 | 4 | RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile()); |
850 | 4 | return EmitFinalDestCopy(valueType, rvalue); |
851 | 11 | } |
852 | 0 | case CK_AddressSpaceConversion: |
853 | 0 | return Visit(E->getSubExpr()); |
854 | | |
855 | 3.10k | case CK_LValueToRValue: |
856 | | // If we're loading from a volatile type, force the destination |
857 | | // into existence. |
858 | 3.10k | if (E->getSubExpr()->getType().isVolatileQualified()) { |
859 | 24 | bool Destruct = |
860 | 24 | !Dest.isExternallyDestructed() && |
861 | 24 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct12 ; |
862 | 24 | if (Destruct) |
863 | 2 | Dest.setExternallyDestructed(); |
864 | 24 | EnsureDest(E->getType()); |
865 | 24 | Visit(E->getSubExpr()); |
866 | | |
867 | 24 | if (Destruct) |
868 | 2 | CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), |
869 | 2 | E->getType()); |
870 | | |
871 | 24 | return; |
872 | 24 | } |
873 | | |
874 | 3.10k | [[fallthrough]];3.08k |
875 | | |
876 | | |
877 | 6.28k | case CK_NoOp: |
878 | 6.36k | case CK_UserDefinedConversion: |
879 | 13.6k | case CK_ConstructorConversion: |
880 | 13.6k | assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(), |
881 | 13.6k | E->getType()) && |
882 | 13.6k | "Implicit cast types must be compatible"); |
883 | 13.6k | Visit(E->getSubExpr()); |
884 | 13.6k | break; |
885 | | |
886 | 0 | case CK_LValueBitCast: |
887 | 0 | llvm_unreachable("should not be emitting lvalue bitcast as rvalue"); |
888 | |
|
889 | 0 | case CK_Dependent: |
890 | 0 | case CK_BitCast: |
891 | 0 | case CK_ArrayToPointerDecay: |
892 | 0 | case CK_FunctionToPointerDecay: |
893 | 0 | case CK_NullToPointer: |
894 | 0 | case CK_NullToMemberPointer: |
895 | 0 | case CK_BaseToDerivedMemberPointer: |
896 | 0 | case CK_DerivedToBaseMemberPointer: |
897 | 0 | case CK_MemberPointerToBoolean: |
898 | 0 | case CK_ReinterpretMemberPointer: |
899 | 0 | case CK_IntegralToPointer: |
900 | 0 | case CK_PointerToIntegral: |
901 | 0 | case CK_PointerToBoolean: |
902 | 0 | case CK_ToVoid: |
903 | 0 | case CK_VectorSplat: |
904 | 0 | case CK_IntegralCast: |
905 | 0 | case CK_BooleanToSignedIntegral: |
906 | 0 | case CK_IntegralToBoolean: |
907 | 0 | case CK_IntegralToFloating: |
908 | 0 | case CK_FloatingToIntegral: |
909 | 0 | case CK_FloatingToBoolean: |
910 | 0 | case CK_FloatingCast: |
911 | 0 | case CK_CPointerToObjCPointerCast: |
912 | 0 | case CK_BlockPointerToObjCPointerCast: |
913 | 0 | case CK_AnyPointerToBlockPointerCast: |
914 | 0 | case CK_ObjCObjectLValueCast: |
915 | 0 | case CK_FloatingRealToComplex: |
916 | 0 | case CK_FloatingComplexToReal: |
917 | 0 | case CK_FloatingComplexToBoolean: |
918 | 0 | case CK_FloatingComplexCast: |
919 | 0 | case CK_FloatingComplexToIntegralComplex: |
920 | 0 | case CK_IntegralRealToComplex: |
921 | 0 | case CK_IntegralComplexToReal: |
922 | 0 | case CK_IntegralComplexToBoolean: |
923 | 0 | case CK_IntegralComplexCast: |
924 | 0 | case CK_IntegralComplexToFloatingComplex: |
925 | 0 | case CK_ARCProduceObject: |
926 | 0 | case CK_ARCConsumeObject: |
927 | 0 | case CK_ARCReclaimReturnedObject: |
928 | 0 | case CK_ARCExtendBlockObject: |
929 | 0 | case CK_CopyAndAutoreleaseBlockObject: |
930 | 0 | case CK_BuiltinFnToFnPtr: |
931 | 0 | case CK_ZeroToOCLOpaqueType: |
932 | 0 | case CK_MatrixCast: |
933 | |
|
934 | 0 | case CK_IntToOCLSampler: |
935 | 0 | case CK_FloatingToFixedPoint: |
936 | 0 | case CK_FixedPointToFloating: |
937 | 0 | case CK_FixedPointCast: |
938 | 0 | case CK_FixedPointToBoolean: |
939 | 0 | case CK_FixedPointToIntegral: |
940 | 0 | case CK_IntegralToFixedPoint: |
941 | 0 | llvm_unreachable("cast kind invalid for aggregate types"); |
942 | 13.7k | } |
943 | 13.7k | } |
944 | | |
945 | 9.42k | void AggExprEmitter::VisitCallExpr(const CallExpr *E) { |
946 | 9.42k | if (E->getCallReturnType(CGF.getContext())->isReferenceType()) { |
947 | 0 | EmitAggLoadOfLValue(E); |
948 | 0 | return; |
949 | 0 | } |
950 | | |
951 | 9.42k | withReturnValueSlot(E, [&](ReturnValueSlot Slot) { |
952 | 9.42k | return CGF.EmitCallExpr(E, Slot); |
953 | 9.42k | }); |
954 | 9.42k | } |
955 | | |
956 | 68 | void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) { |
957 | 68 | withReturnValueSlot(E, [&](ReturnValueSlot Slot) { |
958 | 68 | return CGF.EmitObjCMessageExpr(E, Slot); |
959 | 68 | }); |
960 | 68 | } |
961 | | |
962 | 46 | void AggExprEmitter::VisitBinComma(const BinaryOperator *E) { |
963 | 46 | CGF.EmitIgnoredExpr(E->getLHS()); |
964 | 46 | Visit(E->getRHS()); |
965 | 46 | } |
966 | | |
967 | 647 | void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) { |
968 | 647 | CodeGenFunction::StmtExprEvaluation eval(CGF); |
969 | 647 | CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest); |
970 | 647 | } |
971 | | |
972 | | enum CompareKind { |
973 | | CK_Less, |
974 | | CK_Greater, |
975 | | CK_Equal, |
976 | | }; |
977 | | |
978 | | static llvm::Value *EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, |
979 | | const BinaryOperator *E, llvm::Value *LHS, |
980 | | llvm::Value *RHS, CompareKind Kind, |
981 | 31 | const char *NameSuffix = "") { |
982 | 31 | QualType ArgTy = E->getLHS()->getType(); |
983 | 31 | if (const ComplexType *CT = ArgTy->getAs<ComplexType>()) |
984 | 0 | ArgTy = CT->getElementType(); |
985 | | |
986 | 31 | if (const auto *MPT = ArgTy->getAs<MemberPointerType>()) { |
987 | 0 | assert(Kind == CK_Equal && |
988 | 0 | "member pointers may only be compared for equality"); |
989 | 0 | return CGF.CGM.getCXXABI().EmitMemberPointerComparison( |
990 | 0 | CGF, LHS, RHS, MPT, /*IsInequality*/ false); |
991 | 0 | } |
992 | | |
993 | | // Compute the comparison instructions for the specified comparison kind. |
994 | 31 | struct CmpInstInfo { |
995 | 31 | const char *Name; |
996 | 31 | llvm::CmpInst::Predicate FCmp; |
997 | 31 | llvm::CmpInst::Predicate SCmp; |
998 | 31 | llvm::CmpInst::Predicate UCmp; |
999 | 31 | }; |
1000 | 31 | CmpInstInfo InstInfo = [&]() -> CmpInstInfo { |
1001 | 31 | using FI = llvm::FCmpInst; |
1002 | 31 | using II = llvm::ICmpInst; |
1003 | 31 | switch (Kind) { |
1004 | 15 | case CK_Less: |
1005 | 15 | return {"cmp.lt", FI::FCMP_OLT, II::ICMP_SLT, II::ICMP_ULT}; |
1006 | 1 | case CK_Greater: |
1007 | 1 | return {"cmp.gt", FI::FCMP_OGT, II::ICMP_SGT, II::ICMP_UGT}; |
1008 | 15 | case CK_Equal: |
1009 | 15 | return {"cmp.eq", FI::FCMP_OEQ, II::ICMP_EQ, II::ICMP_EQ}; |
1010 | 31 | } |
1011 | 0 | llvm_unreachable("Unrecognised CompareKind enum"); |
1012 | 0 | }(); |
1013 | | |
1014 | 31 | if (ArgTy->hasFloatingRepresentation()) |
1015 | 3 | return Builder.CreateFCmp(InstInfo.FCmp, LHS, RHS, |
1016 | 3 | llvm::Twine(InstInfo.Name) + NameSuffix); |
1017 | 28 | if (ArgTy->isIntegralOrEnumerationType() || ArgTy->isPointerType()2 ) { |
1018 | 28 | auto Inst = |
1019 | 28 | ArgTy->hasSignedIntegerRepresentation() ? InstInfo.SCmp18 : InstInfo.UCmp10 ; |
1020 | 28 | return Builder.CreateICmp(Inst, LHS, RHS, |
1021 | 28 | llvm::Twine(InstInfo.Name) + NameSuffix); |
1022 | 28 | } |
1023 | | |
1024 | 0 | llvm_unreachable("unsupported aggregate binary expression should have " |
1025 | 0 | "already been handled"); |
1026 | 0 | } |
1027 | | |
1028 | 15 | void AggExprEmitter::VisitBinCmp(const BinaryOperator *E) { |
1029 | 15 | using llvm::BasicBlock; |
1030 | 15 | using llvm::PHINode; |
1031 | 15 | using llvm::Value; |
1032 | 15 | assert(CGF.getContext().hasSameType(E->getLHS()->getType(), |
1033 | 15 | E->getRHS()->getType())); |
1034 | 15 | const ComparisonCategoryInfo &CmpInfo = |
1035 | 15 | CGF.getContext().CompCategories.getInfoForType(E->getType()); |
1036 | 15 | assert(CmpInfo.Record->isTriviallyCopyable() && |
1037 | 15 | "cannot copy non-trivially copyable aggregate"); |
1038 | | |
1039 | 15 | QualType ArgTy = E->getLHS()->getType(); |
1040 | | |
1041 | 15 | if (!ArgTy->isIntegralOrEnumerationType() && !ArgTy->isRealFloatingType()2 && |
1042 | 15 | !ArgTy->isNullPtrType()1 && !ArgTy->isPointerType()1 && |
1043 | 15 | !ArgTy->isMemberPointerType()0 && !ArgTy->isAnyComplexType()0 ) { |
1044 | 0 | return CGF.ErrorUnsupported(E, "aggregate three-way comparison"); |
1045 | 0 | } |
1046 | 15 | bool IsComplex = ArgTy->isAnyComplexType(); |
1047 | | |
1048 | | // Evaluate the operands to the expression and extract their values. |
1049 | 30 | auto EmitOperand = [&](Expr *E) -> std::pair<Value *, Value *> { |
1050 | 30 | RValue RV = CGF.EmitAnyExpr(E); |
1051 | 30 | if (RV.isScalar()) |
1052 | 30 | return {RV.getScalarVal(), nullptr}; |
1053 | 0 | if (RV.isAggregate()) |
1054 | 0 | return {RV.getAggregatePointer(), nullptr}; |
1055 | 0 | assert(RV.isComplex()); |
1056 | 0 | return RV.getComplexVal(); |
1057 | 0 | }; |
1058 | 15 | auto LHSValues = EmitOperand(E->getLHS()), |
1059 | 15 | RHSValues = EmitOperand(E->getRHS()); |
1060 | | |
1061 | 31 | auto EmitCmp = [&](CompareKind K) { |
1062 | 31 | Value *Cmp = EmitCompare(Builder, CGF, E, LHSValues.first, RHSValues.first, |
1063 | 31 | K, IsComplex ? ".r"0 : ""); |
1064 | 31 | if (!IsComplex) |
1065 | 31 | return Cmp; |
1066 | 0 | assert(K == CompareKind::CK_Equal); |
1067 | 0 | Value *CmpImag = EmitCompare(Builder, CGF, E, LHSValues.second, |
1068 | 0 | RHSValues.second, K, ".i"); |
1069 | 0 | return Builder.CreateAnd(Cmp, CmpImag, "and.eq"); |
1070 | 0 | }; |
1071 | 46 | auto EmitCmpRes = [&](const ComparisonCategoryInfo::ValueInfo *VInfo) { |
1072 | 46 | return Builder.getInt(VInfo->getIntValue()); |
1073 | 46 | }; |
1074 | | |
1075 | 15 | Value *Select; |
1076 | 15 | if (ArgTy->isNullPtrType()) { |
1077 | 0 | Select = EmitCmpRes(CmpInfo.getEqualOrEquiv()); |
1078 | 15 | } else if (!CmpInfo.isPartial()) { |
1079 | 14 | Value *SelectOne = |
1080 | 14 | Builder.CreateSelect(EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), |
1081 | 14 | EmitCmpRes(CmpInfo.getGreater()), "sel.lt"); |
1082 | 14 | Select = Builder.CreateSelect(EmitCmp(CK_Equal), |
1083 | 14 | EmitCmpRes(CmpInfo.getEqualOrEquiv()), |
1084 | 14 | SelectOne, "sel.eq"); |
1085 | 14 | } else { |
1086 | 1 | Value *SelectEq = Builder.CreateSelect( |
1087 | 1 | EmitCmp(CK_Equal), EmitCmpRes(CmpInfo.getEqualOrEquiv()), |
1088 | 1 | EmitCmpRes(CmpInfo.getUnordered()), "sel.eq"); |
1089 | 1 | Value *SelectGT = Builder.CreateSelect(EmitCmp(CK_Greater), |
1090 | 1 | EmitCmpRes(CmpInfo.getGreater()), |
1091 | 1 | SelectEq, "sel.gt"); |
1092 | 1 | Select = Builder.CreateSelect( |
1093 | 1 | EmitCmp(CK_Less), EmitCmpRes(CmpInfo.getLess()), SelectGT, "sel.lt"); |
1094 | 1 | } |
1095 | | // Create the return value in the destination slot. |
1096 | 15 | EnsureDest(E->getType()); |
1097 | 15 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
1098 | | |
1099 | | // Emit the address of the first (and only) field in the comparison category |
1100 | | // type, and initialize it from the constant integer value selected above. |
1101 | 15 | LValue FieldLV = CGF.EmitLValueForFieldInitialization( |
1102 | 15 | DestLV, *CmpInfo.Record->field_begin()); |
1103 | 15 | CGF.EmitStoreThroughLValue(RValue::get(Select), FieldLV, /*IsInit*/ true); |
1104 | | |
1105 | | // All done! The result is in the Dest slot. |
1106 | 15 | } |
1107 | | |
1108 | 0 | void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) { |
1109 | 0 | if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) |
1110 | 0 | VisitPointerToDataMemberBinaryOperator(E); |
1111 | 0 | else |
1112 | 0 | CGF.ErrorUnsupported(E, "aggregate binary expression"); |
1113 | 0 | } |
1114 | | |
1115 | | void AggExprEmitter::VisitPointerToDataMemberBinaryOperator( |
1116 | 0 | const BinaryOperator *E) { |
1117 | 0 | LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E); |
1118 | 0 | EmitFinalDestCopy(E->getType(), LV); |
1119 | 0 | } |
1120 | | |
1121 | | /// Is the value of the given expression possibly a reference to or |
1122 | | /// into a __block variable? |
1123 | 740 | static bool isBlockVarRef(const Expr *E) { |
1124 | | // Make sure we look through parens. |
1125 | 740 | E = E->IgnoreParens(); |
1126 | | |
1127 | | // Check for a direct reference to a __block variable. |
1128 | 740 | if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { |
1129 | 271 | const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl()); |
1130 | 271 | return (var && var->hasAttr<BlocksAttr>()); |
1131 | 271 | } |
1132 | | |
1133 | | // More complicated stuff. |
1134 | | |
1135 | | // Binary operators. |
1136 | 469 | if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) { |
1137 | | // For an assignment or pointer-to-member operation, just care |
1138 | | // about the LHS. |
1139 | 0 | if (op->isAssignmentOp() || op->isPtrMemOp()) |
1140 | 0 | return isBlockVarRef(op->getLHS()); |
1141 | | |
1142 | | // For a comma, just care about the RHS. |
1143 | 0 | if (op->getOpcode() == BO_Comma) |
1144 | 0 | return isBlockVarRef(op->getRHS()); |
1145 | | |
1146 | | // FIXME: pointer arithmetic? |
1147 | 0 | return false; |
1148 | | |
1149 | | // Check both sides of a conditional operator. |
1150 | 469 | } else if (const AbstractConditionalOperator *op |
1151 | 469 | = dyn_cast<AbstractConditionalOperator>(E)) { |
1152 | 0 | return isBlockVarRef(op->getTrueExpr()) |
1153 | 0 | || isBlockVarRef(op->getFalseExpr()); |
1154 | | |
1155 | | // OVEs are required to support BinaryConditionalOperators. |
1156 | 469 | } else if (const OpaqueValueExpr *op |
1157 | 469 | = dyn_cast<OpaqueValueExpr>(E)) { |
1158 | 0 | if (const Expr *src = op->getSourceExpr()) |
1159 | 0 | return isBlockVarRef(src); |
1160 | | |
1161 | | // Casts are necessary to get things like (*(int*)&var) = foo(). |
1162 | | // We don't really care about the kind of cast here, except |
1163 | | // we don't want to look through l2r casts, because it's okay |
1164 | | // to get the *value* in a __block variable. |
1165 | 469 | } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) { |
1166 | 235 | if (cast->getCastKind() == CK_LValueToRValue) |
1167 | 221 | return false; |
1168 | 14 | return isBlockVarRef(cast->getSubExpr()); |
1169 | | |
1170 | | // Handle unary operators. Again, just aggressively look through |
1171 | | // it, ignoring the operation. |
1172 | 235 | } else if (const UnaryOperator *234 uop234 = dyn_cast<UnaryOperator>(E)) { |
1173 | 202 | return isBlockVarRef(uop->getSubExpr()); |
1174 | | |
1175 | | // Look into the base of a field access. |
1176 | 202 | } else if (const MemberExpr *32 mem32 = dyn_cast<MemberExpr>(E)) { |
1177 | 5 | return isBlockVarRef(mem->getBase()); |
1178 | | |
1179 | | // Look into the base of a subscript. |
1180 | 27 | } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) { |
1181 | 19 | return isBlockVarRef(sub->getBase()); |
1182 | 19 | } |
1183 | | |
1184 | 8 | return false; |
1185 | 469 | } |
1186 | | |
1187 | 500 | void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { |
1188 | | // For an assignment to work, the value on the right has |
1189 | | // to be compatible with the value on the left. |
1190 | 500 | assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(), |
1191 | 500 | E->getRHS()->getType()) |
1192 | 500 | && "Invalid assignment"); |
1193 | | |
1194 | | // If the LHS might be a __block variable, and the RHS can |
1195 | | // potentially cause a block copy, we need to evaluate the RHS first |
1196 | | // so that the assignment goes the right place. |
1197 | | // This is pretty semantically fragile. |
1198 | 500 | if (isBlockVarRef(E->getLHS()) && |
1199 | 500 | E->getRHS()->HasSideEffects(CGF.getContext())3 ) { |
1200 | | // Ensure that we have a destination, and evaluate the RHS into that. |
1201 | 3 | EnsureDest(E->getRHS()->getType()); |
1202 | 3 | Visit(E->getRHS()); |
1203 | | |
1204 | | // Now emit the LHS and copy into it. |
1205 | 3 | LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); |
1206 | | |
1207 | | // That copy is an atomic copy if the LHS is atomic. |
1208 | 3 | if (LHS.getType()->isAtomicType() || |
1209 | 3 | CGF.LValueIsSuitableForInlineAtomic(LHS)) { |
1210 | 0 | CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); |
1211 | 0 | return; |
1212 | 0 | } |
1213 | | |
1214 | 3 | EmitCopy(E->getLHS()->getType(), |
1215 | 3 | AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed, |
1216 | 3 | needsGC(E->getLHS()->getType()), |
1217 | 3 | AggValueSlot::IsAliased, |
1218 | 3 | AggValueSlot::MayOverlap), |
1219 | 3 | Dest); |
1220 | 3 | return; |
1221 | 3 | } |
1222 | | |
1223 | 497 | LValue LHS = CGF.EmitLValue(E->getLHS()); |
1224 | | |
1225 | | // If we have an atomic type, evaluate into the destination and then |
1226 | | // do an atomic copy. |
1227 | 497 | if (LHS.getType()->isAtomicType() || |
1228 | 497 | CGF.LValueIsSuitableForInlineAtomic(LHS)488 ) { |
1229 | 12 | EnsureDest(E->getRHS()->getType()); |
1230 | 12 | Visit(E->getRHS()); |
1231 | 12 | CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false); |
1232 | 12 | return; |
1233 | 12 | } |
1234 | | |
1235 | | // Codegen the RHS so that it stores directly into the LHS. |
1236 | 485 | AggValueSlot LHSSlot = AggValueSlot::forLValue( |
1237 | 485 | LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()), |
1238 | 485 | AggValueSlot::IsAliased, AggValueSlot::MayOverlap); |
1239 | | // A non-volatile aggregate destination might have volatile member. |
1240 | 485 | if (!LHSSlot.isVolatile() && |
1241 | 485 | CGF.hasVolatileMember(E->getLHS()->getType())469 ) |
1242 | 7 | LHSSlot.setVolatile(true); |
1243 | | |
1244 | 485 | CGF.EmitAggExpr(E->getRHS(), LHSSlot); |
1245 | | |
1246 | | // Copy into the destination if the assignment isn't ignored. |
1247 | 485 | EmitFinalDestCopy(E->getType(), LHS); |
1248 | | |
1249 | 485 | if (!Dest.isIgnored() && !Dest.isExternallyDestructed()9 && |
1250 | 485 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct4 ) |
1251 | 2 | CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), |
1252 | 2 | E->getType()); |
1253 | 485 | } |
1254 | | |
1255 | | void AggExprEmitter:: |
1256 | 65 | VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { |
1257 | 65 | llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); |
1258 | 65 | llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); |
1259 | 65 | llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); |
1260 | | |
1261 | | // Bind the common expression if necessary. |
1262 | 65 | CodeGenFunction::OpaqueValueMapping binding(CGF, E); |
1263 | | |
1264 | 65 | CodeGenFunction::ConditionalEvaluation eval(CGF); |
1265 | 65 | CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock, |
1266 | 65 | CGF.getProfileCount(E)); |
1267 | | |
1268 | | // Save whether the destination's lifetime is externally managed. |
1269 | 65 | bool isExternallyDestructed = Dest.isExternallyDestructed(); |
1270 | 65 | bool destructNonTrivialCStruct = |
1271 | 65 | !isExternallyDestructed && |
1272 | 65 | E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct13 ; |
1273 | 65 | isExternallyDestructed |= destructNonTrivialCStruct; |
1274 | 65 | Dest.setExternallyDestructed(isExternallyDestructed); |
1275 | | |
1276 | 65 | eval.begin(CGF); |
1277 | 65 | CGF.EmitBlock(LHSBlock); |
1278 | 65 | CGF.incrementProfileCounter(E); |
1279 | 65 | Visit(E->getTrueExpr()); |
1280 | 65 | eval.end(CGF); |
1281 | | |
1282 | 65 | assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!"); |
1283 | 65 | CGF.Builder.CreateBr(ContBlock); |
1284 | | |
1285 | | // If the result of an agg expression is unused, then the emission |
1286 | | // of the LHS might need to create a destination slot. That's fine |
1287 | | // with us, and we can safely emit the RHS into the same slot, but |
1288 | | // we shouldn't claim that it's already being destructed. |
1289 | 65 | Dest.setExternallyDestructed(isExternallyDestructed); |
1290 | | |
1291 | 65 | eval.begin(CGF); |
1292 | 65 | CGF.EmitBlock(RHSBlock); |
1293 | 65 | Visit(E->getFalseExpr()); |
1294 | 65 | eval.end(CGF); |
1295 | | |
1296 | 65 | if (destructNonTrivialCStruct) |
1297 | 2 | CGF.pushDestroy(QualType::DK_nontrivial_c_struct, Dest.getAddress(), |
1298 | 2 | E->getType()); |
1299 | | |
1300 | 65 | CGF.EmitBlock(ContBlock); |
1301 | 65 | } |
1302 | | |
1303 | 0 | void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) { |
1304 | 0 | Visit(CE->getChosenSubExpr()); |
1305 | 0 | } |
1306 | | |
1307 | 387 | void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { |
1308 | 387 | Address ArgValue = Address::invalid(); |
1309 | 387 | Address ArgPtr = CGF.EmitVAArg(VE, ArgValue); |
1310 | | |
1311 | | // If EmitVAArg fails, emit an error. |
1312 | 387 | if (!ArgPtr.isValid()) { |
1313 | 0 | CGF.ErrorUnsupported(VE, "aggregate va_arg expression"); |
1314 | 0 | return; |
1315 | 0 | } |
1316 | | |
1317 | 387 | EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType())); |
1318 | 387 | } |
1319 | | |
1320 | 7.01k | void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { |
1321 | | // Ensure that we have a slot, but if we already do, remember |
1322 | | // whether it was externally destructed. |
1323 | 7.01k | bool wasExternallyDestructed = Dest.isExternallyDestructed(); |
1324 | 7.01k | EnsureDest(E->getType()); |
1325 | | |
1326 | | // We're going to push a destructor if there isn't already one. |
1327 | 7.01k | Dest.setExternallyDestructed(); |
1328 | | |
1329 | 7.01k | Visit(E->getSubExpr()); |
1330 | | |
1331 | | // Push that destructor we promised. |
1332 | 7.01k | if (!wasExternallyDestructed) |
1333 | 848 | CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddress()); |
1334 | 7.01k | } |
1335 | | |
1336 | | void |
1337 | 66.4k | AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) { |
1338 | 66.4k | AggValueSlot Slot = EnsureSlot(E->getType()); |
1339 | 66.4k | CGF.EmitCXXConstructExpr(E, Slot); |
1340 | 66.4k | } |
1341 | | |
1342 | | void AggExprEmitter::VisitCXXInheritedCtorInitExpr( |
1343 | 188 | const CXXInheritedCtorInitExpr *E) { |
1344 | 188 | AggValueSlot Slot = EnsureSlot(E->getType()); |
1345 | 188 | CGF.EmitInheritedCXXConstructorCall( |
1346 | 188 | E->getConstructor(), E->constructsVBase(), Slot.getAddress(), |
1347 | 188 | E->inheritedFromVBase(), E); |
1348 | 188 | } |
1349 | | |
1350 | | void |
1351 | 1.88k | AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) { |
1352 | 1.88k | AggValueSlot Slot = EnsureSlot(E->getType()); |
1353 | 1.88k | LValue SlotLV = CGF.MakeAddrLValue(Slot.getAddress(), E->getType()); |
1354 | | |
1355 | | // We'll need to enter cleanup scopes in case any of the element |
1356 | | // initializers throws an exception. |
1357 | 1.88k | SmallVector<EHScopeStack::stable_iterator, 16> Cleanups; |
1358 | 1.88k | llvm::Instruction *CleanupDominator = nullptr; |
1359 | | |
1360 | 1.88k | CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin(); |
1361 | 1.88k | for (LambdaExpr::const_capture_init_iterator i = E->capture_init_begin(), |
1362 | 1.88k | e = E->capture_init_end(); |
1363 | 4.71k | i != e; ++i, ++CurField2.82k ) { |
1364 | | // Emit initialization |
1365 | 2.82k | LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField); |
1366 | 2.82k | if (CurField->hasCapturedVLAType()) { |
1367 | 24 | CGF.EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV); |
1368 | 24 | continue; |
1369 | 24 | } |
1370 | | |
1371 | 2.80k | EmitInitializationToLValue(*i, LV); |
1372 | | |
1373 | | // Push a destructor if necessary. |
1374 | 2.80k | if (QualType::DestructionKind DtorKind = |
1375 | 2.80k | CurField->getType().isDestructedType()) { |
1376 | 16 | assert(LV.isSimple()); |
1377 | 16 | if (CGF.needsEHCleanup(DtorKind)) { |
1378 | 9 | if (!CleanupDominator) |
1379 | 8 | CleanupDominator = CGF.Builder.CreateAlignedLoad( |
1380 | 8 | CGF.Int8Ty, |
1381 | 8 | llvm::Constant::getNullValue(CGF.Int8PtrTy), |
1382 | 8 | CharUnits::One()); // placeholder |
1383 | | |
1384 | 9 | CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), CurField->getType(), |
1385 | 9 | CGF.getDestroyer(DtorKind), false); |
1386 | 9 | Cleanups.push_back(CGF.EHStack.stable_begin()); |
1387 | 9 | } |
1388 | 16 | } |
1389 | 2.80k | } |
1390 | | |
1391 | | // Deactivate all the partial cleanups in reverse order, which |
1392 | | // generally means popping them. |
1393 | 1.89k | for (unsigned i = Cleanups.size(); 1.88k i != 0; --i9 ) |
1394 | 9 | CGF.DeactivateCleanupBlock(Cleanups[i-1], CleanupDominator); |
1395 | | |
1396 | | // Destroy the placeholder if we made one. |
1397 | 1.88k | if (CleanupDominator) |
1398 | 8 | CleanupDominator->eraseFromParent(); |
1399 | 1.88k | } |
1400 | | |
1401 | 6.20k | void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) { |
1402 | 6.20k | CodeGenFunction::RunCleanupsScope cleanups(CGF); |
1403 | 6.20k | Visit(E->getSubExpr()); |
1404 | 6.20k | } |
1405 | | |
1406 | 0 | void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) { |
1407 | 0 | QualType T = E->getType(); |
1408 | 0 | AggValueSlot Slot = EnsureSlot(T); |
1409 | 0 | EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T)); |
1410 | 0 | } |
1411 | | |
1412 | 19 | void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { |
1413 | 19 | QualType T = E->getType(); |
1414 | 19 | AggValueSlot Slot = EnsureSlot(T); |
1415 | 19 | EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddress(), T)); |
1416 | 19 | } |
1417 | | |
1418 | | /// Determine whether the given cast kind is known to always convert values |
1419 | | /// with all zero bits in their value representation to values with all zero |
1420 | | /// bits in their value representation. |
1421 | 471 | static bool castPreservesZero(const CastExpr *CE) { |
1422 | 471 | switch (CE->getCastKind()) { |
1423 | | // No-ops. |
1424 | 13 | case CK_NoOp: |
1425 | 13 | case CK_UserDefinedConversion: |
1426 | 13 | case CK_ConstructorConversion: |
1427 | 96 | case CK_BitCast: |
1428 | 96 | case CK_ToUnion: |
1429 | 96 | case CK_ToVoid: |
1430 | | // Conversions between (possibly-complex) integral, (possibly-complex) |
1431 | | // floating-point, and bool. |
1432 | 96 | case CK_BooleanToSignedIntegral: |
1433 | 97 | case CK_FloatingCast: |
1434 | 97 | case CK_FloatingComplexCast: |
1435 | 97 | case CK_FloatingComplexToBoolean: |
1436 | 97 | case CK_FloatingComplexToIntegralComplex: |
1437 | 97 | case CK_FloatingComplexToReal: |
1438 | 101 | case CK_FloatingRealToComplex: |
1439 | 101 | case CK_FloatingToBoolean: |
1440 | 108 | case CK_FloatingToIntegral: |
1441 | 286 | case CK_IntegralCast: |
1442 | 286 | case CK_IntegralComplexCast: |
1443 | 286 | case CK_IntegralComplexToBoolean: |
1444 | 286 | case CK_IntegralComplexToFloatingComplex: |
1445 | 290 | case CK_IntegralComplexToReal: |
1446 | 294 | case CK_IntegralRealToComplex: |
1447 | 294 | case CK_IntegralToBoolean: |
1448 | 302 | case CK_IntegralToFloating: |
1449 | | // Reinterpreting integers as pointers and vice versa. |
1450 | 306 | case CK_IntegralToPointer: |
1451 | 309 | case CK_PointerToIntegral: |
1452 | | // Language extensions. |
1453 | 313 | case CK_VectorSplat: |
1454 | 313 | case CK_MatrixCast: |
1455 | 315 | case CK_NonAtomicToAtomic: |
1456 | 317 | case CK_AtomicToNonAtomic: |
1457 | 317 | return true; |
1458 | | |
1459 | 0 | case CK_BaseToDerivedMemberPointer: |
1460 | 0 | case CK_DerivedToBaseMemberPointer: |
1461 | 0 | case CK_MemberPointerToBoolean: |
1462 | 0 | case CK_NullToMemberPointer: |
1463 | 0 | case CK_ReinterpretMemberPointer: |
1464 | | // FIXME: ABI-dependent. |
1465 | 0 | return false; |
1466 | | |
1467 | 0 | case CK_AnyPointerToBlockPointerCast: |
1468 | 0 | case CK_BlockPointerToObjCPointerCast: |
1469 | 0 | case CK_CPointerToObjCPointerCast: |
1470 | 0 | case CK_ObjCObjectLValueCast: |
1471 | 0 | case CK_IntToOCLSampler: |
1472 | 0 | case CK_ZeroToOCLOpaqueType: |
1473 | | // FIXME: Check these. |
1474 | 0 | return false; |
1475 | | |
1476 | 0 | case CK_FixedPointCast: |
1477 | 0 | case CK_FixedPointToBoolean: |
1478 | 0 | case CK_FixedPointToFloating: |
1479 | 0 | case CK_FixedPointToIntegral: |
1480 | 0 | case CK_FloatingToFixedPoint: |
1481 | 0 | case CK_IntegralToFixedPoint: |
1482 | | // FIXME: Do all fixed-point types represent zero as all 0 bits? |
1483 | 0 | return false; |
1484 | | |
1485 | 0 | case CK_AddressSpaceConversion: |
1486 | 0 | case CK_BaseToDerived: |
1487 | 0 | case CK_DerivedToBase: |
1488 | 0 | case CK_Dynamic: |
1489 | 16 | case CK_NullToPointer: |
1490 | 16 | case CK_PointerToBoolean: |
1491 | | // FIXME: Preserves zeroes only if zero pointers and null pointers have the |
1492 | | // same representation in all involved address spaces. |
1493 | 16 | return false; |
1494 | | |
1495 | 0 | case CK_ARCConsumeObject: |
1496 | 0 | case CK_ARCExtendBlockObject: |
1497 | 0 | case CK_ARCProduceObject: |
1498 | 0 | case CK_ARCReclaimReturnedObject: |
1499 | 0 | case CK_CopyAndAutoreleaseBlockObject: |
1500 | 2 | case CK_ArrayToPointerDecay: |
1501 | 2 | case CK_FunctionToPointerDecay: |
1502 | 2 | case CK_BuiltinFnToFnPtr: |
1503 | 2 | case CK_Dependent: |
1504 | 2 | case CK_LValueBitCast: |
1505 | 138 | case CK_LValueToRValue: |
1506 | 138 | case CK_LValueToRValueBitCast: |
1507 | 138 | case CK_UncheckedDerivedToBase: |
1508 | 138 | return false; |
1509 | 471 | } |
1510 | 0 | llvm_unreachable("Unhandled clang::CastKind enum"); |
1511 | 0 | } |
1512 | | |
1513 | | /// isSimpleZero - If emitting this value will obviously just cause a store of |
1514 | | /// zero to memory, return true. This can return false if uncertain, so it just |
1515 | | /// handles simple cases. |
1516 | 4.92k | static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) { |
1517 | 4.92k | E = E->IgnoreParens(); |
1518 | 5.24k | while (auto *CE = dyn_cast<CastExpr>(E)) { |
1519 | 471 | if (!castPreservesZero(CE)) |
1520 | 154 | break; |
1521 | 317 | E = CE->getSubExpr()->IgnoreParens(); |
1522 | 317 | } |
1523 | | |
1524 | | // 0 |
1525 | 4.92k | if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) |
1526 | 375 | return IL->getValue() == 0; |
1527 | | // +0.0 |
1528 | 4.55k | if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E)) |
1529 | 64 | return FL->getValue().isPosZero(); |
1530 | | // int() |
1531 | 4.48k | if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)4.16k ) && |
1532 | 4.48k | CGF.getTypes().isZeroInitializable(E->getType())321 ) |
1533 | 321 | return true; |
1534 | | // (int*)0 - Null pointer expressions. |
1535 | 4.16k | if (const CastExpr *ICE = dyn_cast<CastExpr>(E)) |
1536 | 154 | return ICE->getCastKind() == CK_NullToPointer && |
1537 | 154 | CGF.getTypes().isPointerZeroInitializable(E->getType())16 && |
1538 | 154 | !E->HasSideEffects(CGF.getContext())16 ; |
1539 | | // '\0' |
1540 | 4.01k | if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) |
1541 | 7 | return CL->getValue() == 0; |
1542 | | |
1543 | | // Otherwise, hard case: conservatively return false. |
1544 | 4.00k | return false; |
1545 | 4.01k | } |
1546 | | |
1547 | | |
1548 | | void |
1549 | 9.94k | AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) { |
1550 | 9.94k | QualType type = LV.getType(); |
1551 | | // FIXME: Ignore result? |
1552 | | // FIXME: Are initializers affected by volatile? |
1553 | 9.94k | if (Dest.isZeroed() && isSimpleZero(E, CGF)295 ) { |
1554 | | // Storing "i32 0" to a zero'd memory location is a noop. |
1555 | 183 | return; |
1556 | 9.76k | } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)9.64k ) { |
1557 | 118 | return EmitNullInitializationToLValue(LV); |
1558 | 9.64k | } else if (isa<NoInitExpr>(E)) { |
1559 | | // Do nothing. |
1560 | 14 | return; |
1561 | 9.63k | } else if (type->isReferenceType()) { |
1562 | 2.53k | RValue RV = CGF.EmitReferenceBindingToExpr(E); |
1563 | 2.53k | return CGF.EmitStoreThroughLValue(RV, LV); |
1564 | 2.53k | } |
1565 | | |
1566 | 7.10k | switch (CGF.getEvaluationKind(type)) { |
1567 | 22 | case TEK_Complex: |
1568 | 22 | CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true); |
1569 | 22 | return; |
1570 | 3.58k | case TEK_Aggregate: |
1571 | 3.58k | CGF.EmitAggExpr( |
1572 | 3.58k | E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed, |
1573 | 3.58k | AggValueSlot::DoesNotNeedGCBarriers, |
1574 | 3.58k | AggValueSlot::IsNotAliased, |
1575 | 3.58k | AggValueSlot::MayOverlap, Dest.isZeroed())); |
1576 | 3.58k | return; |
1577 | 3.49k | case TEK_Scalar: |
1578 | 3.49k | if (LV.isSimple()) { |
1579 | 3.47k | CGF.EmitScalarInit(E, /*D=*/nullptr, LV, /*Captured=*/false); |
1580 | 3.47k | } else { |
1581 | 25 | CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV); |
1582 | 25 | } |
1583 | 3.49k | return; |
1584 | 7.10k | } |
1585 | 0 | llvm_unreachable("bad evaluation kind"); |
1586 | 0 | } |
1587 | | |
1588 | 150 | void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) { |
1589 | 150 | QualType type = lv.getType(); |
1590 | | |
1591 | | // If the destination slot is already zeroed out before the aggregate is |
1592 | | // copied into it, we don't have to emit any zeros here. |
1593 | 150 | if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type)11 ) |
1594 | 11 | return; |
1595 | | |
1596 | 139 | if (CGF.hasScalarEvaluationKind(type)) { |
1597 | | // For non-aggregates, we can store the appropriate null constant. |
1598 | 115 | llvm::Value *null = CGF.CGM.EmitNullConstant(type); |
1599 | | // Note that the following is not equivalent to |
1600 | | // EmitStoreThroughBitfieldLValue for ARC types. |
1601 | 115 | if (lv.isBitField()) { |
1602 | 1 | CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv); |
1603 | 114 | } else { |
1604 | 114 | assert(lv.isSimple()); |
1605 | 114 | CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true); |
1606 | 114 | } |
1607 | 115 | } else { |
1608 | | // There's a potential optimization opportunity in combining |
1609 | | // memsets; that would be easy for arrays, but relatively |
1610 | | // difficult for structures with the current code. |
1611 | 24 | CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType()); |
1612 | 24 | } |
1613 | 139 | } |
1614 | | |
1615 | 25 | void AggExprEmitter::VisitCXXParenListInitExpr(CXXParenListInitExpr *E) { |
1616 | 25 | VisitCXXParenListOrInitListExpr(E, E->getInitExprs(), |
1617 | 25 | E->getInitializedFieldInUnion(), |
1618 | 25 | E->getArrayFiller()); |
1619 | 25 | } |
1620 | | |
1621 | 4.17k | void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { |
1622 | 4.17k | if (E->hadArrayRangeDesignator()) |
1623 | 0 | CGF.ErrorUnsupported(E, "GNU array range designator extension"); |
1624 | | |
1625 | 4.17k | if (E->isTransparent()) |
1626 | 16 | return Visit(E->getInit(0)); |
1627 | | |
1628 | 4.16k | VisitCXXParenListOrInitListExpr( |
1629 | 4.16k | E, E->inits(), E->getInitializedFieldInUnion(), E->getArrayFiller()); |
1630 | 4.16k | } |
1631 | | |
1632 | | void AggExprEmitter::VisitCXXParenListOrInitListExpr( |
1633 | | Expr *ExprToVisit, ArrayRef<Expr *> InitExprs, |
1634 | 4.18k | FieldDecl *InitializedFieldInUnion, Expr *ArrayFiller) { |
1635 | | #if 0 |
1636 | | // FIXME: Assess perf here? Figure out what cases are worth optimizing here |
1637 | | // (Length of globals? Chunks of zeroed-out space?). |
1638 | | // |
1639 | | // If we can, prefer a copy from a global; this is a lot less code for long |
1640 | | // globals, and it's easier for the current optimizers to analyze. |
1641 | | if (llvm::Constant *C = |
1642 | | CGF.CGM.EmitConstantExpr(ExprToVisit, ExprToVisit->getType(), &CGF)) { |
1643 | | llvm::GlobalVariable* GV = |
1644 | | new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true, |
1645 | | llvm::GlobalValue::InternalLinkage, C, ""); |
1646 | | EmitFinalDestCopy(ExprToVisit->getType(), |
1647 | | CGF.MakeAddrLValue(GV, ExprToVisit->getType())); |
1648 | | return; |
1649 | | } |
1650 | | #endif |
1651 | | |
1652 | 4.18k | AggValueSlot Dest = EnsureSlot(ExprToVisit->getType()); |
1653 | | |
1654 | 4.18k | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), ExprToVisit->getType()); |
1655 | | |
1656 | | // Handle initialization of an array. |
1657 | 4.18k | if (ExprToVisit->getType()->isConstantArrayType()) { |
1658 | 1.87k | auto AType = cast<llvm::ArrayType>(Dest.getAddress().getElementType()); |
1659 | 1.87k | EmitArrayInit(Dest.getAddress(), AType, ExprToVisit->getType(), ExprToVisit, |
1660 | 1.87k | InitExprs, ArrayFiller); |
1661 | 1.87k | return; |
1662 | 2.31k | } else if (ExprToVisit->getType()->isVariableArrayType()) { |
1663 | | // A variable array type that has an initializer can only do empty |
1664 | | // initialization. And because this feature is not exposed as an extension |
1665 | | // in C++, we can safely memset the array memory to zero. |
1666 | 4 | assert(InitExprs.size() == 0 && |
1667 | 4 | "you can only use an empty initializer with VLAs"); |
1668 | 4 | CGF.EmitNullInitialization(Dest.getAddress(), ExprToVisit->getType()); |
1669 | 4 | return; |
1670 | 4 | } |
1671 | | |
1672 | 2.30k | assert(ExprToVisit->getType()->isRecordType() && |
1673 | 2.30k | "Only support structs/unions here!"); |
1674 | | |
1675 | | // Do struct initialization; this code just sets each individual member |
1676 | | // to the appropriate value. This makes bitfield support automatic; |
1677 | | // the disadvantage is that the generated code is more difficult for |
1678 | | // the optimizer, especially with bitfields. |
1679 | 2.30k | unsigned NumInitElements = InitExprs.size(); |
1680 | 2.30k | RecordDecl *record = ExprToVisit->getType()->castAs<RecordType>()->getDecl(); |
1681 | | |
1682 | | // We'll need to enter cleanup scopes in case any of the element |
1683 | | // initializers throws an exception. |
1684 | 2.30k | SmallVector<EHScopeStack::stable_iterator, 16> cleanups; |
1685 | 2.30k | llvm::Instruction *cleanupDominator = nullptr; |
1686 | 2.30k | auto addCleanup = [&](const EHScopeStack::stable_iterator &cleanup) { |
1687 | 16 | cleanups.push_back(cleanup); |
1688 | 16 | if (!cleanupDominator) // create placeholder once needed |
1689 | 8 | cleanupDominator = CGF.Builder.CreateAlignedLoad( |
1690 | 8 | CGF.Int8Ty, llvm::Constant::getNullValue(CGF.Int8PtrTy), |
1691 | 8 | CharUnits::One()); |
1692 | 16 | }; |
1693 | | |
1694 | 2.30k | unsigned curInitIndex = 0; |
1695 | | |
1696 | | // Emit initialization of base classes. |
1697 | 2.30k | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(record)) { |
1698 | 1.36k | assert(NumInitElements >= CXXRD->getNumBases() && |
1699 | 1.36k | "missing initializer for base class"); |
1700 | 1.36k | for (auto &Base : CXXRD->bases()) { |
1701 | 20 | assert(!Base.isVirtual() && "should not see vbases here"); |
1702 | 20 | auto *BaseRD = Base.getType()->getAsCXXRecordDecl(); |
1703 | 20 | Address V = CGF.GetAddressOfDirectBaseInCompleteClass( |
1704 | 20 | Dest.getAddress(), CXXRD, BaseRD, |
1705 | 20 | /*isBaseVirtual*/ false); |
1706 | 20 | AggValueSlot AggSlot = AggValueSlot::forAddr( |
1707 | 20 | V, Qualifiers(), |
1708 | 20 | AggValueSlot::IsDestructed, |
1709 | 20 | AggValueSlot::DoesNotNeedGCBarriers, |
1710 | 20 | AggValueSlot::IsNotAliased, |
1711 | 20 | CGF.getOverlapForBaseInit(CXXRD, BaseRD, Base.isVirtual())); |
1712 | 20 | CGF.EmitAggExpr(InitExprs[curInitIndex++], AggSlot); |
1713 | | |
1714 | 20 | if (QualType::DestructionKind dtorKind = |
1715 | 20 | Base.getType().isDestructedType()) { |
1716 | 8 | CGF.pushDestroy(dtorKind, V, Base.getType()); |
1717 | 8 | addCleanup(CGF.EHStack.stable_begin()); |
1718 | 8 | } |
1719 | 20 | } |
1720 | 1.36k | } |
1721 | | |
1722 | | // Prepare a 'this' for CXXDefaultInitExprs. |
1723 | 2.30k | CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddress()); |
1724 | | |
1725 | 2.30k | if (record->isUnion()) { |
1726 | | // Only initialize one field of a union. The field itself is |
1727 | | // specified by the initializer list. |
1728 | 128 | if (!InitializedFieldInUnion) { |
1729 | | // Empty union; we have nothing to do. |
1730 | | |
1731 | 40 | #ifndef NDEBUG |
1732 | | // Make sure that it's really an empty union and not a failure of |
1733 | | // semantic analysis. |
1734 | 40 | for (const auto *Field : record->fields()) |
1735 | 0 | assert((Field->isUnnamedBitfield() || Field->isAnonymousStructOrUnion()) && "Only unnamed bitfields or ananymous class allowed"); |
1736 | 40 | #endif |
1737 | 40 | return; |
1738 | 40 | } |
1739 | | |
1740 | | // FIXME: volatility |
1741 | 88 | FieldDecl *Field = InitializedFieldInUnion; |
1742 | | |
1743 | 88 | LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field); |
1744 | 88 | if (NumInitElements) { |
1745 | | // Store the initializer into the field |
1746 | 75 | EmitInitializationToLValue(InitExprs[0], FieldLoc); |
1747 | 75 | } else { |
1748 | | // Default-initialize to null. |
1749 | 13 | EmitNullInitializationToLValue(FieldLoc); |
1750 | 13 | } |
1751 | | |
1752 | 88 | return; |
1753 | 128 | } |
1754 | | |
1755 | | // Here we iterate over the fields; this makes it simpler to both |
1756 | | // default-initialize fields and skip over unnamed fields. |
1757 | 3.45k | for (const auto *field : record->fields())2.18k { |
1758 | | // We're done once we hit the flexible array member. |
1759 | 3.45k | if (field->getType()->isIncompleteArrayType()) |
1760 | 2 | break; |
1761 | | |
1762 | | // Always skip anonymous bitfields. |
1763 | 3.45k | if (field->isUnnamedBitfield()) |
1764 | 82 | continue; |
1765 | | |
1766 | | // We're done if we reach the end of the explicit initializers, we |
1767 | | // have a zeroed object, and the rest of the fields are |
1768 | | // zero-initializable. |
1769 | 3.36k | if (curInitIndex == NumInitElements && Dest.isZeroed()0 && |
1770 | 3.36k | CGF.getTypes().isZeroInitializable(ExprToVisit->getType())0 ) |
1771 | 0 | break; |
1772 | | |
1773 | | |
1774 | 3.36k | LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, field); |
1775 | | // We never generate write-barriers for initialized fields. |
1776 | 3.36k | LV.setNonGC(true); |
1777 | | |
1778 | 3.36k | if (curInitIndex < NumInitElements) { |
1779 | | // Store the initializer into the field. |
1780 | 3.36k | EmitInitializationToLValue(InitExprs[curInitIndex++], LV); |
1781 | 3.36k | } else { |
1782 | | // We're out of initializers; default-initialize to null |
1783 | 0 | EmitNullInitializationToLValue(LV); |
1784 | 0 | } |
1785 | | |
1786 | | // Push a destructor if necessary. |
1787 | | // FIXME: if we have an array of structures, all explicitly |
1788 | | // initialized, we can end up pushing a linear number of cleanups. |
1789 | 3.36k | bool pushedCleanup = false; |
1790 | 3.36k | if (QualType::DestructionKind dtorKind |
1791 | 3.36k | = field->getType().isDestructedType()) { |
1792 | 66 | assert(LV.isSimple()); |
1793 | 66 | if (CGF.needsEHCleanup(dtorKind)) { |
1794 | 8 | CGF.pushDestroy(EHCleanup, LV.getAddress(CGF), field->getType(), |
1795 | 8 | CGF.getDestroyer(dtorKind), false); |
1796 | 8 | addCleanup(CGF.EHStack.stable_begin()); |
1797 | 8 | pushedCleanup = true; |
1798 | 8 | } |
1799 | 66 | } |
1800 | | |
1801 | | // If the GEP didn't get used because of a dead zero init or something |
1802 | | // else, clean it up for -O0 builds and general tidiness. |
1803 | 3.36k | if (!pushedCleanup && LV.isSimple()3.36k ) |
1804 | 3.31k | if (llvm::GetElementPtrInst *GEP = |
1805 | 3.31k | dyn_cast<llvm::GetElementPtrInst>(LV.getPointer(CGF))) |
1806 | 3.21k | if (GEP->use_empty()) |
1807 | 278 | GEP->eraseFromParent(); |
1808 | 3.36k | } |
1809 | | |
1810 | | // Deactivate all the partial cleanups in reverse order, which |
1811 | | // generally means popping them. |
1812 | 2.18k | assert((cleanupDominator || cleanups.empty()) && |
1813 | 2.18k | "Missing cleanupDominator before deactivating cleanup blocks"); |
1814 | 2.19k | for (unsigned i = cleanups.size(); 2.18k i != 0; --i16 ) |
1815 | 16 | CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator); |
1816 | | |
1817 | | // Destroy the placeholder if we made one. |
1818 | 2.18k | if (cleanupDominator) |
1819 | 8 | cleanupDominator->eraseFromParent(); |
1820 | 2.18k | } |
1821 | | |
1822 | | void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E, |
1823 | 36 | llvm::Value *outerBegin) { |
1824 | | // Emit the common subexpression. |
1825 | 36 | CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr()); |
1826 | | |
1827 | 36 | Address destPtr = EnsureSlot(E->getType()).getAddress(); |
1828 | 36 | uint64_t numElements = E->getArraySize().getZExtValue(); |
1829 | | |
1830 | 36 | if (!numElements) |
1831 | 0 | return; |
1832 | | |
1833 | | // destPtr is an array*. Construct an elementType* by drilling down a level. |
1834 | 36 | llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); |
1835 | 36 | llvm::Value *indices[] = {zero, zero}; |
1836 | 36 | llvm::Value *begin = Builder.CreateInBoundsGEP( |
1837 | 36 | destPtr.getElementType(), destPtr.getPointer(), indices, |
1838 | 36 | "arrayinit.begin"); |
1839 | | |
1840 | | // Prepare to special-case multidimensional array initialization: we avoid |
1841 | | // emitting multiple destructor loops in that case. |
1842 | 36 | if (!outerBegin) |
1843 | 29 | outerBegin = begin; |
1844 | 36 | ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr()); |
1845 | | |
1846 | 36 | QualType elementType = |
1847 | 36 | CGF.getContext().getAsArrayType(E->getType())->getElementType(); |
1848 | 36 | CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType); |
1849 | 36 | CharUnits elementAlign = |
1850 | 36 | destPtr.getAlignment().alignmentOfArrayElement(elementSize); |
1851 | 36 | llvm::Type *llvmElementType = CGF.ConvertTypeForMem(elementType); |
1852 | | |
1853 | 36 | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
1854 | 36 | llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body"); |
1855 | | |
1856 | | // Jump into the body. |
1857 | 36 | CGF.EmitBlock(bodyBB); |
1858 | 36 | llvm::PHINode *index = |
1859 | 36 | Builder.CreatePHI(zero->getType(), 2, "arrayinit.index"); |
1860 | 36 | index->addIncoming(zero, entryBB); |
1861 | 36 | llvm::Value *element = |
1862 | 36 | Builder.CreateInBoundsGEP(llvmElementType, begin, index); |
1863 | | |
1864 | | // Prepare for a cleanup. |
1865 | 36 | QualType::DestructionKind dtorKind = elementType.isDestructedType(); |
1866 | 36 | EHScopeStack::stable_iterator cleanup; |
1867 | 36 | if (CGF.needsEHCleanup(dtorKind) && !InnerLoop7 ) { |
1868 | 6 | if (outerBegin->getType() != element->getType()) |
1869 | 0 | outerBegin = Builder.CreateBitCast(outerBegin, element->getType()); |
1870 | 6 | CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType, |
1871 | 6 | elementAlign, |
1872 | 6 | CGF.getDestroyer(dtorKind)); |
1873 | 6 | cleanup = CGF.EHStack.stable_begin(); |
1874 | 30 | } else { |
1875 | 30 | dtorKind = QualType::DK_none; |
1876 | 30 | } |
1877 | | |
1878 | | // Emit the actual filler expression. |
1879 | 36 | { |
1880 | | // Temporaries created in an array initialization loop are destroyed |
1881 | | // at the end of each iteration. |
1882 | 36 | CodeGenFunction::RunCleanupsScope CleanupsScope(CGF); |
1883 | 36 | CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index); |
1884 | 36 | LValue elementLV = CGF.MakeAddrLValue( |
1885 | 36 | Address(element, llvmElementType, elementAlign), elementType); |
1886 | | |
1887 | 36 | if (InnerLoop) { |
1888 | | // If the subexpression is an ArrayInitLoopExpr, share its cleanup. |
1889 | 7 | auto elementSlot = AggValueSlot::forLValue( |
1890 | 7 | elementLV, CGF, AggValueSlot::IsDestructed, |
1891 | 7 | AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, |
1892 | 7 | AggValueSlot::DoesNotOverlap); |
1893 | 7 | AggExprEmitter(CGF, elementSlot, false) |
1894 | 7 | .VisitArrayInitLoopExpr(InnerLoop, outerBegin); |
1895 | 7 | } else |
1896 | 29 | EmitInitializationToLValue(E->getSubExpr(), elementLV); |
1897 | 36 | } |
1898 | | |
1899 | | // Move on to the next element. |
1900 | 36 | llvm::Value *nextIndex = Builder.CreateNUWAdd( |
1901 | 36 | index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next"); |
1902 | 36 | index->addIncoming(nextIndex, Builder.GetInsertBlock()); |
1903 | | |
1904 | | // Leave the loop if we're done. |
1905 | 36 | llvm::Value *done = Builder.CreateICmpEQ( |
1906 | 36 | nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements), |
1907 | 36 | "arrayinit.done"); |
1908 | 36 | llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end"); |
1909 | 36 | Builder.CreateCondBr(done, endBB, bodyBB); |
1910 | | |
1911 | 36 | CGF.EmitBlock(endBB); |
1912 | | |
1913 | | // Leave the partial-array cleanup if we entered one. |
1914 | 36 | if (dtorKind) |
1915 | 6 | CGF.DeactivateCleanupBlock(cleanup, index); |
1916 | 36 | } |
1917 | | |
1918 | 8 | void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) { |
1919 | 8 | AggValueSlot Dest = EnsureSlot(E->getType()); |
1920 | | |
1921 | 8 | LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType()); |
1922 | 8 | EmitInitializationToLValue(E->getBase(), DestLV); |
1923 | 8 | VisitInitListExpr(E->getUpdater()); |
1924 | 8 | } |
1925 | | |
1926 | | //===----------------------------------------------------------------------===// |
1927 | | // Entry Points into this File |
1928 | | //===----------------------------------------------------------------------===// |
1929 | | |
1930 | | /// GetNumNonZeroBytesInInit - Get an approximate count of the number of |
1931 | | /// non-zero bytes that will be stored when outputting the initializer for the |
1932 | | /// specified initializer expression. |
1933 | 4.63k | static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) { |
1934 | 4.63k | if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) |
1935 | 52 | E = MTE->getSubExpr(); |
1936 | 4.63k | E = E->IgnoreParenNoopCasts(CGF.getContext()); |
1937 | | |
1938 | | // 0 and 0.0 won't require any non-zero stores! |
1939 | 4.63k | if (isSimpleZero(E, CGF)) return CharUnits::Zero()242 ; |
1940 | | |
1941 | | // If this is an initlist expr, sum up the size of sizes of the (present) |
1942 | | // elements. If this is something weird, assume the whole thing is non-zero. |
1943 | 4.39k | const InitListExpr *ILE = dyn_cast<InitListExpr>(E); |
1944 | 4.39k | while (ILE && ILE->isTransparent()282 ) |
1945 | 5 | ILE = dyn_cast<InitListExpr>(ILE->getInit(0)); |
1946 | 4.39k | if (!ILE || !CGF.getTypes().isZeroInitializable(ILE->getType())277 ) |
1947 | 4.11k | return CGF.getContext().getTypeSizeInChars(E->getType()); |
1948 | | |
1949 | | // InitListExprs for structs have to be handled carefully. If there are |
1950 | | // reference members, we need to consider the size of the reference, not the |
1951 | | // referencee. InitListExprs for unions and arrays can't have references. |
1952 | 277 | if (const RecordType *RT = E->getType()->getAs<RecordType>()) { |
1953 | 186 | if (!RT->isUnionType()) { |
1954 | 163 | RecordDecl *SD = RT->getDecl(); |
1955 | 163 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
1956 | | |
1957 | 163 | unsigned ILEElement = 0; |
1958 | 163 | if (auto *CXXRD = dyn_cast<CXXRecordDecl>(SD)) |
1959 | 119 | while (ILEElement != CXXRD->getNumBases()) |
1960 | 0 | NumNonZeroBytes += |
1961 | 0 | GetNumNonZeroBytesInInit(ILE->getInit(ILEElement++), CGF); |
1962 | 533 | for (const auto *Field : SD->fields()) { |
1963 | | // We're done once we hit the flexible array member or run out of |
1964 | | // InitListExpr elements. |
1965 | 533 | if (Field->getType()->isIncompleteArrayType() || |
1966 | 533 | ILEElement == ILE->getNumInits()) |
1967 | 0 | break; |
1968 | 533 | if (Field->isUnnamedBitfield()) |
1969 | 0 | continue; |
1970 | | |
1971 | 533 | const Expr *E = ILE->getInit(ILEElement++); |
1972 | | |
1973 | | // Reference values are always non-null and have the width of a pointer. |
1974 | 533 | if (Field->getType()->isReferenceType()) |
1975 | 4 | NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits( |
1976 | 4 | CGF.getTarget().getPointerWidth(LangAS::Default)); |
1977 | 529 | else |
1978 | 529 | NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF); |
1979 | 533 | } |
1980 | | |
1981 | 163 | return NumNonZeroBytes; |
1982 | 163 | } |
1983 | 186 | } |
1984 | | |
1985 | | // FIXME: This overestimates the number of non-zero bytes for bit-fields. |
1986 | 114 | CharUnits NumNonZeroBytes = CharUnits::Zero(); |
1987 | 445 | for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i331 ) |
1988 | 331 | NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF); |
1989 | 114 | return NumNonZeroBytes; |
1990 | 277 | } |
1991 | | |
1992 | | /// CheckAggExprForMemSetUse - If the initializer is large and has a lot of |
1993 | | /// zeros in it, emit a memset and avoid storing the individual zeros. |
1994 | | /// |
1995 | | static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E, |
1996 | 87.5k | CodeGenFunction &CGF) { |
1997 | | // If the slot is already known to be zeroed, nothing to do. Don't mess with |
1998 | | // volatile stores. |
1999 | 87.5k | if (Slot.isZeroed() || Slot.isVolatile()87.4k || !Slot.getAddress().isValid()87.4k ) |
2000 | 1.49k | return; |
2001 | | |
2002 | | // C++ objects with a user-declared constructor don't need zero'ing. |
2003 | 86.0k | if (CGF.getLangOpts().CPlusPlus) |
2004 | 79.6k | if (const RecordType *RT = CGF.getContext() |
2005 | 79.6k | .getBaseElementType(E->getType())->getAs<RecordType>()) { |
2006 | 79.4k | const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); |
2007 | 79.4k | if (RD->hasUserDeclaredConstructor()) |
2008 | 58.9k | return; |
2009 | 79.4k | } |
2010 | | |
2011 | | // If the type is 16-bytes or smaller, prefer individual stores over memset. |
2012 | 27.0k | CharUnits Size = Slot.getPreferredSize(CGF.getContext(), E->getType()); |
2013 | 27.0k | if (Size <= CharUnits::fromQuantity(16)) |
2014 | 23.3k | return; |
2015 | | |
2016 | | // Check to see if over 3/4 of the initializer are known to be zero. If so, |
2017 | | // we prefer to emit memset + individual stores for the rest. |
2018 | 3.77k | CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF); |
2019 | 3.77k | if (NumNonZeroBytes*4 > Size) |
2020 | 3.69k | return; |
2021 | | |
2022 | | // Okay, it seems like a good idea to use an initial memset, emit the call. |
2023 | 78 | llvm::Constant *SizeVal = CGF.Builder.getInt64(Size.getQuantity()); |
2024 | | |
2025 | 78 | Address Loc = Slot.getAddress().withElementType(CGF.Int8Ty); |
2026 | 78 | CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal, false); |
2027 | | |
2028 | | // Tell the AggExprEmitter that the slot is known zero. |
2029 | 78 | Slot.setZeroed(); |
2030 | 78 | } |
2031 | | |
2032 | | |
2033 | | |
2034 | | |
2035 | | /// EmitAggExpr - Emit the computation of the specified expression of aggregate |
2036 | | /// type. The result is computed into DestPtr. Note that if DestPtr is null, |
2037 | | /// the value of the aggregate expression is not needed. If VolatileDest is |
2038 | | /// true, DestPtr cannot be 0. |
2039 | 87.5k | void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) { |
2040 | 87.5k | assert(E && hasAggregateEvaluationKind(E->getType()) && |
2041 | 87.5k | "Invalid aggregate expression to emit"); |
2042 | 87.5k | assert((Slot.getAddress().isValid() || Slot.isIgnored()) && |
2043 | 87.5k | "slot has bits but no address"); |
2044 | | |
2045 | | // Optimize the slot if possible. |
2046 | 87.5k | CheckAggExprForMemSetUse(Slot, E, *this); |
2047 | | |
2048 | 87.5k | AggExprEmitter(*this, Slot, Slot.isIgnored()).Visit(const_cast<Expr*>(E)); |
2049 | 87.5k | } |
2050 | | |
2051 | 176 | LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) { |
2052 | 176 | assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!"); |
2053 | 176 | Address Temp = CreateMemTemp(E->getType()); |
2054 | 176 | LValue LV = MakeAddrLValue(Temp, E->getType()); |
2055 | 176 | EmitAggExpr(E, AggValueSlot::forLValue( |
2056 | 176 | LV, *this, AggValueSlot::IsNotDestructed, |
2057 | 176 | AggValueSlot::DoesNotNeedGCBarriers, |
2058 | 176 | AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap)); |
2059 | 176 | return LV; |
2060 | 176 | } |
2061 | | |
2062 | | AggValueSlot::Overlap_t |
2063 | 4.38k | CodeGenFunction::getOverlapForFieldInit(const FieldDecl *FD) { |
2064 | 4.38k | if (!FD->hasAttr<NoUniqueAddressAttr>() || !FD->getType()->isRecordType()9 ) |
2065 | 4.37k | return AggValueSlot::DoesNotOverlap; |
2066 | | |
2067 | | // If the field lies entirely within the enclosing class's nvsize, its tail |
2068 | | // padding cannot overlap any already-initialized object. (The only subobjects |
2069 | | // with greater addresses that might already be initialized are vbases.) |
2070 | 9 | const RecordDecl *ClassRD = FD->getParent(); |
2071 | 9 | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassRD); |
2072 | 9 | if (Layout.getFieldOffset(FD->getFieldIndex()) + |
2073 | 9 | getContext().getTypeSize(FD->getType()) <= |
2074 | 9 | (uint64_t)getContext().toBits(Layout.getNonVirtualSize())) |
2075 | 7 | return AggValueSlot::DoesNotOverlap; |
2076 | | |
2077 | | // The tail padding may contain values we need to preserve. |
2078 | 2 | return AggValueSlot::MayOverlap; |
2079 | 9 | } |
2080 | | |
2081 | | AggValueSlot::Overlap_t CodeGenFunction::getOverlapForBaseInit( |
2082 | 9.05k | const CXXRecordDecl *RD, const CXXRecordDecl *BaseRD, bool IsVirtual) { |
2083 | | // If the most-derived object is a field declared with [[no_unique_address]], |
2084 | | // the tail padding of any virtual base could be reused for other subobjects |
2085 | | // of that field's class. |
2086 | 9.05k | if (IsVirtual) |
2087 | 888 | return AggValueSlot::MayOverlap; |
2088 | | |
2089 | | // If the base class is laid out entirely within the nvsize of the derived |
2090 | | // class, its tail padding cannot yet be initialized, so we can issue |
2091 | | // stores at the full width of the base class. |
2092 | 8.16k | const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); |
2093 | 8.16k | if (Layout.getBaseClassOffset(BaseRD) + |
2094 | 8.16k | getContext().getASTRecordLayout(BaseRD).getSize() <= |
2095 | 8.16k | Layout.getNonVirtualSize()) |
2096 | 7.90k | return AggValueSlot::DoesNotOverlap; |
2097 | | |
2098 | | // The tail padding may contain values we need to preserve. |
2099 | 265 | return AggValueSlot::MayOverlap; |
2100 | 8.16k | } |
2101 | | |
2102 | | void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty, |
2103 | | AggValueSlot::Overlap_t MayOverlap, |
2104 | 16.8k | bool isVolatile) { |
2105 | 16.8k | assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex"); |
2106 | | |
2107 | 16.8k | Address DestPtr = Dest.getAddress(*this); |
2108 | 16.8k | Address SrcPtr = Src.getAddress(*this); |
2109 | | |
2110 | 16.8k | if (getLangOpts().CPlusPlus) { |
2111 | 13.7k | if (const RecordType *RT = Ty->getAs<RecordType>()) { |
2112 | 12.1k | CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl()); |
2113 | 12.1k | assert((Record->hasTrivialCopyConstructor() || |
2114 | 12.1k | Record->hasTrivialCopyAssignment() || |
2115 | 12.1k | Record->hasTrivialMoveConstructor() || |
2116 | 12.1k | Record->hasTrivialMoveAssignment() || |
2117 | 12.1k | Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) && |
2118 | 12.1k | "Trying to aggregate-copy a type without a trivial copy/move " |
2119 | 12.1k | "constructor or assignment operator"); |
2120 | | // Ignore empty classes in C++. |
2121 | 12.1k | if (Record->isEmpty()) |
2122 | 2.71k | return; |
2123 | 12.1k | } |
2124 | 13.7k | } |
2125 | | |
2126 | 14.1k | if (getLangOpts().CUDAIsDevice) { |
2127 | 0 | if (Ty->isCUDADeviceBuiltinSurfaceType()) { |
2128 | 0 | if (getTargetHooks().emitCUDADeviceBuiltinSurfaceDeviceCopy(*this, Dest, |
2129 | 0 | Src)) |
2130 | 0 | return; |
2131 | 0 | } else if (Ty->isCUDADeviceBuiltinTextureType()) { |
2132 | 0 | if (getTargetHooks().emitCUDADeviceBuiltinTextureDeviceCopy(*this, Dest, |
2133 | 0 | Src)) |
2134 | 0 | return; |
2135 | 0 | } |
2136 | 0 | } |
2137 | | |
2138 | | // Aggregate assignment turns into llvm.memcpy. This is almost valid per |
2139 | | // C99 6.5.16.1p3, which states "If the value being stored in an object is |
2140 | | // read from another object that overlaps in anyway the storage of the first |
2141 | | // object, then the overlap shall be exact and the two objects shall have |
2142 | | // qualified or unqualified versions of a compatible type." |
2143 | | // |
2144 | | // memcpy is not defined if the source and destination pointers are exactly |
2145 | | // equal, but other compilers do this optimization, and almost every memcpy |
2146 | | // implementation handles this case safely. If there is a libc that does not |
2147 | | // safely handle this, we can add a target hook. |
2148 | | |
2149 | | // Get data size info for this aggregate. Don't copy the tail padding if this |
2150 | | // might be a potentially-overlapping subobject, since the tail padding might |
2151 | | // be occupied by a different object. Otherwise, copying it is fine. |
2152 | 14.1k | TypeInfoChars TypeInfo; |
2153 | 14.1k | if (MayOverlap) |
2154 | 4.91k | TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty); |
2155 | 9.23k | else |
2156 | 9.23k | TypeInfo = getContext().getTypeInfoInChars(Ty); |
2157 | | |
2158 | 14.1k | llvm::Value *SizeVal = nullptr; |
2159 | 14.1k | if (TypeInfo.Width.isZero()) { |
2160 | | // But note that getTypeInfo returns 0 for a VLA. |
2161 | 171 | if (auto *VAT = dyn_cast_or_null<VariableArrayType>( |
2162 | 171 | getContext().getAsArrayType(Ty))) { |
2163 | 100 | QualType BaseEltTy; |
2164 | 100 | SizeVal = emitArrayLength(VAT, BaseEltTy, DestPtr); |
2165 | 100 | TypeInfo = getContext().getTypeInfoInChars(BaseEltTy); |
2166 | 100 | assert(!TypeInfo.Width.isZero()); |
2167 | 100 | SizeVal = Builder.CreateNUWMul( |
2168 | 100 | SizeVal, |
2169 | 100 | llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity())); |
2170 | 100 | } |
2171 | 171 | } |
2172 | 14.1k | if (!SizeVal) { |
2173 | 14.0k | SizeVal = llvm::ConstantInt::get(SizeTy, TypeInfo.Width.getQuantity()); |
2174 | 14.0k | } |
2175 | | |
2176 | | // FIXME: If we have a volatile struct, the optimizer can remove what might |
2177 | | // appear to be `extra' memory ops: |
2178 | | // |
2179 | | // volatile struct { int i; } a, b; |
2180 | | // |
2181 | | // int main() { |
2182 | | // a = b; |
2183 | | // a = b; |
2184 | | // } |
2185 | | // |
2186 | | // we need to use a different call here. We use isVolatile to indicate when |
2187 | | // either the source or the destination is volatile. |
2188 | | |
2189 | 14.1k | DestPtr = DestPtr.withElementType(Int8Ty); |
2190 | 14.1k | SrcPtr = SrcPtr.withElementType(Int8Ty); |
2191 | | |
2192 | | // Don't do any of the memmove_collectable tests if GC isn't set. |
2193 | 14.1k | if (CGM.getLangOpts().getGC() == LangOptions::NonGC) { |
2194 | | // fall through |
2195 | 14.1k | } else if (const RecordType *30 RecordTy30 = Ty->getAs<RecordType>()) { |
2196 | 28 | RecordDecl *Record = RecordTy->getDecl(); |
2197 | 28 | if (Record->hasObjectMember()) { |
2198 | 23 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, |
2199 | 23 | SizeVal); |
2200 | 23 | return; |
2201 | 23 | } |
2202 | 28 | } else if (2 Ty->isArrayType()2 ) { |
2203 | 2 | QualType BaseType = getContext().getBaseElementType(Ty); |
2204 | 2 | if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) { |
2205 | 1 | if (RecordTy->getDecl()->hasObjectMember()) { |
2206 | 1 | CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr, |
2207 | 1 | SizeVal); |
2208 | 1 | return; |
2209 | 1 | } |
2210 | 1 | } |
2211 | 2 | } |
2212 | | |
2213 | 14.1k | auto Inst = Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, isVolatile); |
2214 | | |
2215 | | // Determine the metadata to describe the position of any padding in this |
2216 | | // memcpy, as well as the TBAA tags for the members of the struct, in case |
2217 | | // the optimizer wishes to expand it in to scalar memory operations. |
2218 | 14.1k | if (llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty)) |
2219 | 420 | Inst->setMetadata(llvm::LLVMContext::MD_tbaa_struct, TBAAStructTag); |
2220 | | |
2221 | 14.1k | if (CGM.getCodeGenOpts().NewStructPathTBAA) { |
2222 | 7 | TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForMemoryTransfer( |
2223 | 7 | Dest.getTBAAInfo(), Src.getTBAAInfo()); |
2224 | 7 | CGM.DecorateInstructionWithTBAA(Inst, TBAAInfo); |
2225 | 7 | } |
2226 | 14.1k | } |