/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGDecl.cpp
Line | Count | Source |
1 | | //===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code to emit Decl nodes as LLVM code. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGBlocks.h" |
14 | | #include "CGCXXABI.h" |
15 | | #include "CGCleanup.h" |
16 | | #include "CGDebugInfo.h" |
17 | | #include "CGOpenCLRuntime.h" |
18 | | #include "CGOpenMPRuntime.h" |
19 | | #include "CodeGenFunction.h" |
20 | | #include "CodeGenModule.h" |
21 | | #include "ConstantEmitter.h" |
22 | | #include "PatternInit.h" |
23 | | #include "TargetInfo.h" |
24 | | #include "clang/AST/ASTContext.h" |
25 | | #include "clang/AST/Attr.h" |
26 | | #include "clang/AST/CharUnits.h" |
27 | | #include "clang/AST/Decl.h" |
28 | | #include "clang/AST/DeclObjC.h" |
29 | | #include "clang/AST/DeclOpenMP.h" |
30 | | #include "clang/Basic/CodeGenOptions.h" |
31 | | #include "clang/Basic/SourceManager.h" |
32 | | #include "clang/Basic/TargetInfo.h" |
33 | | #include "clang/CodeGen/CGFunctionInfo.h" |
34 | | #include "clang/Sema/Sema.h" |
35 | | #include "llvm/Analysis/ValueTracking.h" |
36 | | #include "llvm/IR/DataLayout.h" |
37 | | #include "llvm/IR/GlobalVariable.h" |
38 | | #include "llvm/IR/Intrinsics.h" |
39 | | #include "llvm/IR/Type.h" |
40 | | |
41 | | using namespace clang; |
42 | | using namespace CodeGen; |
43 | | |
44 | | static_assert(clang::Sema::MaximumAlignment <= llvm::Value::MaximumAlignment, |
45 | | "Clang max alignment greater than what LLVM supports?"); |
46 | | |
47 | 203k | void CodeGenFunction::EmitDecl(const Decl &D) { |
48 | 203k | switch (D.getKind()) { |
49 | 0 | case Decl::BuiltinTemplate: |
50 | 0 | case Decl::TranslationUnit: |
51 | 0 | case Decl::ExternCContext: |
52 | 0 | case Decl::Namespace: |
53 | 0 | case Decl::UnresolvedUsingTypename: |
54 | 0 | case Decl::ClassTemplateSpecialization: |
55 | 0 | case Decl::ClassTemplatePartialSpecialization: |
56 | 0 | case Decl::VarTemplateSpecialization: |
57 | 0 | case Decl::VarTemplatePartialSpecialization: |
58 | 0 | case Decl::TemplateTypeParm: |
59 | 0 | case Decl::UnresolvedUsingValue: |
60 | 0 | case Decl::NonTypeTemplateParm: |
61 | 0 | case Decl::CXXDeductionGuide: |
62 | 0 | case Decl::CXXMethod: |
63 | 0 | case Decl::CXXConstructor: |
64 | 0 | case Decl::CXXDestructor: |
65 | 0 | case Decl::CXXConversion: |
66 | 0 | case Decl::Field: |
67 | 0 | case Decl::MSProperty: |
68 | 0 | case Decl::IndirectField: |
69 | 0 | case Decl::ObjCIvar: |
70 | 0 | case Decl::ObjCAtDefsField: |
71 | 0 | case Decl::ParmVar: |
72 | 0 | case Decl::ImplicitParam: |
73 | 0 | case Decl::ClassTemplate: |
74 | 0 | case Decl::VarTemplate: |
75 | 0 | case Decl::FunctionTemplate: |
76 | 0 | case Decl::TypeAliasTemplate: |
77 | 0 | case Decl::TemplateTemplateParm: |
78 | 0 | case Decl::ObjCMethod: |
79 | 0 | case Decl::ObjCCategory: |
80 | 0 | case Decl::ObjCProtocol: |
81 | 0 | case Decl::ObjCInterface: |
82 | 0 | case Decl::ObjCCategoryImpl: |
83 | 0 | case Decl::ObjCImplementation: |
84 | 0 | case Decl::ObjCProperty: |
85 | 0 | case Decl::ObjCCompatibleAlias: |
86 | 0 | case Decl::PragmaComment: |
87 | 0 | case Decl::PragmaDetectMismatch: |
88 | 0 | case Decl::AccessSpec: |
89 | 0 | case Decl::LinkageSpec: |
90 | 0 | case Decl::Export: |
91 | 0 | case Decl::ObjCPropertyImpl: |
92 | 0 | case Decl::FileScopeAsm: |
93 | 0 | case Decl::Friend: |
94 | 0 | case Decl::FriendTemplate: |
95 | 0 | case Decl::Block: |
96 | 0 | case Decl::Captured: |
97 | 0 | case Decl::ClassScopeFunctionSpecialization: |
98 | 0 | case Decl::UsingShadow: |
99 | 0 | case Decl::ConstructorUsingShadow: |
100 | 0 | case Decl::ObjCTypeParam: |
101 | 0 | case Decl::Binding: |
102 | 0 | case Decl::UnresolvedUsingIfExists: |
103 | 0 | llvm_unreachable("Declaration should not be in declstmts!"); |
104 | 501 | case Decl::Record: // struct/union/class X; |
105 | 3.49k | case Decl::CXXRecord: // struct/union/class X; [C++] |
106 | 3.49k | if (CGDebugInfo *DI = getDebugInfo()) |
107 | 2.70k | if (cast<RecordDecl>(D).getDefinition()) |
108 | 2.69k | DI->EmitAndRetainType(getContext().getRecordType(cast<RecordDecl>(&D))); |
109 | 3.49k | return; |
110 | 85 | case Decl::Enum: // enum X; |
111 | 85 | if (CGDebugInfo *DI = getDebugInfo()) |
112 | 37 | if (cast<EnumDecl>(D).getDefinition()) |
113 | 35 | DI->EmitAndRetainType(getContext().getEnumType(cast<EnumDecl>(&D))); |
114 | 85 | return; |
115 | 540 | case Decl::Function: // void X(); |
116 | 540 | case Decl::EnumConstant: // enum ? { X = ? } |
117 | 3.16k | case Decl::StaticAssert: // static_assert(X, ""); [C++0x] |
118 | 3.16k | case Decl::Label: // __label__ x; |
119 | 3.16k | case Decl::Import: |
120 | 3.16k | case Decl::MSGuid: // __declspec(uuid("...")) |
121 | 3.16k | case Decl::UnnamedGlobalConstant: |
122 | 3.16k | case Decl::TemplateParamObject: |
123 | 3.19k | case Decl::OMPThreadPrivate: |
124 | 3.31k | case Decl::OMPAllocate: |
125 | 3.31k | case Decl::OMPCapturedExpr: |
126 | 3.31k | case Decl::OMPRequires: |
127 | 3.31k | case Decl::Empty: |
128 | 3.31k | case Decl::Concept: |
129 | 3.31k | case Decl::LifetimeExtendedTemporary: |
130 | 3.31k | case Decl::RequiresExprBody: |
131 | | // None of these decls require codegen support. |
132 | 3.31k | return; |
133 | | |
134 | 9 | case Decl::NamespaceAlias: |
135 | 9 | if (CGDebugInfo *DI = getDebugInfo()) |
136 | 8 | DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D)); |
137 | 9 | return; |
138 | 8.90k | case Decl::Using: // using X; [C++] |
139 | 8.90k | if (CGDebugInfo *DI = getDebugInfo()) |
140 | 84 | DI->EmitUsingDecl(cast<UsingDecl>(D)); |
141 | 8.90k | return; |
142 | 0 | case Decl::UsingEnum: // using enum X; [C++] |
143 | 0 | if (CGDebugInfo *DI = getDebugInfo()) |
144 | 0 | DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D)); |
145 | 0 | return; |
146 | 0 | case Decl::UsingPack: |
147 | 0 | for (auto *Using : cast<UsingPackDecl>(D).expansions()) |
148 | 0 | EmitDecl(*Using); |
149 | 0 | return; |
150 | 114 | case Decl::UsingDirective: // using namespace X; [C++] |
151 | 114 | if (CGDebugInfo *DI = getDebugInfo()) |
152 | 113 | DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D)); |
153 | 114 | return; |
154 | 184k | case Decl::Var: |
155 | 184k | case Decl::Decomposition: { |
156 | 184k | const VarDecl &VD = cast<VarDecl>(D); |
157 | 184k | assert(VD.isLocalVarDecl() && |
158 | 184k | "Should not see file-scope variables inside a function!"); |
159 | 0 | EmitVarDecl(VD); |
160 | 184k | if (auto *DD = dyn_cast<DecompositionDecl>(&VD)) |
161 | 52 | for (auto *B : DD->bindings()) |
162 | 175 | if (auto *HD = B->getHoldingVar()) |
163 | 22 | EmitVarDecl(*HD); |
164 | 184k | return; |
165 | 184k | } |
166 | | |
167 | 80 | case Decl::OMPDeclareReduction: |
168 | 80 | return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this); |
169 | | |
170 | 0 | case Decl::OMPDeclareMapper: |
171 | 0 | return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this); |
172 | | |
173 | 2.88k | case Decl::Typedef: // typedef int X; |
174 | 3.09k | case Decl::TypeAlias: { // using X = int; [C++0x] |
175 | 3.09k | QualType Ty = cast<TypedefNameDecl>(D).getUnderlyingType(); |
176 | 3.09k | if (CGDebugInfo *DI = getDebugInfo()) |
177 | 2.57k | DI->EmitAndRetainType(Ty); |
178 | 3.09k | if (Ty->isVariablyModifiedType()) |
179 | 21 | EmitVariablyModifiedType(Ty); |
180 | 3.09k | return; |
181 | 2.88k | } |
182 | 203k | } |
183 | 203k | } |
184 | | |
185 | | /// EmitVarDecl - This method handles emission of any variable declaration |
186 | | /// inside a function, including static vars etc. |
187 | 251k | void CodeGenFunction::EmitVarDecl(const VarDecl &D) { |
188 | 251k | if (D.hasExternalStorage()) |
189 | | // Don't emit it now; allow it to be emitted lazily on its first use.
190 | 133 | return; |
191 | | |
192 | | // Some function-scope variables do not have static storage but still
193 | | // need to be emitted like static variables, e.g. a function-scope
194 | | // variable in the constant address space in OpenCL.
195 | 251k | if (D.getStorageDuration() != SD_Automatic) { |
196 | | // Static sampler variables are translated to function calls.
197 | 12.3k | if (D.getType()->isSamplerT()) |
198 | 3 | return; |
199 | | |
200 | 12.3k | llvm::GlobalValue::LinkageTypes Linkage = |
201 | 12.3k | CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false); |
202 | | |
203 | | // FIXME: We need to force the emission/use of a guard variable for |
204 | | // some variables even if we can constant-evaluate them because |
205 | | // we can't guarantee every translation unit will constant-evaluate them. |
206 | | |
207 | 12.3k | return EmitStaticVarDecl(D, Linkage); |
208 | 12.3k | } |
209 | | |
210 | 238k | if (D.getType().getAddressSpace() == LangAS::opencl_local) |
211 | 124 | return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D); |
212 | | |
213 | 238k | assert(D.hasLocalStorage()); |
214 | 0 | return EmitAutoVarDecl(D); |
215 | 238k | } |
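
A minimal sketch (hypothetical code, not from this file) of the three paths EmitVarDecl distinguishes: non-automatic storage duration, OpenCL __local storage, and ordinary automatic storage:

    void f(void) {
      static int counter;  // SD_Static            -> EmitStaticVarDecl
      int tmp = counter;   // automatic, local     -> EmitAutoVarDecl
      (void)tmp;
    }
    // In OpenCL C, a __local variable inside a kernel takes the remaining path:
    //   kernel void k(void) { __local int scratch[64]; }  // EmitWorkGroupLocalVarDecl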
216 | | |
217 | 12.4k | static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) { |
218 | 12.4k | if (CGM.getLangOpts().CPlusPlus) |
219 | 11.8k | return CGM.getMangledName(&D).str(); |
220 | | |
221 | | // If this isn't C++, we don't need a mangled name, just a pretty one. |
222 | 547 | assert(!D.isExternallyVisible() && "name shouldn't matter"); |
223 | 0 | std::string ContextName; |
224 | 547 | const DeclContext *DC = D.getDeclContext(); |
225 | 547 | if (auto *CD = dyn_cast<CapturedDecl>(DC)) |
226 | 1 | DC = cast<DeclContext>(CD->getNonClosureContext()); |
227 | 547 | if (const auto *FD = dyn_cast<FunctionDecl>(DC)) |
228 | 539 | ContextName = std::string(CGM.getMangledName(FD)); |
229 | 8 | else if (const auto *BD = dyn_cast<BlockDecl>(DC)) |
230 | 5 | ContextName = std::string(CGM.getBlockMangledName(GlobalDecl(), BD)); |
231 | 3 | else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC)) |
232 | 3 | ContextName = OMD->getSelector().getAsString(); |
233 | 0 | else |
234 | 0 | llvm_unreachable("Unknown context for static var decl"); |
235 | | |
236 | 547 | ContextName += "." + D.getNameAsString(); |
237 | 547 | return ContextName; |
238 | 12.4k | } |
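
The pretty-name scheme above, illustrated with hypothetical identifiers: in C,

    void foo(void) { static int x; }

produces the name "foo.x", while in C++ the variable's own mangled name (here it would be "_ZZ3foovE1x") is used instead.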
239 | | |
240 | | llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl( |
241 | 13.0k | const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) { |
242 | | // In general, we don't always emit static var decls once before we reference |
243 | | // them. It is possible to reference them before emitting the function that |
244 | | // contains them, and it is possible to emit the containing function multiple |
245 | | // times. |
246 | 13.0k | if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D]) |
247 | 650 | return ExistingGV; |
248 | | |
249 | 12.4k | QualType Ty = D.getType(); |
250 | 12.4k | assert(Ty->isConstantSizeType() && "VLAs can't be static"); |
251 | | |
252 | | // Use the label if the variable is renamed with the asm-label extension. |
253 | 0 | std::string Name; |
254 | 12.4k | if (D.hasAttr<AsmLabelAttr>()) |
255 | 2 | Name = std::string(getMangledName(&D)); |
256 | 12.4k | else |
257 | 12.4k | Name = getStaticDeclName(*this, D); |
258 | | |
259 | 12.4k | llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty); |
260 | 12.4k | LangAS AS = GetGlobalVarAddressSpace(&D); |
261 | 12.4k | unsigned TargetAS = getContext().getTargetAddressSpace(AS); |
262 | | |
263 | | // OpenCL variables in local address space and CUDA shared |
264 | | // variables cannot have an initializer. |
265 | 12.4k | llvm::Constant *Init = nullptr; |
266 | 12.4k | if (Ty.getAddressSpace() == LangAS::opencl_local || |
267 | 12.4k | D.hasAttr<CUDASharedAttr>() || D.hasAttr<LoaderUninitializedAttr>())
268 | 148 | Init = llvm::UndefValue::get(LTy); |
269 | 12.2k | else |
270 | 12.2k | Init = EmitNullConstant(Ty); |
271 | | |
272 | 12.4k | llvm::GlobalVariable *GV = new llvm::GlobalVariable( |
273 | 12.4k | getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name, |
274 | 12.4k | nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS); |
275 | 12.4k | GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign()); |
276 | | |
277 | 12.4k | if (supportsCOMDAT() && GV->isWeakForLinker())
278 | 258 | GV->setComdat(TheModule.getOrInsertComdat(GV->getName())); |
279 | | |
280 | 12.4k | if (D.getTLSKind()) |
281 | 127 | setTLSMode(GV, D); |
282 | | |
283 | 12.4k | setGVProperties(GV, &D); |
284 | | |
285 | | // Make sure the result is of the correct type. |
286 | 12.4k | LangAS ExpectedAS = Ty.getAddressSpace(); |
287 | 12.4k | llvm::Constant *Addr = GV; |
288 | 12.4k | if (AS != ExpectedAS) { |
289 | 50 | Addr = getTargetCodeGenInfo().performAddrSpaceCast( |
290 | 50 | *this, GV, AS, ExpectedAS, |
291 | 50 | LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS))); |
292 | 50 | } |
293 | | |
294 | 12.4k | setStaticLocalDeclAddress(&D, Addr); |
295 | | |
296 | | // Ensure that the static local gets initialized by making sure the parent |
297 | | // function gets emitted eventually. |
298 | 12.4k | const Decl *DC = cast<Decl>(D.getDeclContext()); |
299 | | |
300 | | // We can't name blocks or captured statements directly, so try to emit their |
301 | | // parents. |
302 | 12.4k | if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
303 | 296 | DC = DC->getNonClosureContext(); |
304 | | // FIXME: Ensure that global blocks get emitted. |
305 | 296 | if (!DC) |
306 | 21 | return Addr; |
307 | 296 | } |
308 | | |
309 | 12.4k | GlobalDecl GD; |
310 | 12.4k | if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC)) |
311 | 11 | GD = GlobalDecl(CD, Ctor_Base); |
312 | 12.4k | else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC)) |
313 | 4 | GD = GlobalDecl(DD, Dtor_Base); |
314 | 12.4k | else if (const auto *FD = dyn_cast<FunctionDecl>(DC)) |
315 | 12.2k | GD = GlobalDecl(FD); |
316 | 116 | else { |
317 | | // Don't do anything for Obj-C method decls or global closures. We should |
318 | | // never defer them. |
319 | 116 | assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl"); |
320 | 116 | } |
321 | 12.4k | if (GD.getDecl()) { |
322 | | // Disable emission of the parent function for the OpenMP device codegen. |
323 | 12.3k | CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this); |
324 | 12.3k | (void)GetAddrOfGlobal(GD); |
325 | 12.3k | } |
326 | | |
327 | 12.4k | return Addr; |
328 | 12.4k | } |
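
A hypothetical case the StaticLocalDeclMap cache handles: the same function body emitted more than once, e.g. a constructor emitted in both its complete and base variants, where both emissions must resolve the static local to one global:

    struct S {
      S() { static int once = 0; ++once; }  // C1 and C2 ctor emissions share 'once'
    };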
329 | | |
330 | | /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the |
331 | | /// global variable that has already been created for it. If the initializer |
332 | | /// has a different type than GV does, this may free GV and return a different |
333 | | /// one. Otherwise it just returns GV. |
334 | | llvm::GlobalVariable * |
335 | | CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D, |
336 | 10.3k | llvm::GlobalVariable *GV) { |
337 | 10.3k | ConstantEmitter emitter(*this); |
338 | 10.3k | llvm::Constant *Init = emitter.tryEmitForInitializer(D); |
339 | | |
340 | | // If constant emission failed, then this should be a C++ static |
341 | | // initializer. |
342 | 10.3k | if (!Init) { |
343 | 8.95k | if (!getLangOpts().CPlusPlus) |
344 | 0 | CGM.ErrorUnsupported(D.getInit(), "constant l-value expression"); |
345 | 8.95k | else if (D.hasFlexibleArrayInit(getContext())) |
346 | 1 | CGM.ErrorUnsupported(D.getInit(), "flexible array initializer"); |
347 | 8.95k | else if (HaveInsertPoint()) { |
348 | | // Since we have a static initializer, this global variable can't |
349 | | // be constant. |
350 | 8.94k | GV->setConstant(false); |
351 | | |
352 | 8.94k | EmitCXXGuardedInit(D, GV, /*PerformInit*/true); |
353 | 8.94k | } |
354 | 8.95k | return GV; |
355 | 8.95k | } |
356 | | |
357 | 1.44k | #ifndef NDEBUG |
358 | 1.44k | CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) + |
359 | 1.44k | D.getFlexibleArrayInitChars(getContext()); |
360 | 1.44k | CharUnits CstSize = CharUnits::fromQuantity( |
361 | 1.44k | CGM.getDataLayout().getTypeAllocSize(Init->getType())); |
362 | 1.44k | assert(VarSize == CstSize && "Emitted constant has unexpected size"); |
363 | 0 | #endif |
364 | | |
365 | | // The initializer may differ in type from the global. Rewrite |
366 | | // the global to match the initializer. (We have to do this |
367 | | // because some types, like unions, can't be completely represented |
368 | | // in the LLVM type system.) |
369 | 1.44k | if (GV->getValueType() != Init->getType()) { |
370 | 84 | llvm::GlobalVariable *OldGV = GV; |
371 | | |
372 | 84 | GV = new llvm::GlobalVariable( |
373 | 84 | CGM.getModule(), Init->getType(), OldGV->isConstant(), |
374 | 84 | OldGV->getLinkage(), Init, "", |
375 | 84 | /*InsertBefore*/ OldGV, OldGV->getThreadLocalMode(), |
376 | 84 | OldGV->getType()->getPointerAddressSpace()); |
377 | 84 | GV->setVisibility(OldGV->getVisibility()); |
378 | 84 | GV->setDSOLocal(OldGV->isDSOLocal()); |
379 | 84 | GV->setComdat(OldGV->getComdat()); |
380 | | |
381 | | // Steal the name of the old global |
382 | 84 | GV->takeName(OldGV); |
383 | | |
384 | | // Replace all uses of the old global with the new global |
385 | 84 | llvm::Constant *NewPtrForOldDecl = |
386 | 84 | llvm::ConstantExpr::getBitCast(GV, OldGV->getType()); |
387 | 84 | OldGV->replaceAllUsesWith(NewPtrForOldDecl); |
388 | | |
389 | | // Erase the old global, since it is no longer used. |
390 | 84 | OldGV->eraseFromParent(); |
391 | 84 | } |
392 | | |
393 | 1.44k | GV->setConstant(CGM.isTypeConstant(D.getType(), true)); |
394 | 1.44k | GV->setInitializer(Init); |
395 | | |
396 | 1.44k | emitter.finalize(GV); |
397 | | |
398 | 1.44k | if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor && |
399 | 1.44k | HaveInsertPoint()) {
400 | | // We have a constant initializer, but a nontrivial destructor. We still |
401 | | // need to perform a guarded "initialization" in order to register the |
402 | | // destructor. |
403 | 50 | EmitCXXGuardedInit(D, GV, /*PerformInit*/false); |
404 | 50 | } |
405 | | |
406 | 1.44k | return GV; |
407 | 10.3k | } |
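
Hypothetical initializers showing the two outcomes above:

    void g() {
      static int a = 42;           // tryEmitForInitializer succeeds: 42 becomes
                                   // the GlobalVariable's initializer directly
      static std::string s = "x";  // constant emission fails in C++: the store
                                   // is deferred to EmitCXXGuardedInit at run time
    }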
408 | | |
409 | | void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D, |
410 | 12.4k | llvm::GlobalValue::LinkageTypes Linkage) { |
411 | | // Check to see if we already have a global variable for this |
412 | | // declaration. This can happen when double-emitting function |
413 | | // bodies, e.g. with complete and base constructors. |
414 | 12.4k | llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage); |
415 | 12.4k | CharUnits alignment = getContext().getDeclAlign(&D); |
416 | | |
417 | | // Store into LocalDeclMap before generating initializer to handle |
418 | | // circular references. |
419 | 12.4k | llvm::Type *elemTy = ConvertTypeForMem(D.getType()); |
420 | 12.4k | setAddrOfLocalVar(&D, Address(addr, elemTy, alignment)); |
421 | | |
422 | | // We can't have a VLA here, but we can have a pointer to a VLA, |
423 | | // even though that doesn't really make any sense. |
424 | | // Make sure to evaluate VLA bounds now so that we have them for later. |
425 | 12.4k | if (D.getType()->isVariablyModifiedType()) |
426 | 1 | EmitVariablyModifiedType(D.getType()); |
427 | | |
428 | | // Save the type in case adding the initializer forces a type change. |
429 | 12.4k | llvm::Type *expectedType = addr->getType(); |
430 | | |
431 | 12.4k | llvm::GlobalVariable *var = |
432 | 12.4k | cast<llvm::GlobalVariable>(addr->stripPointerCasts()); |
433 | | |
434 | | // CUDA's local and local static __shared__ variables should not |
435 | | // have any non-empty initializers. This is ensured by Sema. |
436 | | // Whatever initializer such variable may have when it gets here is |
437 | | // a no-op and should not be emitted. |
438 | 12.4k | bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
439 | 12.4k | D.hasAttr<CUDASharedAttr>();
440 | | // If this value has an initializer, emit it.
441 | 12.4k | if (D.getInit() && !isCudaSharedVar)
442 | 10.3k | var = AddInitializerToStaticVarDecl(D, var); |
443 | | |
444 | 12.4k | var->setAlignment(alignment.getAsAlign()); |
445 | | |
446 | 12.4k | if (D.hasAttr<AnnotateAttr>()) |
447 | 4 | CGM.AddGlobalAnnotations(&D, var); |
448 | | |
449 | 12.4k | if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>()) |
450 | 2 | var->addAttribute("bss-section", SA->getName()); |
451 | 12.4k | if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>()) |
452 | 0 | var->addAttribute("data-section", SA->getName()); |
453 | 12.4k | if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>()) |
454 | 2 | var->addAttribute("rodata-section", SA->getName()); |
455 | 12.4k | if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>()) |
456 | 0 | var->addAttribute("relro-section", SA->getName()); |
457 | | |
458 | 12.4k | if (const SectionAttr *SA = D.getAttr<SectionAttr>()) |
459 | 1 | var->setSection(SA->getName()); |
460 | | |
461 | 12.4k | if (D.hasAttr<RetainAttr>()) |
462 | 1 | CGM.addUsedGlobal(var); |
463 | 12.4k | else if (D.hasAttr<UsedAttr>()) |
464 | 2 | CGM.addUsedOrCompilerUsedGlobal(var); |
465 | | |
466 | | // We may have to cast the constant because of the initializer |
467 | | // mismatch above. |
468 | | // |
469 | | // FIXME: It is really dangerous to store this in the map; if anyone |
470 | | // RAUW's the GV uses of this constant will be invalid. |
471 | 12.4k | llvm::Constant *castedAddr = |
472 | 12.4k | llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType); |
473 | 12.4k | LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment); |
474 | 12.4k | CGM.setStaticLocalDeclAddress(&D, castedAddr); |
475 | | |
476 | 12.4k | CGM.getSanitizerMetadata()->reportGlobal(var, D); |
477 | | |
478 | | // Emit global variable debug descriptor for static vars. |
479 | 12.4k | CGDebugInfo *DI = getDebugInfo(); |
480 | 12.4k | if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
481 | 226 | DI->setLocation(D.getLocation()); |
482 | 226 | DI->EmitGlobalVariable(var, &D); |
483 | 226 | } |
484 | 12.4k | } |
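
The bss/data/rodata/relro section attributes handled above come from the corresponding pragma; a hypothetical example:

    #pragma clang section bss=".my_bss" rodata=".my_ro"
    void h(void) {
      static int zeroed;        // gets the "bss-section" attribute
      static const int ro = 1;  // gets the "rodata-section" attribute
    }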
485 | | |
486 | | namespace { |
487 | | struct DestroyObject final : EHScopeStack::Cleanup { |
488 | | DestroyObject(Address addr, QualType type, |
489 | | CodeGenFunction::Destroyer *destroyer, |
490 | | bool useEHCleanupForArray) |
491 | | : addr(addr), type(type), destroyer(destroyer), |
492 | 15.2k | useEHCleanupForArray(useEHCleanupForArray) {} |
493 | | |
494 | | Address addr; |
495 | | QualType type; |
496 | | CodeGenFunction::Destroyer *destroyer; |
497 | | bool useEHCleanupForArray; |
498 | | |
499 | 19.7k | void Emit(CodeGenFunction &CGF, Flags flags) override { |
500 | | // Don't use an EH cleanup recursively from an EH cleanup. |
501 | 19.7k | bool useEHCleanupForArray = |
502 | 19.7k | flags.isForNormalCleanup() && this->useEHCleanupForArray;
503 | | |
504 | 19.7k | CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray); |
505 | 19.7k | } |
506 | | }; |
507 | | |
508 | | template <class Derived> |
509 | | struct DestroyNRVOVariable : EHScopeStack::Cleanup { |
510 | | DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag) |
511 | 222 | : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
512 | | |
513 | | llvm::Value *NRVOFlag; |
514 | | Address Loc; |
515 | | QualType Ty; |
516 | | |
517 | 348 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
518 | | // Along the exceptions path we always execute the dtor. |
519 | 348 | bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
520 | | |
521 | 348 | llvm::BasicBlock *SkipDtorBB = nullptr; |
522 | 348 | if (NRVO) { |
523 | | // If we exited via NRVO, we skip the destructor call. |
524 | 222 | llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused"); |
525 | 222 | SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor"); |
526 | 222 | llvm::Value *DidNRVO = |
527 | 222 | CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val"); |
528 | 222 | CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB); |
529 | 222 | CGF.EmitBlock(RunDtorBB); |
530 | 222 | } |
531 | | |
532 | 348 | static_cast<Derived *>(this)->emitDestructorCall(CGF); |
533 | | |
534 | 348 | if (NRVO) CGF.EmitBlock(SkipDtorBB);
535 | 348 | }
536 | | |
537 | 0 | virtual ~DestroyNRVOVariable() = default;
538 | | }; |
539 | | |
540 | | struct DestroyNRVOVariableCXX final |
541 | | : DestroyNRVOVariable<DestroyNRVOVariableCXX> { |
542 | | DestroyNRVOVariableCXX(Address addr, QualType type, |
543 | | const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag) |
544 | | : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag), |
545 | 217 | Dtor(Dtor) {} |
546 | | |
547 | | const CXXDestructorDecl *Dtor; |
548 | | |
549 | 343 | void emitDestructorCall(CodeGenFunction &CGF) { |
550 | 343 | CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, |
551 | 343 | /*ForVirtualBase=*/false, |
552 | 343 | /*Delegating=*/false, Loc, Ty); |
553 | 343 | } |
554 | | }; |
555 | | |
556 | | struct DestroyNRVOVariableC final |
557 | | : DestroyNRVOVariable<DestroyNRVOVariableC> { |
558 | | DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty) |
559 | 5 | : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {} |
560 | | |
561 | 5 | void emitDestructorCall(CodeGenFunction &CGF) { |
562 | 5 | CGF.destroyNonTrivialCStruct(CGF, Loc, Ty); |
563 | 5 | } |
564 | | }; |
565 | | |
566 | | struct CallStackRestore final : EHScopeStack::Cleanup { |
567 | | Address Stack; |
568 | 1.60k | CallStackRestore(Address Stack) : Stack(Stack) {} |
569 | 2 | bool isRedundantBeforeReturn() override { return true; } |
570 | 1.60k | void Emit(CodeGenFunction &CGF, Flags flags) override { |
571 | 1.60k | llvm::Value *V = CGF.Builder.CreateLoad(Stack); |
572 | 1.60k | llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); |
573 | 1.60k | CGF.Builder.CreateCall(F, V); |
574 | 1.60k | } |
575 | | }; |
576 | | |
577 | | struct ExtendGCLifetime final : EHScopeStack::Cleanup { |
578 | | const VarDecl &Var; |
579 | 1 | ExtendGCLifetime(const VarDecl *var) : Var(*var) {} |
580 | | |
581 | 1 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
582 | | // Compute the address of the local variable, in case it's a |
583 | | // byref or something. |
584 | 1 | DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false, |
585 | 1 | Var.getType(), VK_LValue, SourceLocation()); |
586 | 1 | llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE), |
587 | 1 | SourceLocation()); |
588 | 1 | CGF.EmitExtendGCLifetime(value); |
589 | 1 | } |
590 | | }; |
591 | | |
592 | | struct CallCleanupFunction final : EHScopeStack::Cleanup { |
593 | | llvm::Constant *CleanupFn; |
594 | | const CGFunctionInfo &FnInfo; |
595 | | const VarDecl &Var; |
596 | | |
597 | | CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info, |
598 | | const VarDecl *Var) |
599 | 8 | : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {} |
600 | | |
601 | 10 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
602 | 10 | DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false, |
603 | 10 | Var.getType(), VK_LValue, SourceLocation()); |
604 | | // Compute the address of the local variable, in case it's a byref |
605 | | // or something. |
606 | 10 | llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF); |
607 | | |
608 | | // In some cases, the type of the function argument will be different from |
609 | | // the type of the pointer. An example of this is |
610 | | // void f(void* arg); |
611 | | // __attribute__((cleanup(f))) void *g; |
612 | | // |
613 | | // To fix this we insert a bitcast here. |
614 | 10 | QualType ArgTy = FnInfo.arg_begin()->type; |
615 | 10 | llvm::Value *Arg = |
616 | 10 | CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy)); |
617 | | |
618 | 10 | CallArgList Args; |
619 | 10 | Args.add(RValue::get(Arg), |
620 | 10 | CGF.getContext().getPointerType(Var.getType())); |
621 | 10 | auto Callee = CGCallee::forDirect(CleanupFn); |
622 | 10 | CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args); |
623 | 10 | } |
624 | | }; |
625 | | } // end anonymous namespace |
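
A complete, hypothetical example of the cleanup attribute that CallCleanupFunction lowers:

    #include <stdio.h>
    static void closefile(FILE **fp) { if (*fp) fclose(*fp); }
    void use_cleanup(void) {
      __attribute__((cleanup(closefile))) FILE *f = fopen("tmp", "r");
      // closefile(&f) runs automatically when f goes out of scope.
    }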
626 | | |
627 | | /// EmitAutoVarWithLifetime - Does the setup required for an automatic |
628 | | /// variable with lifetime. |
629 | | static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var, |
630 | | Address addr, |
631 | 590 | Qualifiers::ObjCLifetime lifetime) { |
632 | 590 | switch (lifetime) { |
633 | 0 | case Qualifiers::OCL_None: |
634 | 0 | llvm_unreachable("present but none"); |
635 | |
636 | 275 | case Qualifiers::OCL_ExplicitNone: |
637 | | // nothing to do |
638 | 275 | break; |
639 | | |
640 | 307 | case Qualifiers::OCL_Strong: { |
641 | 307 | CodeGenFunction::Destroyer *destroyer = |
642 | 307 | (var.hasAttr<ObjCPreciseLifetimeAttr>() |
643 | 307 | ? CodeGenFunction::destroyARCStrongPrecise1 |
644 | 307 | : CodeGenFunction::destroyARCStrongImprecise306 ); |
645 | | |
646 | 307 | CleanupKind cleanupKind = CGF.getARCCleanupKind(); |
647 | 307 | CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer, |
648 | 307 | cleanupKind & EHCleanup); |
649 | 307 | break; |
650 | 0 | } |
651 | 1 | case Qualifiers::OCL_Autoreleasing: |
652 | | // nothing to do |
653 | 1 | break; |
654 | | |
655 | 7 | case Qualifiers::OCL_Weak: |
656 | | // __weak objects always get EH cleanups; otherwise, exceptions |
657 | | // could cause really nasty crashes instead of mere leaks. |
658 | 7 | CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(), |
659 | 7 | CodeGenFunction::destroyARCWeak, |
660 | 7 | /*useEHCleanup*/ true); |
661 | 7 | break; |
662 | 590 | } |
663 | 590 | } |
664 | | |
665 | 1.35k | static bool isAccessedBy(const VarDecl &var, const Stmt *s) { |
666 | 1.35k | if (const Expr *e = dyn_cast<Expr>(s)) { |
667 | | // Skip the most common kinds of expressions that make |
668 | | // hierarchy-walking expensive. |
669 | 1.33k | s = e = e->IgnoreParenCasts(); |
670 | | |
671 | 1.33k | if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) |
672 | 565 | return (ref->getDecl() == &var); |
673 | 773 | if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) { |
674 | 134 | const BlockDecl *block = be->getBlockDecl(); |
675 | 383 | for (const auto &I : block->captures()) { |
676 | 383 | if (I.getVariable() == &var) |
677 | 3 | return true; |
678 | 383 | } |
679 | 134 | } |
680 | 773 | } |
681 | | |
682 | 786 | for (const Stmt *SubStmt : s->children()) |
683 | | // SubStmt might be null, e.g. a missing decl or the condition of an if-stmt.
684 | 384 | if (SubStmt && isAccessedBy(var, SubStmt)) |
685 | 15 | return true; |
686 | | |
687 | 771 | return false; |
688 | 786 | } |
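
A hypothetical initializer this catches: the variable is read during its own initialization through a block capture, so the caller must zero-initialize it before running the initializer (Widget and use are made-up names):

    // __weak id obj = [[Widget alloc] initWithHandler:^{ use(obj); }];
    // The block captures 'obj' itself, so isAccessedBy returns true.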
689 | | |
690 | 1.10k | static bool isAccessedBy(const ValueDecl *decl, const Expr *e) { |
691 | 1.10k | if (!decl) return false;
692 | 1.01k | if (!isa<VarDecl>(decl)) return false;
693 | 970 | const VarDecl *var = cast<VarDecl>(decl); |
694 | 970 | return isAccessedBy(*var, e); |
695 | 1.01k | } |
696 | | |
697 | | static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF, |
698 | 330 | const LValue &destLV, const Expr *init) { |
699 | 330 | bool needsCast = false; |
700 | | |
701 | 341 | while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) { |
702 | 197 | switch (castExpr->getCastKind()) { |
703 | | // Look through casts that don't require representation changes. |
704 | 0 | case CK_NoOp: |
705 | 11 | case CK_BitCast: |
706 | 11 | case CK_BlockPointerToObjCPointerCast: |
707 | 11 | needsCast = true; |
708 | 11 | break; |
709 | | |
710 | | // If we find an l-value to r-value cast from a __weak variable, |
711 | | // emit this operation as a copy or move. |
712 | 169 | case CK_LValueToRValue: { |
713 | 169 | const Expr *srcExpr = castExpr->getSubExpr(); |
714 | 169 | if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak) |
715 | 9 | return false; |
716 | | |
717 | | // Emit the source l-value. |
718 | 160 | LValue srcLV = CGF.EmitLValue(srcExpr); |
719 | | |
720 | | // Handle a formal type change to avoid asserting. |
721 | 160 | auto srcAddr = srcLV.getAddress(CGF); |
722 | 160 | if (needsCast) { |
723 | 3 | srcAddr = CGF.Builder.CreateElementBitCast( |
724 | 3 | srcAddr, destLV.getAddress(CGF).getElementType()); |
725 | 3 | } |
726 | | |
727 | | // If it was an l-value, use objc_copyWeak. |
728 | 160 | if (srcExpr->isLValue()) { |
729 | 157 | CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr); |
730 | 157 | } else { |
731 | 3 | assert(srcExpr->isXValue()); |
732 | 0 | CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr); |
733 | 3 | } |
734 | 0 | return true; |
735 | 169 | } |
736 | | |
737 | | // Stop at anything else. |
738 | 17 | default: |
739 | 17 | return false; |
740 | 197 | } |
741 | | |
742 | 11 | init = castExpr->getSubExpr(); |
743 | 11 | } |
744 | 144 | return false; |
745 | 330 | } |
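
Hypothetical inputs and the lowerings the logic above selects:

    // __weak id y = w;             // 'w' is a __weak l-value -> EmitARCCopyWeak
    // __weak id z = std::move(w);  // x-value                 -> EmitARCMoveWeak
    // __weak id q = makeObj();     // anything else: returns false, and the
    //                              // caller falls back to EmitARCInitWeak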
746 | | |
747 | | static void drillIntoBlockVariable(CodeGenFunction &CGF, |
748 | | LValue &lvalue, |
749 | 17 | const VarDecl *var) { |
750 | 17 | lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var)); |
751 | 17 | } |
752 | | |
753 | | void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, |
754 | 370k | SourceLocation Loc) { |
755 | 370k | if (!SanOpts.has(SanitizerKind::NullabilityAssign)) |
756 | 370k | return; |
757 | | |
758 | 18 | auto Nullability = LHS.getType()->getNullability(getContext()); |
758 | 18 | if (!Nullability || *Nullability != NullabilityKind::NonNull)
760 | 4 | return; |
761 | | |
762 | | // Check if the right hand side of the assignment is nonnull, if the left |
763 | | // hand side must be nonnull. |
764 | 14 | SanitizerScope SanScope(this); |
765 | 14 | llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS); |
766 | 14 | llvm::Constant *StaticData[] = { |
767 | 14 | EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()), |
768 | 14 | llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused. |
769 | 14 | llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)}; |
770 | 14 | EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}}, |
771 | 14 | SanitizerHandler::TypeMismatch, StaticData, RHS); |
772 | 14 | } |
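
A hypothetical trigger for this check, compiled with -fsanitize=nullability-assign:

    int *_Nonnull p;
    void set(int *q) { p = q; }  // emits a runtime test that q is non-null
                                 // before the store, as constructed above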
773 | | |
774 | | void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D, |
775 | 202k | LValue lvalue, bool capturedByInit) { |
776 | 202k | Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime(); |
777 | 202k | if (!lifetime) { |
778 | 201k | llvm::Value *value = EmitScalarExpr(init); |
779 | 201k | if (capturedByInit) |
780 | 4 | drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D)); |
781 | 201k | EmitNullabilityCheck(lvalue, value, init->getExprLoc()); |
782 | 201k | EmitStoreThroughLValue(RValue::get(value), lvalue, true); |
783 | 201k | return; |
784 | 201k | } |
785 | | |
786 | 1.60k | if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init)) |
787 | 1 | init = DIE->getExpr(); |
788 | | |
789 | | // If we're emitting a value with lifetime, we have to do the |
790 | | // initialization *before* we leave the cleanup scopes. |
791 | 1.60k | if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) { |
792 | 375 | CodeGenFunction::RunCleanupsScope Scope(*this); |
793 | 375 | return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit); |
794 | 375 | } |
795 | | |
796 | | // We have to maintain the illusion that the variable is |
797 | | // zero-initialized. If the variable might be accessed in its |
798 | | // initializer, zero-initialize before running the initializer, then |
799 | | // actually perform the initialization with an assign. |
800 | 1.22k | bool accessedByInit = false; |
801 | 1.22k | if (lifetime != Qualifiers::OCL_ExplicitNone) |
802 | 1.22k | accessedByInit = (capturedByInit || isAccessedBy(D, init));
803 | 1.22k | if (accessedByInit) { |
804 | 21 | LValue tempLV = lvalue; |
805 | | // Drill down to the __block object if necessary. |
806 | 21 | if (capturedByInit) { |
807 | | // We can use a simple GEP for this because it can't have been |
808 | | // moved yet. |
809 | 3 | tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this), |
810 | 3 | cast<VarDecl>(D), |
811 | 3 | /*follow*/ false)); |
812 | 3 | } |
813 | | |
814 | 21 | auto ty = |
815 | 21 | cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType()); |
816 | 21 | llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType()); |
817 | | |
818 | | // If __weak, we want to use a barrier under certain conditions. |
819 | 21 | if (lifetime == Qualifiers::OCL_Weak) |
820 | 4 | EmitARCInitWeak(tempLV.getAddress(*this), zero); |
821 | | |
822 | | // Otherwise just do a simple store. |
823 | 17 | else |
824 | 17 | EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true); |
825 | 21 | } |
826 | | |
827 | | // Emit the initializer. |
828 | 1.22k | llvm::Value *value = nullptr; |
829 | | |
830 | 1.22k | switch (lifetime) { |
831 | 0 | case Qualifiers::OCL_None: |
832 | 0 | llvm_unreachable("present but none"); |
833 | |
|
834 | 760 | case Qualifiers::OCL_Strong: { |
835 | 760 | if (!D || !isa<VarDecl>(D)685 || !cast<VarDecl>(D)->isARCPseudoStrong()652 ) { |
836 | 756 | value = EmitARCRetainScalarExpr(init); |
837 | 756 | break; |
838 | 756 | } |
839 | | // If D is pseudo-strong, treat it like __unsafe_unretained here. This means |
840 | | // that we omit the retain, and causes non-autoreleased return values to be |
841 | | // immediately released. |
842 | 760 | LLVM_FALLTHROUGH;
843 | 4 | } |
844 | | |
845 | 125 | case Qualifiers::OCL_ExplicitNone: |
846 | 125 | value = EmitARCUnsafeUnretainedScalarExpr(init); |
847 | 125 | break; |
848 | | |
849 | 334 | case Qualifiers::OCL_Weak: { |
850 | | // If it's not accessed by the initializer, try to emit the |
851 | | // initialization with a copy or move. |
852 | 334 | if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
853 | 160 | return; |
854 | 160 | } |
855 | | |
856 | | // No way to optimize a producing initializer into this. It's not |
857 | | // worth optimizing for, because the value will immediately |
858 | | // disappear in the common case. |
859 | 174 | value = EmitScalarExpr(init); |
860 | | |
861 | 174 | if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
862 | 174 | if (accessedByInit) |
863 | 4 | EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true); |
864 | 170 | else |
865 | 170 | EmitARCInitWeak(lvalue.getAddress(*this), value); |
866 | 174 | return; |
867 | 334 | } |
868 | | |
869 | 10 | case Qualifiers::OCL_Autoreleasing: |
870 | 10 | value = EmitARCRetainAutoreleaseScalarExpr(init); |
871 | 10 | break; |
872 | 1.22k | } |
873 | | |
874 | 891 | if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
875 | | |
876 | 891 | EmitNullabilityCheck(lvalue, value, init->getExprLoc()); |
877 | | |
878 | | // If the variable might have been accessed by its initializer, we |
879 | | // might have to initialize with a barrier. We have to do this for |
880 | | // both __weak and __strong, but __weak got filtered out above. |
881 | 891 | if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
882 | 13 | llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc()); |
883 | 13 | EmitStoreOfScalar(value, lvalue, /* isInitialization */ true); |
884 | 13 | EmitARCRelease(oldValue, ARCImpreciseLifetime); |
885 | 13 | return; |
886 | 13 | } |
887 | | |
888 | 878 | EmitStoreOfScalar(value, lvalue, /* isInitialization */ true); |
889 | 878 | } |
890 | | |
891 | | /// Decide whether we can emit the non-zero parts of the specified initializer |
892 | | /// with equal or fewer than NumStores scalar stores. |
893 | | static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init, |
894 | 102k | unsigned &NumStores) { |
895 | | // Zero and Undef never requires any extra stores. |
896 | 102k | if (isa<llvm::ConstantAggregateZero>(Init) || |
897 | 102k | isa<llvm::ConstantPointerNull>(Init) ||
898 | 102k | isa<llvm::UndefValue>(Init))
899 | 53 | return true; |
900 | 102k | if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
901 | 102k | isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
902 | 102k | isa<llvm::ConstantExpr>(Init))
903 | 102k | return Init->isNullValue() || NumStores--;
904 | | |
905 | | // See if we can emit each element. |
906 | 286 | if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
907 | 622 | for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
908 | 517 | llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i)); |
909 | 517 | if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores)) |
910 | 71 | return false; |
911 | 517 | } |
912 | 105 | return true; |
913 | 176 | } |
914 | | |
915 | 110 | if (llvm::ConstantDataSequential *CDS = |
916 | 110 | dyn_cast<llvm::ConstantDataSequential>(Init)) { |
917 | 101k | for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
918 | 101k | llvm::Constant *Elt = CDS->getElementAsConstant(i); |
919 | 101k | if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores)) |
920 | 75 | return false; |
921 | 101k | } |
922 | 34 | return true; |
923 | 109 | } |
924 | | |
925 | | // Anything else is hard and scary. |
926 | 1 | return false; |
927 | 110 | } |
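
Hypothetical initializers, assuming the default budget of six stores set by the caller (shouldUseBZeroPlusStoresToInitialize, below):

    int a[4096] = {1, 2, 3};                 // accepted: bzero + 3 scalar stores
    int b[4096] = {1, 2, 3, 4, 5, 6, 7, 8};  // rejected: would need 8 stores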
928 | | |
929 | | /// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit |
930 | | /// the scalar stores that would be required. |
931 | | static void emitStoresForInitAfterBZero(CodeGenModule &CGM, |
932 | | llvm::Constant *Init, Address Loc, |
933 | | bool isVolatile, CGBuilderTy &Builder, |
934 | 247 | bool IsAutoInit) { |
935 | 247 | assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) && |
936 | 247 | "called emitStoresForInitAfterBZero for zero or undef value."); |
937 | | |
938 | 247 | if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
939 | 247 | isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
940 | 247 | isa<llvm::ConstantExpr>(Init)) {
941 | 187 | auto *I = Builder.CreateStore(Init, Loc, isVolatile); |
942 | 187 | if (IsAutoInit) |
943 | 0 | I->addAnnotationMetadata("auto-init"); |
944 | 187 | return; |
945 | 187 | } |
946 | | |
947 | 60 | if (llvm::ConstantDataSequential *CDS = |
948 | 60 | dyn_cast<llvm::ConstantDataSequential>(Init)) { |
949 | 101k | for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
950 | 101k | llvm::Constant *Elt = CDS->getElementAsConstant(i); |
951 | | |
952 | | // If necessary, get a pointer to the element and emit it. |
953 | 101k | if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
954 | 103 | emitStoresForInitAfterBZero( |
955 | 103 | CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile, |
956 | 103 | Builder, IsAutoInit); |
957 | 101k | } |
958 | 24 | return; |
959 | 24 | } |
960 | | |
961 | 36 | assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) && |
962 | 36 | "Unknown value type!"); |
963 | | |
964 | 180 | for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
965 | 144 | llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i)); |
966 | | |
967 | | // If necessary, get a pointer to the element and emit it. |
968 | 144 | if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
969 | 87 | emitStoresForInitAfterBZero(CGM, Elt, |
970 | 87 | Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), |
971 | 87 | isVolatile, Builder, IsAutoInit); |
972 | 144 | } |
973 | 36 | } |
974 | | |
975 | | /// Decide whether we should use bzero plus some stores to initialize a local |
976 | | /// variable instead of using a memcpy from a constant global. It is beneficial |
977 | | /// to use bzero if the global is all zeros, or mostly zeros and large. |
978 | | static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init, |
979 | 4.74k | uint64_t GlobalSize) { |
980 | | // If a global is all zeros, always use a bzero. |
981 | 4.74k | if (isa<llvm::ConstantAggregateZero>(Init)) return true;
982 | | |
983 | | // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large, |
984 | | // do it if it will require 6 or fewer scalar stores. |
985 | | // TODO: Should the budget depend on the size? Avoiding a large global warrants
986 | | // plopping in more stores. |
987 | 3.75k | unsigned StoreBudget = 6; |
988 | 3.75k | uint64_t SizeLimit = 32; |
989 | | |
990 | 3.75k | return GlobalSize > SizeLimit && |
991 | 3.75k | canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
992 | 4.74k | } |
993 | | |
994 | | /// Decide whether we should use memset to initialize a local variable instead |
995 | | /// of using a memcpy from a constant global. Assumes we've already decided to |
996 | | /// not use bzero.
997 | | /// FIXME We could be more clever, as we are for bzero above, and generate |
998 | | /// memset followed by stores. It's unclear that's worth the effort. |
999 | | static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init, |
1000 | | uint64_t GlobalSize, |
1001 | 3.69k | const llvm::DataLayout &DL) { |
1002 | 3.69k | uint64_t SizeLimit = 32; |
1003 | 3.69k | if (GlobalSize <= SizeLimit) |
1004 | 3.58k | return nullptr; |
1005 | 111 | return llvm::isBytewiseValue(Init, DL); |
1006 | 3.69k | } |
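
A hypothetical case where llvm::isBytewiseValue succeeds: every byte of the constant's memory image is the same, so one memset suffices:

    // int big[64] = { 0x01010101, 0x01010101, /* ...all elements alike */ };
    // -> memset(big, 0x01, 256)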
1007 | | |
1008 | | /// Decide whether we want to split a constant structure or array store into a |
1009 | | /// sequence of its fields' stores. This may cost us code size and compilation |
1010 | | /// speed, but plays better with store optimizations. |
1011 | | static bool shouldSplitConstantStore(CodeGenModule &CGM, |
1012 | 3.66k | uint64_t GlobalByteSize) { |
1013 | | // Don't break things that occupy more than one cacheline. |
1014 | 3.66k | uint64_t ByteSizeLimit = 64; |
1015 | 3.66k | if (CGM.getCodeGenOpts().OptimizationLevel == 0) |
1016 | 3.43k | return false; |
1017 | 225 | if (GlobalByteSize <= ByteSizeLimit) |
1018 | 225 | return true; |
1019 | 0 | return false; |
1020 | 225 | } |
1021 | | |
1022 | | enum class IsPattern { No, Yes }; |
1023 | | |
1024 | | /// Generate a constant filled with either a pattern or zeroes. |
1025 | | static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern, |
1026 | 270 | llvm::Type *Ty) { |
1027 | 270 | if (isPattern == IsPattern::Yes) |
1028 | 140 | return initializationPatternFor(CGM, Ty); |
1029 | 130 | else |
1030 | 130 | return llvm::Constant::getNullValue(Ty); |
1031 | 270 | } |
1032 | | |
1033 | | static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern, |
1034 | | llvm::Constant *constant); |
1035 | | |
1036 | | /// Helper function for constWithPadding() to deal with padding in structures. |
1037 | | static llvm::Constant *constStructWithPadding(CodeGenModule &CGM, |
1038 | | IsPattern isPattern, |
1039 | | llvm::StructType *STy, |
1040 | 612 | llvm::Constant *constant) { |
1041 | 612 | const llvm::DataLayout &DL = CGM.getDataLayout(); |
1042 | 612 | const llvm::StructLayout *Layout = DL.getStructLayout(STy); |
1043 | 612 | llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext()); |
1044 | 612 | unsigned SizeSoFar = 0; |
1045 | 612 | SmallVector<llvm::Constant *, 8> Values; |
1046 | 612 | bool NestedIntact = true; |
1047 | 1.94k | for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1048 | 1.33k | unsigned CurOff = Layout->getElementOffset(i); |
1049 | 1.33k | if (SizeSoFar < CurOff) { |
1050 | 60 | assert(!STy->isPacked()); |
1051 | 0 | auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar); |
1052 | 60 | Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy)); |
1053 | 60 | } |
1054 | 0 | llvm::Constant *CurOp; |
1055 | 1.33k | if (constant->isZeroValue()) |
1056 | 284 | CurOp = llvm::Constant::getNullValue(STy->getElementType(i)); |
1057 | 1.04k | else |
1058 | 1.04k | CurOp = cast<llvm::Constant>(constant->getAggregateElement(i)); |
1059 | 1.33k | auto *NewOp = constWithPadding(CGM, isPattern, CurOp); |
1060 | 1.33k | if (CurOp != NewOp) |
1061 | 25 | NestedIntact = false; |
1062 | 1.33k | Values.push_back(NewOp); |
1063 | 1.33k | SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType()); |
1064 | 1.33k | } |
1065 | 612 | unsigned TotalSize = Layout->getSizeInBytes(); |
1066 | 612 | if (SizeSoFar < TotalSize) { |
1067 | 160 | auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar); |
1068 | 160 | Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy)); |
1069 | 160 | } |
1070 | 612 | if (NestedIntact && Values.size() == STy->getNumElements())
1071 | 377 | return constant; |
1072 | 235 | return llvm::ConstantStruct::getAnon(Values, STy->isPacked()); |
1073 | 612 | } |
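
A hypothetical struct showing the rebuild: given

    struct S { char c; int i; };  // 3 padding bytes after 'c'

a constant of LLVM type { i8, i32 } becomes { i8, [3 x i8], i32 }, with the new [3 x i8] member filled by zeroes or by the -ftrivial-auto-var-init=pattern byte pattern, so the padding bytes are initialized too.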
1074 | | |
1075 | | /// Replace all padding bytes in a given constant with either a pattern byte or |
1076 | | /// 0x00. |
1077 | | static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern, |
1078 | 68.6k | llvm::Constant *constant) { |
1079 | 68.6k | llvm::Type *OrigTy = constant->getType(); |
1080 | 68.6k | if (const auto STy = dyn_cast<llvm::StructType>(OrigTy)) |
1081 | 612 | return constStructWithPadding(CGM, isPattern, STy, constant); |
1082 | 68.0k | if (auto *ArrayTy = dyn_cast<llvm::ArrayType>(OrigTy)) { |
1083 | 161 | llvm::SmallVector<llvm::Constant *, 8> Values; |
1084 | 161 | uint64_t Size = ArrayTy->getNumElements(); |
1085 | 161 | if (!Size) |
1086 | 10 | return constant; |
1087 | 151 | llvm::Type *ElemTy = ArrayTy->getElementType(); |
1088 | 151 | bool ZeroInitializer = constant->isNullValue(); |
1089 | 151 | llvm::Constant *OpValue, *PaddedOp; |
1090 | 151 | if (ZeroInitializer) { |
1091 | 49 | OpValue = llvm::Constant::getNullValue(ElemTy); |
1092 | 49 | PaddedOp = constWithPadding(CGM, isPattern, OpValue); |
1093 | 49 | } |
1094 | 394k | for (unsigned Op = 0; Op != Size; ++Op) {
1095 | 394k | if (!ZeroInitializer) { |
1096 | 66.4k | OpValue = constant->getAggregateElement(Op); |
1097 | 66.4k | PaddedOp = constWithPadding(CGM, isPattern, OpValue); |
1098 | 66.4k | } |
1099 | 394k | Values.push_back(PaddedOp); |
1100 | 394k | } |
1101 | 151 | auto *NewElemTy = Values[0]->getType(); |
1102 | 151 | if (NewElemTy == ElemTy) |
1103 | 123 | return constant; |
1104 | 28 | auto *NewArrayTy = llvm::ArrayType::get(NewElemTy, Size); |
1105 | 28 | return llvm::ConstantArray::get(NewArrayTy, Values); |
1106 | 151 | } |
1107 | | // FIXME: Add handling for tail padding in vectors. Vectors don't |
1108 | | // have padding between or inside elements, but the total amount of |
1109 | | // data can be less than the allocated size. |
1110 | 67.8k | return constant; |
1111 | 68.0k | } |
1112 | | |
1113 | | Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D, |
1114 | | llvm::Constant *Constant, |
1115 | 3.62k | CharUnits Align) { |
1116 | 3.62k | auto FunctionName = [&](const DeclContext *DC) -> std::string { |
1117 | 3.60k | if (const auto *FD = dyn_cast<FunctionDecl>(DC)) { |
1118 | 3.59k | if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD)) |
1119 | 1 | return CC->getNameAsString(); |
1120 | 3.59k | if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD)) |
1121 | 0 | return CD->getNameAsString(); |
1122 | 3.59k | return std::string(getMangledName(FD)); |
1123 | 3.59k | } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1124 | 7 | return OM->getNameAsString(); |
1125 | 7 | } else if (isa<BlockDecl>(DC)) { |
1126 | 0 | return "<block>"; |
1127 | 7 | } else if (isa<CapturedDecl>(DC)) { |
1128 | 7 | return "<captured>"; |
1129 | 7 | } else { |
1130 | 0 | llvm_unreachable("expected a function or method"); |
1131 | 0 | } |
1132 | 3.60k | }; |
1133 | | |
1134 | | // Form a simple per-variable cache of these values in case we find we |
1135 | | // want to reuse them. |
1136 | 3.62k | llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D]; |
1137 | 3.62k | if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1138 | 3.61k | auto *Ty = Constant->getType(); |
1139 | 3.61k | bool isConstant = true; |
1140 | 3.61k | llvm::GlobalVariable *InsertBefore = nullptr; |
1141 | 3.61k | unsigned AS = |
1142 | 3.61k | getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace()); |
1143 | 3.61k | std::string Name; |
1144 | 3.61k | if (D.hasGlobalStorage()) |
1145 | 10 | Name = getMangledName(&D).str() + ".const"; |
1146 | 3.60k | else if (const DeclContext *DC = D.getParentFunctionOrMethod()) |
1147 | 3.60k | Name = ("__const." + FunctionName(DC) + "." + D.getName()).str(); |
1148 | 0 | else |
1149 | 0 | llvm_unreachable("local variable has no parent function or method"); |
1150 | 3.61k | llvm::GlobalVariable *GV = new llvm::GlobalVariable( |
1151 | 3.61k | getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage, |
1152 | 3.61k | Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS); |
1153 | 3.61k | GV->setAlignment(Align.getAsAlign()); |
1154 | 3.61k | GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global); |
1155 | 3.61k | CacheEntry = GV; |
1156 | 3.61k | } else if (CacheEntry->getAlignment() < uint64_t(Align.getQuantity())) {
1157 | 0 | CacheEntry->setAlignment(Align.getAsAlign()); |
1158 | 0 | } |
1159 | | |
1160 | 3.62k | return Address(CacheEntry, CacheEntry->getValueType(), Align); |
1161 | 3.62k | } |
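
The naming scheme above, with a hypothetical function: for

    void f() { int arr[64] = { /* ... */ }; }

the initializer becomes a private constant global named "__const._Z1fv.arr"; a variable with global storage would instead be named "<mangled-name>.const".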
1162 | | |
1163 | | static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM, |
1164 | | const VarDecl &D, |
1165 | | CGBuilderTy &Builder, |
1166 | | llvm::Constant *Constant, |
1167 | 3.60k | CharUnits Align) { |
1168 | 3.60k | Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align); |
1169 | 3.60k | return Builder.CreateElementBitCast(SrcPtr, CGM.Int8Ty); |
1170 | 3.60k | } |
1171 | | |
1172 | | static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D, |
1173 | | Address Loc, bool isVolatile, |
1174 | | CGBuilderTy &Builder, |
1175 | 5.22k | llvm::Constant *constant, bool IsAutoInit) { |
1176 | 5.22k | auto *Ty = constant->getType(); |
1177 | 5.22k | uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty); |
1178 | 5.22k | if (!ConstantSize) |
1179 | 16 | return; |
1180 | | |
1181 | 5.21k | bool canDoSingleStore = Ty->isIntOrIntVectorTy() || |
1182 | 5.21k | Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1183 | 5.21k | if (canDoSingleStore) { |
1184 | 466 | auto *I = Builder.CreateStore(constant, Loc, isVolatile); |
1185 | 466 | if (IsAutoInit) |
1186 | 466 | I->addAnnotationMetadata("auto-init"); |
1187 | 466 | return; |
1188 | 466 | } |
1189 | | |
1190 | 4.74k | auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize); |
1191 | | |
1192 | | // If the initializer is all or mostly the same, codegen with bzero / memset |
1193 | | // then do a few stores afterward. |
1194 | 4.74k | if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) { |
1195 | 1.04k | auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0), |
1196 | 1.04k | SizeVal, isVolatile); |
1197 | 1.04k | if (IsAutoInit) |
1198 | 113 | I->addAnnotationMetadata("auto-init"); |
1199 | | |
1200 | 1.04k | bool valueAlreadyCorrect = |
1201 | 1.04k | constant->isNullValue() || isa<llvm::UndefValue>(constant);
1202 | 1.04k | if (!valueAlreadyCorrect) { |
1203 | 57 | Loc = Builder.CreateElementBitCast(Loc, Ty); |
1204 | 57 | emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder, |
1205 | 57 | IsAutoInit); |
1206 | 57 | } |
1207 | 1.04k | return; |
1208 | 1.04k | } |
1209 | | |
1210 | | // If the initializer is a repeated byte pattern, use memset. |
1211 | 3.69k | llvm::Value *Pattern = |
1212 | 3.69k | shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout()); |
1213 | 3.69k | if (Pattern) { |
1214 | 35 | uint64_t Value = 0x00; |
1215 | 35 | if (!isa<llvm::UndefValue>(Pattern)) { |
1216 | 35 | const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue(); |
1217 | 35 | assert(AP.getBitWidth() <= 8); |
1218 | 0 | Value = AP.getLimitedValue(); |
1219 | 35 | } |
1220 | 0 | auto *I = Builder.CreateMemSet( |
1221 | 35 | Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal, isVolatile); |
1222 | 35 | if (IsAutoInit) |
1223 | 14 | I->addAnnotationMetadata("auto-init"); |
1224 | 35 | return; |
1225 | 35 | } |
1226 | | |
1227 | | // If the initializer is small, use a handful of stores. |
1228 | 3.66k | if (shouldSplitConstantStore(CGM, ConstantSize)) { |
1229 | 225 | if (auto *STy = dyn_cast<llvm::StructType>(Ty)) { |
1230 | | // FIXME: handle the case when STy != Loc.getElementType(). |
1231 | 202 | if (STy == Loc.getElementType()) { |
1232 | 128 | for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1233 | 77 | Address EltPtr = Builder.CreateStructGEP(Loc, i); |
1234 | 77 | emitStoresForConstant( |
1235 | 77 | CGM, D, EltPtr, isVolatile, Builder, |
1236 | 77 | cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)), |
1237 | 77 | IsAutoInit); |
1238 | 77 | } |
1239 | 51 | return; |
1240 | 51 | } |
1241 | 202 | } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1242 | | // FIXME: handle the case when ATy != Loc.getElementType(). |
1243 | 23 | if (ATy == Loc.getElementType()) { |
1244 | 32 | for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1245 | 24 | Address EltPtr = Builder.CreateConstArrayGEP(Loc, i); |
1246 | 24 | emitStoresForConstant( |
1247 | 24 | CGM, D, EltPtr, isVolatile, Builder, |
1248 | 24 | cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)), |
1249 | 24 | IsAutoInit); |
1250 | 24 | } |
1251 | 8 | return; |
1252 | 8 | } |
1253 | 23 | } |
1254 | 225 | } |
1255 | | |
1256 | | // Copy from a global. |
1257 | 3.60k | auto *I = |
1258 | 3.60k | Builder.CreateMemCpy(Loc, |
1259 | 3.60k | createUnnamedGlobalForMemcpyFrom( |
1260 | 3.60k | CGM, D, Builder, constant, Loc.getAlignment()), |
1261 | 3.60k | SizeVal, isVolatile); |
1262 | 3.60k | if (IsAutoInit) |
1263 | 111 | I->addAnnotationMetadata("auto-init"); |
1264 | 3.60k | } |
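
A sketch of the strategy selection performed above; the size thresholds are target- and version-dependent, so the comments indicate the typical path rather than a guarantee:

    void strategies() {
      int a = 42;                 // scalar: one plain store
      int zeros[64] = {};         // all zero: a single memset(0)
      int mostly[64] = {0, 7};    // mostly zero: memset(0) plus a store of 7
      int ones[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
                      -1, -1, -1, -1, -1, -1, -1, -1};  // every byte 0xFF: memset
      struct { int x, y; } small = {1, 2};     // small aggregate: split field stores
      int big[64] = {1, 2, 3, 4, 5, 6, 7, 8};  // otherwise: memcpy from a global
      (void)a; (void)zeros; (void)mostly; (void)ones; (void)small; (void)big;
    }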
1265 | | |
1266 | | static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D, |
1267 | | Address Loc, bool isVolatile, |
1268 | 270 | CGBuilderTy &Builder) { |
1269 | 270 | llvm::Type *ElTy = Loc.getElementType(); |
1270 | 270 | llvm::Constant *constant = |
1271 | 270 | constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy)); |
1272 | 270 | emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant, |
1273 | 270 | /*IsAutoInit=*/true); |
1274 | 270 | } |
1275 | | |
1276 | | static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D, |
1277 | | Address Loc, bool isVolatile, |
1278 | 393 | CGBuilderTy &Builder) { |
1279 | 393 | llvm::Type *ElTy = Loc.getElementType(); |
1280 | 393 | llvm::Constant *constant = constWithPadding( |
1281 | 393 | CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy)); |
1282 | 393 | assert(!isa<llvm::UndefValue>(constant)); |
1283 | 0 | emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant, |
1284 | 393 | /*IsAutoInit=*/true); |
1285 | 393 | } |
1286 | | |
1287 | 735 | static bool containsUndef(llvm::Constant *constant) { |
1288 | 735 | auto *Ty = constant->getType(); |
1289 | 735 | if (isa<llvm::UndefValue>(constant)) |
1290 | 35 | return true; |
1291 | 700 | if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1292 | 249 | for (llvm::Use &Op : constant->operands()) |
1293 | 597 | if (containsUndef(cast<llvm::Constant>(Op))) |
1294 | 35 | return true; |
1295 | 665 | return false; |
1296 | 700 | } |
1297 | | |
1298 | | static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern, |
1299 | 233 | llvm::Constant *constant) { |
1300 | 233 | auto *Ty = constant->getType(); |
1301 | 233 | if (isa<llvm::UndefValue>(constant)) |
1302 | 50 | return patternOrZeroFor(CGM, isPattern, Ty); |
1303 | 183 | if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1304 | 45 | return constant; |
1305 | 138 | if (!containsUndef(constant)) |
1306 | 103 | return constant; |
1307 | 35 | llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands()); |
1308 | 125 | for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1309 | 90 | auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op)); |
1310 | 90 | Values[Op] = replaceUndef(CGM, isPattern, OpValue); |
1311 | 90 | } |
1312 | 35 | if (Ty->isStructTy()) |
1313 | 35 | return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values); |
1314 | 0 | if (Ty->isArrayTy()) |
1315 | 0 | return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values); |
1316 | 0 | assert(Ty->isVectorTy()); |
1317 | 0 | return llvm::ConstantVector::get(Values); |
1318 | 0 | } |
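
A hedged example of where `replaceUndef` comes into play: under `-ftrivial-auto-var-init`, undef portions of an otherwise-constant initializer, for instance the tail of a union whose smaller member was written, are presumably rewritten to zero or to the pattern byte:

    union U {
      char c;   // writing only `c` can leave the other 7 bytes undef
      long l;
    };
    void pad_demo() {
      U u = {1};   // with =zero the tail becomes 0x00; with =pattern, the pattern byte
      (void)u;
    }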
1319 | | |
1320 | | /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a |
1321 | | /// variable declaration with auto, register, or no storage class specifier. |
1322 | | /// These turn into simple stack objects, or GlobalValues depending on target. |
1323 | 238k | void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) { |
1324 | 238k | AutoVarEmission emission = EmitAutoVarAlloca(D); |
1325 | 238k | EmitAutoVarInit(emission); |
1326 | 238k | EmitAutoVarCleanups(emission); |
1327 | 238k | } |
1328 | | |
1329 | | /// Emit a lifetime.begin marker if some criteria are satisfied. |
1330 | | /// \return a pointer to the temporary size Value if a marker was emitted, null |
1331 | | /// otherwise |
1332 | | llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size, |
1333 | 255k | llvm::Value *Addr) { |
1334 | 255k | if (!ShouldEmitLifetimeMarkers) |
1335 | 250k | return nullptr; |
1336 | | |
1337 | 4.78k | assert(Addr->getType()->getPointerAddressSpace() == |
1338 | 4.78k | CGM.getDataLayout().getAllocaAddrSpace() && |
1339 | 4.78k | "Pointer should be in alloca address space"); |
1340 | 0 | llvm::Value *SizeV = llvm::ConstantInt::get( |
1341 | 4.78k | Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
1342 | 4.78k | Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy); |
1343 | 4.78k | llvm::CallInst *C = |
1344 | 4.78k | Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr}); |
1345 | 4.78k | C->setDoesNotThrow(); |
1346 | 4.78k | return SizeV; |
1347 | 255k | } |
1348 | | |
1349 | 4.93k | void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) { |
1350 | 4.93k | assert(Addr->getType()->getPointerAddressSpace() == |
1351 | 4.93k | CGM.getDataLayout().getAllocaAddrSpace() && |
1352 | 4.93k | "Pointer should be in alloca address space"); |
1353 | 0 | Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy); |
1354 | 4.93k | llvm::CallInst *C = |
1355 | 4.93k | Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr}); |
1356 | 4.93k | C->setDoesNotThrow(); |
1357 | 4.93k | } |
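
A sketch of the markers these two helpers emit around a block-scoped local; the intrinsic names match current LLVM, but the casts and exact placement vary:

    void scope_demo() {
      {
        char buf[32];  // roughly: call void @llvm.lifetime.start.p0(i64 32, ptr %buf)
        buf[0] = 0;
      }                // roughly: call void @llvm.lifetime.end.p0(i64 32, ptr %buf)
    }

Per the code above, a scalable size (for example, SVE vector locals) is encoded as -1 in the size operand.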
1358 | | |
1359 | | void CodeGenFunction::EmitAndRegisterVariableArrayDimensions( |
1360 | 2.17k | CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) { |
1361 | | // For each dimension stores its QualType and corresponding |
1362 | | // size-expression Value. |
1363 | 2.17k | SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions; |
1364 | 2.17k | SmallVector<IdentifierInfo *, 4> VLAExprNames; |
1365 | | |
1366 | | // Break down the array into individual dimensions. |
1367 | 2.17k | QualType Type1D = D.getType(); |
1368 | 5.38k | while (getContext().getAsVariableArrayType(Type1D)) { |
1369 | 3.21k | auto VlaSize = getVLAElements1D(Type1D); |
1370 | 3.21k | if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts)) |
1371 | 896 | Dimensions.emplace_back(C, Type1D.getUnqualifiedType()); |
1372 | 2.31k | else { |
1373 | | // Generate a locally unique name for the size expression. |
1374 | 2.31k | Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++); |
1375 | 2.31k | SmallString<12> Buffer; |
1376 | 2.31k | StringRef NameRef = Name.toStringRef(Buffer); |
1377 | 2.31k | auto &Ident = getContext().Idents.getOwn(NameRef); |
1378 | 2.31k | VLAExprNames.push_back(&Ident); |
1379 | 2.31k | auto SizeExprAddr = |
1380 | 2.31k | CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef); |
1381 | 2.31k | Builder.CreateStore(VlaSize.NumElts, SizeExprAddr); |
1382 | 2.31k | Dimensions.emplace_back(SizeExprAddr.getPointer(), |
1383 | 2.31k | Type1D.getUnqualifiedType()); |
1384 | 2.31k | } |
1385 | 3.21k | Type1D = VlaSize.Type; |
1386 | 3.21k | } |
1387 | | |
1388 | 2.17k | if (!EmitDebugInfo) |
1389 | 2.13k | return; |
1390 | | |
1391 | | // Register each dimension's size-expression with a DILocalVariable, |
1392 | | // so that it can be used by CGDebugInfo when instantiating a DISubrange |
1393 | | // to describe this array. |
1394 | 40 | unsigned NameIdx = 0; |
1395 | 41 | for (auto &VlaSize : Dimensions) { |
1396 | 41 | llvm::Metadata *MD; |
1397 | 41 | if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts)) |
1398 | 1 | MD = llvm::ConstantAsMetadata::get(C); |
1399 | 40 | else { |
1400 | | // Create an artificial VarDecl to generate debug info for. |
1401 | 40 | IdentifierInfo *NameIdent = VLAExprNames[NameIdx++]; |
1402 | 40 | assert(cast<llvm::PointerType>(VlaSize.NumElts->getType()) |
1403 | 40 | ->isOpaqueOrPointeeTypeMatches(SizeTy) && |
1404 | 40 | "Number of VLA elements must be SizeTy"); |
1405 | 0 | auto QT = getContext().getIntTypeForBitwidth( |
1406 | 40 | SizeTy->getScalarSizeInBits(), false); |
1407 | 40 | auto *ArtificialDecl = VarDecl::Create( |
1408 | 40 | getContext(), const_cast<DeclContext *>(D.getDeclContext()), |
1409 | 40 | D.getLocation(), D.getLocation(), NameIdent, QT, |
1410 | 40 | getContext().CreateTypeSourceInfo(QT), SC_Auto); |
1411 | 40 | ArtificialDecl->setImplicit(); |
1412 | | |
1413 | 40 | MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts, |
1414 | 40 | Builder); |
1415 | 40 | } |
1416 | 0 | assert(MD && "No Size expression debug node created"); |
1417 | 0 | DI->registerVLASizeExpression(VlaSize.Type, MD); |
1418 | 41 | } |
1419 | 40 | } |
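
A small example of the bookkeeping above, assuming two runtime-sized dimensions; each non-constant dimension gets a stack temporary named `__vla_expr<N>` that the debug info can reference from a DISubrange:

    void vla_demo(int n, int m) {
      int a[n][m];   // emits __vla_expr0 holding n and __vla_expr1 holding m
      a[0][0] = 0;
    }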
1420 | | |
1421 | | /// EmitAutoVarAlloca - Emit the alloca and debug information for a |
1422 | | /// local variable. Does not emit initialization or destruction. |
1423 | | CodeGenFunction::AutoVarEmission |
1424 | 257k | CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) { |
1425 | 257k | QualType Ty = D.getType(); |
1426 | 257k | assert( |
1427 | 257k | Ty.getAddressSpace() == LangAS::Default || |
1428 | 257k | (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL)); |
1429 | | |
1430 | 0 | AutoVarEmission emission(D); |
1431 | | |
1432 | 257k | bool isEscapingByRef = D.isEscapingByref(); |
1433 | 257k | emission.IsEscapingByRef = isEscapingByRef; |
1434 | | |
1435 | 257k | CharUnits alignment = getContext().getDeclAlign(&D); |
1436 | | |
1437 | | // If the type is variably-modified, emit all the VLA sizes for it. |
1438 | 257k | if (Ty->isVariablyModifiedType()) |
1439 | 2.25k | EmitVariablyModifiedType(Ty); |
1440 | | |
1441 | 257k | auto *DI = getDebugInfo(); |
1442 | 257k | bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();
1443 | | |
1444 | 257k | Address address = Address::invalid(); |
1445 | 257k | Address AllocaAddr = Address::invalid(); |
1446 | 257k | Address OpenMPLocalAddr = Address::invalid(); |
1447 | 257k | if (CGM.getLangOpts().OpenMPIRBuilder) |
1448 | 701 | OpenMPLocalAddr = OMPBuilderCBHelpers::getAddressOfLocalVariable(*this, &D); |
1449 | 257k | else |
1450 | 257k | OpenMPLocalAddr = |
1451 | 257k | getLangOpts().OpenMP |
1452 | 257k | ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1453 | 257k | : Address::invalid();
1454 | | |
1455 | 257k | bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1456 | | |
1457 | 257k | if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1458 | 175 | address = OpenMPLocalAddr; |
1459 | 175 | AllocaAddr = OpenMPLocalAddr; |
1460 | 257k | } else if (Ty->isConstantSizeType()) { |
1461 | | // If this value is an array or struct with a statically determinable |
1462 | | // constant initializer, there are optimizations we can do. |
1463 | | // |
1464 | | // TODO: We should constant-evaluate the initializer of any variable, |
1465 | | // as long as it is initialized by a constant expression. Currently, |
1466 | | // isConstantInitializer produces wrong answers for structs with |
1467 | | // reference or bitfield members, and a few other cases, and checking |
1468 | | // for POD-ness protects us from some of these. |
1469 | 255k | if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1470 | 255k | (D.isConstexpr() ||
1471 | 22.3k | ((Ty.isPODType(getContext()) ||
1472 | 22.1k | getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1473 | 22.1k | D.getInit()->isConstantInitializer(getContext(), false)))) {
1474 | | |
1475 | | // If the variable's a const type, and it's neither an NRVO |
1476 | | // candidate nor a __block variable and has no mutable members, |
1477 | | // emit it as a global instead. |
1478 | | // Exception is if a variable is located in non-constant address space |
1479 | | // in OpenCL. |
1480 | 8.06k | if ((!getLangOpts().OpenCL || |
1481 | 8.06k | Ty.getAddressSpace() == LangAS::opencl_constant) &&
1482 | 8.06k | (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1483 | 7.98k | !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
1484 | 6 | EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage); |
1485 | | |
1486 | | // Signal this condition to later callbacks. |
1487 | 6 | emission.Addr = Address::invalid(); |
1488 | 6 | assert(emission.wasEmittedAsGlobal()); |
1489 | 0 | return emission; |
1490 | 6 | } |
1491 | | |
1492 | | // Otherwise, tell the initialization code that we're in this case. |
1493 | 8.06k | emission.IsConstantAggregate = true; |
1494 | 8.06k | } |
1495 | | |
1496 | | // A normal fixed sized variable becomes an alloca in the entry block, |
1497 | | // unless: |
1498 | | // - it's an NRVO variable. |
1499 | | // - we are compiling OpenMP and it's an OpenMP local variable. |
1500 | 255k | if (NRVO) { |
1501 | | // The named return value optimization: allocate this variable in the |
1502 | | // return slot, so that we can elide the copy when returning this |
1503 | | // variable (C++0x [class.copy]p34). |
1504 | 1.38k | address = ReturnValue; |
1505 | 1.38k | AllocaAddr = ReturnValue; |
1506 | | |
1507 | 1.38k | if (const RecordType *RecordTy = Ty->getAs<RecordType>()) { |
1508 | 1.38k | const auto *RD = RecordTy->getDecl(); |
1509 | 1.38k | const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD); |
1510 | 1.38k | if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1511 | 1.38k | RD->isNonTrivialToPrimitiveDestroy()) {
1512 | | // Create a flag that is used to indicate when the NRVO was applied |
1513 | | // to this variable. Set it to zero to indicate that NRVO was not |
1514 | | // applied. |
1515 | 222 | llvm::Value *Zero = Builder.getFalse(); |
1516 | 222 | Address NRVOFlag = |
1517 | 222 | CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo", |
1518 | 222 | /*ArraySize=*/nullptr, &AllocaAddr); |
1519 | 222 | EnsureInsertPoint(); |
1520 | 222 | Builder.CreateStore(Zero, NRVOFlag); |
1521 | | |
1522 | | // Record the NRVO flag for this variable. |
1523 | 222 | NRVOFlags[&D] = NRVOFlag.getPointer(); |
1524 | 222 | emission.NRVOFlag = NRVOFlag.getPointer(); |
1525 | 222 | } |
1526 | 1.38k | } |
1527 | 254k | } else { |
1528 | 254k | CharUnits allocaAlignment; |
1529 | 254k | llvm::Type *allocaTy; |
1530 | 254k | if (isEscapingByRef) { |
1531 | 215 | auto &byrefInfo = getBlockByrefInfo(&D); |
1532 | 215 | allocaTy = byrefInfo.Type; |
1533 | 215 | allocaAlignment = byrefInfo.ByrefAlignment; |
1534 | 253k | } else { |
1535 | 253k | allocaTy = ConvertTypeForMem(Ty); |
1536 | 253k | allocaAlignment = alignment; |
1537 | 253k | } |
1538 | | |
1539 | | // Create the alloca. Note that we set the name separately from |
1540 | | // building the instruction so that it's there even in no-asserts |
1541 | | // builds. |
1542 | 254k | address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(), |
1543 | 254k | /*ArraySize=*/nullptr, &AllocaAddr); |
1544 | | |
1545 | | // Don't emit lifetime markers for MSVC catch parameters. The lifetime of |
1546 | | // the catch parameter starts in the catchpad instruction, and we can't |
1547 | | // insert code in those basic blocks. |
1548 | 254k | bool IsMSCatchParam = |
1549 | 254k | D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1550 | | |
1551 | | // Emit a lifetime intrinsic if meaningful. There's no point in doing this |
1552 | | // if we don't have a valid insertion point (?). |
1553 | 254k | if (HaveInsertPoint() && !IsMSCatchParam254k ) { |
1554 | | // If there's a jump into the lifetime of this variable, its lifetime |
1555 | | // gets broken up into several regions in IR, which requires more work |
1556 | | // to handle correctly. For now, just omit the intrinsics; this is a |
1557 | | // rare case, and it's better to just be conservatively correct. |
1558 | | // PR28267. |
1559 | | // |
1560 | | // We have to do this in all language modes if there's a jump past the |
1561 | | // declaration. We also have to do it in C if there's a jump to an |
1562 | | // earlier point in the current block because non-VLA lifetimes begin as |
1563 | | // soon as the containing block is entered, not when its variables |
1564 | | // actually come into scope; suppressing the lifetime annotations |
1565 | | // completely in this case is unnecessarily pessimistic, but again, this |
1566 | | // is rare. |
1567 | 254k | if (!Bypasses.IsBypassed(&D) && |
1568 | 254k | !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1569 | 253k | llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy); |
1570 | 253k | emission.SizeForLifetimeMarkers = |
1571 | 253k | EmitLifetimeStart(Size, AllocaAddr.getPointer()); |
1572 | 253k | } |
1573 | 254k | } else { |
1574 | 47 | assert(!emission.useLifetimeMarkers()); |
1575 | 47 | } |
1576 | 254k | } |
1577 | 255k | } else { |
1578 | 2.17k | EnsureInsertPoint(); |
1579 | | |
1580 | 2.17k | if (!DidCallStackSave) { |
1581 | | // Save the stack. |
1582 | 1.60k | Address Stack = |
1583 | 1.60k | CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack"); |
1584 | | |
1585 | 1.60k | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave); |
1586 | 1.60k | llvm::Value *V = Builder.CreateCall(F); |
1587 | 1.60k | Builder.CreateStore(V, Stack); |
1588 | | |
1589 | 1.60k | DidCallStackSave = true; |
1590 | | |
1591 | | // Push a cleanup block and restore the stack there. |
1592 | | // FIXME: in general circumstances, this should be an EH cleanup. |
1593 | 1.60k | pushStackRestore(NormalCleanup, Stack); |
1594 | 1.60k | } |
1595 | | |
1596 | 2.17k | auto VlaSize = getVLASize(Ty); |
1597 | 2.17k | llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type); |
1598 | | |
1599 | | // Allocate memory for the array. |
1600 | 2.17k | address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts, |
1601 | 2.17k | &AllocaAddr); |
1602 | | |
1603 | | // If we have debug info enabled, properly describe the VLA dimensions for |
1604 | | // this type by registering the vla size expression for each of the |
1605 | | // dimensions. |
1606 | 2.17k | EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo); |
1607 | 2.17k | } |
1608 | | |
1609 | 257k | setAddrOfLocalVar(&D, address); |
1610 | 257k | emission.Addr = address; |
1611 | 257k | emission.AllocaAddr = AllocaAddr; |
1612 | | |
1613 | | // Emit debug info for local var declaration. |
1614 | 257k | if (EmitDebugInfo && HaveInsertPoint()) {
1615 | 107k | Address DebugAddr = address; |
1616 | 107k | bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1617 | 107k | DI->setLocation(D.getLocation()); |
1618 | | |
1619 | | // If NRVO, use a pointer to the return address. |
1620 | 107k | if (UsePointerValue) { |
1621 | 143 | DebugAddr = ReturnValuePointer; |
1622 | 143 | AllocaAddr = ReturnValuePointer; |
1623 | 143 | } |
1624 | 107k | (void)DI->EmitDeclareOfAutoVariable(&D, AllocaAddr.getPointer(), Builder, |
1625 | 107k | UsePointerValue); |
1626 | 107k | } |
1627 | | |
1628 | 257k | if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1629 | 5 | EmitVarAnnotations(&D, address.getPointer()); |
1630 | | |
1631 | | // Make sure we call @llvm.lifetime.end. |
1632 | 257k | if (emission.useLifetimeMarkers()) |
1633 | 3.94k | EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, |
1634 | 3.94k | emission.getOriginalAllocatedAddress(), |
1635 | 3.94k | emission.getSizeForLifetimeMarkers()); |
1636 | | |
1637 | 257k | return emission; |
1638 | 257k | } |
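
A sketch of the NRVO path above: the local is constructed directly in the return slot, and because its destructor is non-trivial, the one-byte "nrvo" flag records whether the optimization applied so the scope cleanup can skip the destructor call:

    struct S {
      ~S() {}    // user-provided, hence non-trivial
      int v[4];
    };
    S make() {
      S s;       // allocated in the return slot, guarded by the nrvo flag
      s.v[0] = 1;
      return s;  // flag set: the cleanup must not destroy `s` here
    }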
1639 | | |
1640 | | static bool isCapturedBy(const VarDecl &, const Expr *); |
1641 | | |
1642 | | /// Determines whether the given __block variable is potentially |
1643 | | /// captured by the given statement. |
1644 | 27 | static bool isCapturedBy(const VarDecl &Var, const Stmt *S) { |
1645 | 27 | if (const Expr *E = dyn_cast<Expr>(S)) |
1646 | 25 | return isCapturedBy(Var, E); |
1647 | 2 | for (const Stmt *SubStmt : S->children()) |
1648 | 2 | if (isCapturedBy(Var, SubStmt)) |
1649 | 0 | return true; |
1650 | 2 | return false; |
1651 | 2 | } |
1652 | | |
1653 | | /// Determines whether the given __block variable is potentially |
1654 | | /// captured by the given expression. |
1655 | 124 | static bool isCapturedBy(const VarDecl &Var, const Expr *E) { |
1656 | | // Skip the most common kinds of expressions that make |
1657 | | // hierarchy-walking expensive. |
1658 | 124 | E = E->IgnoreParenCasts(); |
1659 | | |
1660 | 124 | if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) { |
1661 | 7 | const BlockDecl *Block = BE->getBlockDecl(); |
1662 | 7 | for (const auto &I : Block->captures()) { |
1663 | 7 | if (I.getVariable() == &Var) |
1664 | 7 | return true; |
1665 | 7 | } |
1666 | | |
1667 | | // No need to walk into the subexpressions. |
1668 | 0 | return false; |
1669 | 7 | } |
1670 | | |
1671 | 117 | if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) { |
1672 | 0 | const CompoundStmt *CS = SE->getSubStmt(); |
1673 | 0 | for (const auto *BI : CS->body()) |
1674 | 0 | if (const auto *BIE = dyn_cast<Expr>(BI)) { |
1675 | 0 | if (isCapturedBy(Var, BIE)) |
1676 | 0 | return true; |
1677 | 0 | } |
1678 | 0 | else if (const auto *DS = dyn_cast<DeclStmt>(BI)) { |
1679 | | // special case declarations |
1680 | 0 | for (const auto *I : DS->decls()) { |
1681 | 0 | if (const auto *VD = dyn_cast<VarDecl>((I))) { |
1682 | 0 | const Expr *Init = VD->getInit(); |
1683 | 0 | if (Init && isCapturedBy(Var, Init)) |
1684 | 0 | return true; |
1685 | 0 | } |
1686 | 0 | } |
1687 | 0 | } |
1688 | 0 | else |
1689 | | // FIXME: Conservatively assume that arbitrary statements cause capturing.
1690 | | // Later, provide code to poke into statements for capture analysis. |
1691 | 0 | return true; |
1692 | 0 | return false; |
1693 | 0 | } |
1694 | | |
1695 | 117 | for (const Stmt *SubStmt : E->children()) |
1696 | 25 | if (isCapturedBy(Var, SubStmt)) |
1697 | 3 | return true; |
1698 | | |
1699 | 114 | return false; |
1700 | 117 | } |
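
The situation these predicates detect, sketched with the blocks extension (compile with -fblocks); the `__block` variable is captured by a block inside its own initializer, so the initializer has to run before the value is copied into the byref slot:

    void capture_demo() {
      __block int x = ^{ return x; }();  // x captured by its own initializer
      (void)x;
    }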
1701 | | |
1702 | | /// Determine whether the given initializer is trivial in the sense |
1703 | | /// that it requires no code to be generated. |
1704 | 240k | bool CodeGenFunction::isTrivialInitializer(const Expr *Init) { |
1705 | 240k | if (!Init) |
1706 | 38.1k | return true; |
1707 | | |
1708 | 201k | if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init)) |
1709 | 13.0k | if (CXXConstructorDecl *Constructor = Construct->getConstructor()) |
1710 | 13.0k | if (Constructor->isTrivial() && |
1711 | 13.0k | Constructor->isDefaultConstructor()4.29k && |
1712 | 13.0k | !Construct->requiresZeroInitialization()3.84k ) |
1713 | 3.84k | return true; |
1714 | | |
1715 | 198k | return false; |
1716 | 201k | } |
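
Examples of initializers this predicate treats as trivial, meaning no initialization code is emitted; the struct is illustrative:

    struct Pod { int a, b; };
    void trivial_demo() {
      int u;   // no initializer at all
      Pod p;   // trivial default constructor, no zero-initialization required
      (void)u;
      (void)p;
    }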
1717 | | |
1718 | | void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type, |
1719 | | const VarDecl &D, |
1720 | 692 | Address Loc) { |
1721 | 692 | auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit(); |
1722 | 692 | CharUnits Size = getContext().getTypeSizeInChars(type); |
1723 | 692 | bool isVolatile = type.isVolatileQualified(); |
1724 | 692 | if (!Size.isZero()) { |
1725 | 671 | switch (trivialAutoVarInit) { |
1726 | 0 | case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
1727 | 0 | llvm_unreachable("Uninitialized handled by caller"); |
1728 | 274 | case LangOptions::TrivialAutoVarInitKind::Zero: |
1729 | 274 | if (CGM.stopAutoInit()) |
1730 | 4 | return; |
1731 | 270 | emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder); |
1732 | 270 | break; |
1733 | 397 | case LangOptions::TrivialAutoVarInitKind::Pattern: |
1734 | 397 | if (CGM.stopAutoInit()) |
1735 | 4 | return; |
1736 | 393 | emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder); |
1737 | 393 | break; |
1738 | 671 | } |
1739 | 663 | return; |
1740 | 671 | } |
1741 | | |
1742 | | // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to |
1743 | | // them, so emit a memcpy with the VLA size to initialize each element. |
1744 | | // Technically zero-sized or negative-sized VLAs are undefined, and UBSan |
1745 | | // will catch that code, but there exists code which generates zero-sized |
1746 | | // VLAs. Be nice and initialize whatever they requested. |
1747 | 21 | const auto *VlaType = getContext().getAsVariableArrayType(type); |
1748 | 21 | if (!VlaType) |
1749 | 7 | return; |
1750 | 14 | auto VlaSize = getVLASize(VlaType); |
1751 | 14 | auto SizeVal = VlaSize.NumElts; |
1752 | 14 | CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type); |
1753 | 14 | switch (trivialAutoVarInit) { |
1754 | 0 | case LangOptions::TrivialAutoVarInitKind::Uninitialized: |
1755 | 0 | llvm_unreachable("Uninitialized handled by caller"); |
1756 | |
1757 | 7 | case LangOptions::TrivialAutoVarInitKind::Zero: { |
1758 | 7 | if (CGM.stopAutoInit()) |
1759 | 2 | return; |
1760 | 5 | if (!EltSize.isOne()) |
1761 | 5 | SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize)); |
1762 | 5 | auto *I = Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), |
1763 | 5 | SizeVal, isVolatile); |
1764 | 5 | I->addAnnotationMetadata("auto-init"); |
1765 | 5 | break; |
1766 | 7 | } |
1767 | | |
1768 | 7 | case LangOptions::TrivialAutoVarInitKind::Pattern: { |
1769 | 7 | if (CGM.stopAutoInit()) |
1770 | 2 | return; |
1771 | 5 | llvm::Type *ElTy = Loc.getElementType(); |
1772 | 5 | llvm::Constant *Constant = constWithPadding( |
1773 | 5 | CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy)); |
1774 | 5 | CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type); |
1775 | 5 | llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop"); |
1776 | 5 | llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop"); |
1777 | 5 | llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont"); |
1778 | 5 | llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ( |
1779 | 5 | SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0), |
1780 | 5 | "vla.iszerosized"); |
1781 | 5 | Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB); |
1782 | 5 | EmitBlock(SetupBB); |
1783 | 5 | if (!EltSize.isOne()) |
1784 | 5 | SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize)); |
1785 | 5 | llvm::Value *BaseSizeInChars = |
1786 | 5 | llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity()); |
1787 | 5 | Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin"); |
1788 | 5 | llvm::Value *End = Builder.CreateInBoundsGEP( |
1789 | 5 | Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end"); |
1790 | 5 | llvm::BasicBlock *OriginBB = Builder.GetInsertBlock(); |
1791 | 5 | EmitBlock(LoopBB); |
1792 | 5 | llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur"); |
1793 | 5 | Cur->addIncoming(Begin.getPointer(), OriginBB); |
1794 | 5 | CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize); |
1795 | 5 | auto *I = |
1796 | 5 | Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign), |
1797 | 5 | createUnnamedGlobalForMemcpyFrom( |
1798 | 5 | CGM, D, Builder, Constant, ConstantAlign), |
1799 | 5 | BaseSizeInChars, isVolatile); |
1800 | 5 | I->addAnnotationMetadata("auto-init"); |
1801 | 5 | llvm::Value *Next = |
1802 | 5 | Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next"); |
1803 | 5 | llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone"); |
1804 | 5 | Builder.CreateCondBr(Done, ContBB, LoopBB); |
1805 | 5 | Cur->addIncoming(Next, LoopBB); |
1806 | 5 | EmitBlock(ContBB); |
1807 | 5 | } break; |
1808 | 14 | } |
1809 | 14 | } |
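
A hedged illustration of the two modes handled above; the concrete pattern byte is target-dependent (an 0xAA-style repeating byte on common targets), and each emitted store or memset carries "auto-init" annotation metadata:

    int read_uninit() {
      int x;     // =zero    -> x starts as 0
                 // =pattern -> x starts filled with the repeating pattern byte
      return x;  // still dubious code, but no longer reads indeterminate bits
    }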
1810 | | |
1811 | 239k | void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) { |
1812 | 239k | assert(emission.Variable && "emission was not valid!"); |
1813 | | |
1814 | | // If this was emitted as a global constant, we're done. |
1815 | 239k | if (emission.wasEmittedAsGlobal()) return;
1816 | | |
1817 | 239k | const VarDecl &D = *emission.Variable; |
1818 | 239k | auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation()); |
1819 | 239k | QualType type = D.getType(); |
1820 | | |
1821 | | // If this local has an initializer, emit it now. |
1822 | 239k | const Expr *Init = D.getInit(); |
1823 | | |
1824 | | // If we are at an unreachable point, we don't need to emit the initializer |
1825 | | // unless it contains a label. |
1826 | 239k | if (!HaveInsertPoint()) { |
1827 | 25 | if (!Init || !ContainsLabel(Init)) return;
1828 | 0 | EnsureInsertPoint(); |
1829 | 0 | } |
1830 | | |
1831 | | // Initialize the structure of a __block variable. |
1832 | 239k | if (emission.IsEscapingByRef) |
1833 | 215 | emitByrefStructureInit(emission); |
1834 | | |
1835 | | // Initialize the variable here if it doesn't have an initializer and it is a
1836 | | // C struct that is non-trivial to initialize or an array containing such a |
1837 | | // struct. |
1838 | 239k | if (!Init && |
1839 | 239k | type.isNonTrivialToPrimitiveDefaultInitialize() == |
1840 | 38.1k | QualType::PDIK_Struct) { |
1841 | 38 | LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type); |
1842 | 38 | if (emission.IsEscapingByRef) |
1843 | 10 | drillIntoBlockVariable(*this, Dst, &D); |
1844 | 38 | defaultInitNonTrivialCStructVar(Dst); |
1845 | 38 | return; |
1846 | 38 | } |
1847 | | |
1848 | | // Check whether this is a byref variable that's potentially |
1849 | | // captured and moved by its own initializer. If so, we'll need to |
1850 | | // emit the initializer first, then copy into the variable. |
1851 | 239k | bool capturedByInit = |
1852 | 239k | Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1853 | | |
1854 | 239k | bool locIsByrefHeader = !capturedByInit; |
1855 | 239k | const Address Loc = |
1856 | 239k | locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1857 | | |
1858 | | // Note: constexpr already initializes everything correctly. |
1859 | 239k | LangOptions::TrivialAutoVarInitKind trivialAutoVarInit = |
1860 | 239k | (D.isConstexpr() |
1861 | 239k | ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1862 | 238k | : (D.getAttr<UninitializedAttr>()
1863 | 238k | ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1864 | 238k | : getContext().getLangOpts().getTrivialAutoVarInit()));
1865 | | |
1866 | 239k | auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) { |
1867 | 234k | if (trivialAutoVarInit == |
1868 | 234k | LangOptions::TrivialAutoVarInitKind::Uninitialized) |
1869 | 233k | return; |
1870 | | |
1871 | | // Only initialize a __block's storage: we always initialize the header. |
1872 | 692 | if (emission.IsEscapingByRef && !locIsByrefHeader)
1873 | 2 | Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false); |
1874 | | |
1875 | 692 | return emitZeroOrPatternForAutoVarInit(type, D, Loc); |
1876 | 234k | }; |
1877 | | |
1878 | 239k | if (isTrivialInitializer(Init)) |
1879 | 41.9k | return initializeWhatIsTechnicallyUninitialized(Loc); |
1880 | | |
1881 | 197k | llvm::Constant *constant = nullptr; |
1882 | 197k | if (emission.IsConstantAggregate || |
1883 | 197k | D.mightBeUsableInConstantExpressions(getContext())) {
1884 | 20.2k | assert(!capturedByInit && "constant init contains a capturing block?"); |
1885 | 0 | constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D); |
1886 | 20.2k | if (constant && !constant->isZeroValue() &&
1887 | 20.2k | (trivialAutoVarInit != |
1888 | 4.12k | LangOptions::TrivialAutoVarInitKind::Uninitialized)) { |
1889 | 143 | IsPattern isPattern = |
1890 | 143 | (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern) |
1891 | 143 | ? IsPattern::Yes
1892 | 143 | : IsPattern::No;
1893 | | // C guarantees that brace-init with fewer initializers than members in |
1894 | | // the aggregate will initialize the rest of the aggregate as-if it were |
1895 | | // static initialization. In turn static initialization guarantees that |
1896 | | // padding is initialized to zero bits. We could instead pattern-init if D |
1897 | | // has any ImplicitValueInitExpr, but that seems to be unintuitive |
1898 | | // behavior. |
1899 | 143 | constant = constWithPadding(CGM, IsPattern::No, |
1900 | 143 | replaceUndef(CGM, isPattern, constant)); |
1901 | 143 | } |
1902 | 20.2k | } |
1903 | | |
1904 | 197k | if (!constant) { |
1905 | 192k | initializeWhatIsTechnicallyUninitialized(Loc); |
1906 | 192k | LValue lv = MakeAddrLValue(Loc, type); |
1907 | 192k | lv.setNonGC(true); |
1908 | 192k | return EmitExprAsInit(Init, &D, lv, capturedByInit); |
1909 | 192k | } |
1910 | | |
1911 | 5.06k | if (!emission.IsConstantAggregate) { |
1912 | | // For simple scalar/complex initialization, store the value directly. |
1913 | 605 | LValue lv = MakeAddrLValue(Loc, type); |
1914 | 605 | lv.setNonGC(true); |
1915 | 605 | return EmitStoreThroughLValue(RValue::get(constant), lv, true); |
1916 | 605 | } |
1917 | | |
1918 | 4.46k | emitStoresForConstant(CGM, D, Builder.CreateElementBitCast(Loc, CGM.Int8Ty), |
1919 | 4.46k | type.isVolatileQualified(), Builder, constant, |
1920 | 4.46k | /*IsAutoInit=*/false); |
1921 | 4.46k | } |
1922 | | |
1923 | | /// Emit an expression as an initializer for an object (variable, field, etc.) |
1924 | | /// at the given location. The expression is not necessarily the normal |
1925 | | /// initializer for the object, and the address is not necessarily |
1926 | | /// its normal location. |
1927 | | /// |
1928 | | /// \param init the initializing expression |
1929 | | /// \param D the object to act as if we're initializing |
1930 | | /// \param lvalue the lvalue to initialize |
1931 | | /// \param capturedByInit true if \p D is a __block variable |
1932 | | /// whose address is potentially changed by the initializer |
1933 | | void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D, |
1934 | 208k | LValue lvalue, bool capturedByInit) { |
1935 | 208k | QualType type = D->getType(); |
1936 | | |
1937 | 208k | if (type->isReferenceType()) { |
1938 | 5.03k | RValue rvalue = EmitReferenceBindingToExpr(init); |
1939 | 5.03k | if (capturedByInit) |
1940 | 0 | drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D)); |
1941 | 5.03k | EmitStoreThroughLValue(rvalue, lvalue, true); |
1942 | 5.03k | return; |
1943 | 5.03k | } |
1944 | 203k | switch (getEvaluationKind(type)) { |
1945 | 189k | case TEK_Scalar: |
1946 | 189k | EmitScalarInit(init, D, lvalue, capturedByInit); |
1947 | 189k | return; |
1948 | 164 | case TEK_Complex: { |
1949 | 164 | ComplexPairTy complex = EmitComplexExpr(init); |
1950 | 164 | if (capturedByInit) |
1951 | 0 | drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D)); |
1952 | 164 | EmitStoreOfComplex(complex, lvalue, /*init*/ true); |
1953 | 164 | return; |
1954 | 0 | } |
1955 | 14.0k | case TEK_Aggregate: |
1956 | 14.0k | if (type->isAtomicType()) { |
1957 | 4 | EmitAtomicInit(const_cast<Expr*>(init), lvalue); |
1958 | 14.0k | } else { |
1959 | 14.0k | AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap; |
1960 | 14.0k | if (isa<VarDecl>(D)) |
1961 | 14.0k | Overlap = AggValueSlot::DoesNotOverlap; |
1962 | 0 | else if (auto *FD = dyn_cast<FieldDecl>(D)) |
1963 | 0 | Overlap = getOverlapForFieldInit(FD); |
1964 | | // TODO: how can we delay here if D is captured by its initializer? |
1965 | 14.0k | EmitAggExpr(init, AggValueSlot::forLValue( |
1966 | 14.0k | lvalue, *this, AggValueSlot::IsDestructed, |
1967 | 14.0k | AggValueSlot::DoesNotNeedGCBarriers, |
1968 | 14.0k | AggValueSlot::IsNotAliased, Overlap)); |
1969 | 14.0k | } |
1970 | 14.0k | return; |
1971 | 203k | } |
1972 | 0 | llvm_unreachable("bad evaluation kind"); |
1973 | 0 | } |
1974 | | |
1975 | | /// Enter a destroy cleanup for the given local variable. |
1976 | | void CodeGenFunction::emitAutoVarTypeCleanup( |
1977 | | const CodeGenFunction::AutoVarEmission &emission, |
1978 | 9.82k | QualType::DestructionKind dtorKind) { |
1979 | 9.82k | assert(dtorKind != QualType::DK_none); |
1980 | | |
1981 | | // Note that for __block variables, we want to destroy the |
1982 | | // original stack object, not the possibly forwarded object. |
1983 | 0 | Address addr = emission.getObjectAddress(*this); |
1984 | | |
1985 | 9.82k | const VarDecl *var = emission.Variable; |
1986 | 9.82k | QualType type = var->getType(); |
1987 | | |
1988 | 9.82k | CleanupKind cleanupKind = NormalAndEHCleanup; |
1989 | 9.82k | CodeGenFunction::Destroyer *destroyer = nullptr; |
1990 | | |
1991 | 9.82k | switch (dtorKind) { |
1992 | 0 | case QualType::DK_none: |
1993 | 0 | llvm_unreachable("no cleanup for trivially-destructible variable"); |
1994 | |
1995 | 9.06k | case QualType::DK_cxx_destructor: |
1996 | | // If there's an NRVO flag on the emission, we need a different |
1997 | | // cleanup. |
1998 | 9.06k | if (emission.NRVOFlag) { |
1999 | 217 | assert(!type->isArrayType()); |
2000 | 0 | CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor(); |
2001 | 217 | EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor, |
2002 | 217 | emission.NRVOFlag); |
2003 | 217 | return; |
2004 | 217 | } |
2005 | 8.85k | break; |
2006 | | |
2007 | 8.85k | case QualType::DK_objc_strong_lifetime: |
2008 | | // Suppress cleanups for pseudo-strong variables. |
2009 | 524 | if (var->isARCPseudoStrong()) return;
2010 | | |
2011 | | // Otherwise, consider whether to use an EH cleanup or not. |
2012 | 511 | cleanupKind = getARCCleanupKind(); |
2013 | | |
2014 | | // Use the imprecise destroyer by default. |
2015 | 511 | if (!var->hasAttr<ObjCPreciseLifetimeAttr>()) |
2016 | 506 | destroyer = CodeGenFunction::destroyARCStrongImprecise; |
2017 | 511 | break; |
2018 | | |
2019 | 172 | case QualType::DK_objc_weak_lifetime: |
2020 | 172 | break; |
2021 | | |
2022 | 65 | case QualType::DK_nontrivial_c_struct: |
2023 | 65 | destroyer = CodeGenFunction::destroyNonTrivialCStruct; |
2024 | 65 | if (emission.NRVOFlag) { |
2025 | 5 | assert(!type->isArrayType()); |
2026 | 0 | EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr, |
2027 | 5 | emission.NRVOFlag, type); |
2028 | 5 | return; |
2029 | 5 | } |
2030 | 60 | break; |
2031 | 9.82k | } |
2032 | | |
2033 | | // If we haven't chosen a more specific destroyer, use the default. |
2034 | 9.59k | if (!destroyer) destroyer = getDestroyer(dtorKind);
2035 | | |
2036 | | // Use an EH cleanup in array destructors iff the destructor itself |
2037 | | // is being pushed as an EH cleanup. |
2038 | 9.59k | bool useEHCleanup = (cleanupKind & EHCleanup); |
2039 | 9.59k | EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer, |
2040 | 9.59k | useEHCleanup); |
2041 | 9.59k | } |
2042 | | |
2043 | 257k | void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) { |
2044 | 257k | assert(emission.Variable && "emission was not valid!"); |
2045 | | |
2046 | | // If this was emitted as a global constant, we're done. |
2047 | 257k | if (emission.wasEmittedAsGlobal()) return;
2048 | | |
2049 | | // If we don't have an insertion point, we're done. Sema prevents |
2050 | | // us from jumping into any of these scopes anyway. |
2051 | 257k | if (!HaveInsertPoint()) return;
2052 | | |
2053 | 257k | const VarDecl &D = *emission.Variable; |
2054 | | |
2055 | | // Check the type for a cleanup. |
2056 | 257k | if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext())) |
2057 | 9.82k | emitAutoVarTypeCleanup(emission, dtorKind); |
2058 | | |
2059 | | // In GC mode, honor objc_precise_lifetime. |
2060 | 257k | if (getLangOpts().getGC() != LangOptions::NonGC && |
2061 | 257k | D.hasAttr<ObjCPreciseLifetimeAttr>()) {
2062 | 1 | EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D); |
2063 | 1 | } |
2064 | | |
2065 | | // Handle the cleanup attribute. |
2066 | 257k | if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) { |
2067 | 8 | const FunctionDecl *FD = CA->getFunctionDecl(); |
2068 | | |
2069 | 8 | llvm::Constant *F = CGM.GetAddrOfFunction(FD); |
2070 | 8 | assert(F && "Could not find function!"); |
2071 | | |
2072 | 0 | const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD); |
2073 | 8 | EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D); |
2074 | 8 | } |
2075 | | |
2076 | | // If this is a block variable, call _Block_object_destroy |
2077 | | // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC |
2078 | | // mode. |
2079 | 257k | if (emission.IsEscapingByRef && |
2080 | 257k | CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2081 | 215 | BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF; |
2082 | 215 | if (emission.Variable->getType().isObjCGCWeak()) |
2083 | 7 | Flags |= BLOCK_FIELD_IS_WEAK; |
2084 | 215 | enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags, |
2085 | 215 | /*LoadBlockVarAddr*/ false, |
2086 | 215 | cxxDestructorCanThrow(emission.Variable->getType())); |
2087 | 215 | } |
2088 | 257k | } |
2089 | | |
2090 | | CodeGenFunction::Destroyer * |
2091 | 12.4k | CodeGenFunction::getDestroyer(QualType::DestructionKind kind) { |
2092 | 12.4k | switch (kind) { |
2093 | 0 | case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor"); |
2094 | 11.7k | case QualType::DK_cxx_destructor: |
2095 | 11.7k | return destroyCXXObject; |
2096 | 53 | case QualType::DK_objc_strong_lifetime: |
2097 | 53 | return destroyARCStrongPrecise; |
2098 | 508 | case QualType::DK_objc_weak_lifetime: |
2099 | 508 | return destroyARCWeak; |
2100 | 68 | case QualType::DK_nontrivial_c_struct: |
2101 | 68 | return destroyNonTrivialCStruct; |
2102 | 12.4k | } |
2103 | 0 | llvm_unreachable("Unknown DestructionKind"); |
2104 | 0 | } |
2105 | | |
2106 | | /// pushEHDestroy - Push the standard destructor for the given type as |
2107 | | /// an EH-only cleanup. |
2108 | | void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind, |
2109 | 856 | Address addr, QualType type) { |
2110 | 856 | assert(dtorKind && "cannot push destructor for trivial type"); |
2111 | 0 | assert(needsEHCleanup(dtorKind)); |
2112 | | |
2113 | 0 | pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true); |
2114 | 856 | } |
2115 | | |
2116 | | /// pushDestroy - Push the standard destructor for the given type as |
2117 | | /// at least a normal cleanup. |
2118 | | void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind, |
2119 | 352 | Address addr, QualType type) { |
2120 | 352 | assert(dtorKind && "cannot push destructor for trivial type"); |
2121 | | |
2122 | 0 | CleanupKind cleanupKind = getCleanupKind(dtorKind); |
2123 | 352 | pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind), |
2124 | 352 | cleanupKind & EHCleanup); |
2125 | 352 | } |
2126 | | |
2127 | | void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr, |
2128 | | QualType type, Destroyer *destroyer, |
2129 | 4.90k | bool useEHCleanupForArray) { |
2130 | 4.90k | pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type, |
2131 | 4.90k | destroyer, useEHCleanupForArray); |
2132 | 4.90k | } |
2133 | | |
2134 | 1.60k | void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) { |
2135 | 1.60k | EHStack.pushCleanup<CallStackRestore>(Kind, SPMem); |
2136 | 1.60k | } |
2137 | | |
2138 | | void CodeGenFunction::pushLifetimeExtendedDestroy(CleanupKind cleanupKind, |
2139 | | Address addr, QualType type, |
2140 | | Destroyer *destroyer, |
2141 | 549 | bool useEHCleanupForArray) { |
2142 | | // If we're not in a conditional branch, we don't need to bother generating a |
2143 | | // conditional cleanup. |
2144 | 549 | if (!isInConditionalBranch()) { |
2145 | | // Push an EH-only cleanup for the object now. |
2146 | | // FIXME: When popping normal cleanups, we need to keep this EH cleanup |
2147 | | // around in case a temporary's destructor throws an exception. |
2148 | 520 | if (cleanupKind & EHCleanup) |
2149 | 219 | EHStack.pushCleanup<DestroyObject>( |
2150 | 219 | static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type, |
2151 | 219 | destroyer, useEHCleanupForArray); |
2152 | | |
2153 | 520 | return pushCleanupAfterFullExprWithActiveFlag<DestroyObject>( |
2154 | 520 | cleanupKind, Address::invalid(), addr, type, destroyer, useEHCleanupForArray); |
2155 | 520 | } |
2156 | | |
2157 | | // Otherwise, we should only destroy the object if it's been initialized. |
2158 | | // Re-use the active flag and saved address across both the EH and end of |
2159 | | // scope cleanups. |
2160 | | |
2161 | 29 | using SavedType = typename DominatingValue<Address>::saved_type; |
2162 | 29 | using ConditionalCleanupType = |
2163 | 29 | EHScopeStack::ConditionalCleanup<DestroyObject, Address, QualType, |
2164 | 29 | Destroyer *, bool>; |
2165 | | |
2166 | 29 | Address ActiveFlag = createCleanupActiveFlag(); |
2167 | 29 | SavedType SavedAddr = saveValueInCond(addr); |
2168 | | |
2169 | 29 | if (cleanupKind & EHCleanup) { |
2170 | 15 | EHStack.pushCleanup<ConditionalCleanupType>( |
2171 | 15 | static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), SavedAddr, type, |
2172 | 15 | destroyer, useEHCleanupForArray); |
2173 | 15 | initFullExprCleanupWithFlag(ActiveFlag); |
2174 | 15 | } |
2175 | | |
2176 | 29 | pushCleanupAfterFullExprWithActiveFlag<ConditionalCleanupType>( |
2177 | 29 | cleanupKind, ActiveFlag, SavedAddr, type, destroyer, |
2178 | 29 | useEHCleanupForArray); |
2179 | 29 | } |
2180 | | |
2181 | | /// emitDestroy - Immediately perform the destruction of the given |
2182 | | /// object. |
2183 | | /// |
2184 | | /// \param addr - the address of the object; a type* |
2185 | | /// \param type - the type of the object; if an array type, all |
2186 | | /// objects are destroyed in reverse order |
2187 | | /// \param destroyer - the function to call to destroy individual |
2188 | | /// elements |
2189 | | /// \param useEHCleanupForArray - whether an EH cleanup should be |
2190 | | /// used when destroying array elements, in case one of the |
2191 | | /// destructions throws an exception |
2192 | | void CodeGenFunction::emitDestroy(Address addr, QualType type, |
2193 | | Destroyer *destroyer, |
2194 | 21.3k | bool useEHCleanupForArray) { |
2195 | 21.3k | const ArrayType *arrayType = getContext().getAsArrayType(type); |
2196 | 21.3k | if (!arrayType) |
2197 | 18.9k | return destroyer(*this, addr, type); |
2198 | | |
2199 | 2.39k | llvm::Value *length = emitArrayLength(arrayType, type, addr); |
2200 | | |
2201 | 2.39k | CharUnits elementAlign = |
2202 | 2.39k | addr.getAlignment() |
2203 | 2.39k | .alignmentOfArrayElement(getContext().getTypeSizeInChars(type)); |
2204 | | |
2205 | | // Normally we have to check whether the array is zero-length. |
2206 | 2.39k | bool checkZeroLength = true; |
2207 | | |
2208 | | // But if the array length is constant, we can suppress that. |
2209 | 2.39k | if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) { |
2210 | | // ...and if it's constant zero, we can just skip the entire thing. |
2211 | 2.36k | if (constLength->isZero()) return;
2212 | 2.36k | checkZeroLength = false; |
2213 | 2.36k | } |
2214 | | |
2215 | 2.39k | llvm::Value *begin = addr.getPointer(); |
2216 | 2.39k | llvm::Value *end = |
2217 | 2.39k | Builder.CreateInBoundsGEP(addr.getElementType(), begin, length); |
2218 | 2.39k | emitArrayDestroy(begin, end, type, elementAlign, destroyer, |
2219 | 2.39k | checkZeroLength, useEHCleanupForArray); |
2220 | 2.39k | } |
2221 | | |
2222 | | /// emitArrayDestroy - Destroys all the elements of the given array, |
2223 | | /// beginning from last to first. The array cannot be zero-length. |
2224 | | /// |
2225 | | /// \param begin - a type* denoting the first element of the array |
2226 | | /// \param end - a type* denoting one past the end of the array |
2227 | | /// \param elementType - the element type of the array |
2228 | | /// \param destroyer - the function to call to destroy elements |
2229 | | /// \param useEHCleanup - whether to push an EH cleanup to destroy |
2230 | | /// the remaining elements in case the destruction of a single |
2231 | | /// element throws |
2232 | | void CodeGenFunction::emitArrayDestroy(llvm::Value *begin, |
2233 | | llvm::Value *end, |
2234 | | QualType elementType, |
2235 | | CharUnits elementAlign, |
2236 | | Destroyer *destroyer, |
2237 | | bool checkZeroLength, |
2238 | 2.56k | bool useEHCleanup) { |
2239 | 2.56k | assert(!elementType->isArrayType()); |
2240 | | |
2241 | | // The basic structure here is a do-while loop, because we don't |
2242 | | // need to check for the zero-element case. |
2243 | 0 | llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body"); |
2244 | 2.56k | llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done"); |
2245 | | |
2246 | 2.56k | if (checkZeroLength) { |
2247 | 196 | llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end, |
2248 | 196 | "arraydestroy.isempty"); |
2249 | 196 | Builder.CreateCondBr(isEmpty, doneBB, bodyBB); |
2250 | 196 | } |
2251 | | |
2252 | | // Enter the loop body, making that address the current address. |
2253 | 2.56k | llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); |
2254 | 2.56k | EmitBlock(bodyBB); |
2255 | 2.56k | llvm::PHINode *elementPast = |
2256 | 2.56k | Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast"); |
2257 | 2.56k | elementPast->addIncoming(end, entryBB); |
2258 | | |
2259 | | // Shift the address back by one element. |
2260 | 2.56k | llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true); |
2261 | 2.56k | llvm::Type *llvmElementType = ConvertTypeForMem(elementType); |
2262 | 2.56k | llvm::Value *element = Builder.CreateInBoundsGEP( |
2263 | 2.56k | llvmElementType, elementPast, negativeOne, "arraydestroy.element"); |
2264 | | |
2265 | 2.56k | if (useEHCleanup) |
2266 | 1.86k | pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign, |
2267 | 1.86k | destroyer); |
2268 | | |
2269 | | // Perform the actual destruction there. |
2270 | 2.56k | destroyer(*this, Address(element, llvmElementType, elementAlign), |
2271 | 2.56k | elementType); |
2272 | | |
2273 | 2.56k | if (useEHCleanup) |
2274 | 1.86k | PopCleanupBlock(); |
2275 | | |
2276 | | // Check whether we've reached the end. |
2277 | 2.56k | llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done"); |
2278 | 2.56k | Builder.CreateCondBr(done, doneBB, bodyBB); |
2279 | 2.56k | elementPast->addIncoming(element, Builder.GetInsertBlock()); |
2280 | | |
2281 | | // Done. |
2282 | 2.56k | EmitBlock(doneBB); |
2283 | 2.56k | } |
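
A minimal example of the loop built above: elements with non-trivial destructors are destroyed from last to first through the arraydestroy.* blocks, matching C++'s reverse-construction-order rule:

    struct D {
      ~D() {}   // non-trivial, forcing the array destroy loop
    };
    void destroy_demo() {
      D arr[3];
    }   // destroyed as arr[2], arr[1], arr[0]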
2284 | | |
2285 | | /// Perform partial array destruction as if in an EH cleanup. Unlike |
2286 | | /// emitArrayDestroy, the element type here may still be an array type. |
2287 | | static void emitPartialArrayDestroy(CodeGenFunction &CGF, |
2288 | | llvm::Value *begin, llvm::Value *end, |
2289 | | QualType type, CharUnits elementAlign, |
2290 | 124 | CodeGenFunction::Destroyer *destroyer) { |
2291 | 124 | llvm::Type *elemTy = CGF.ConvertTypeForMem(type); |
2292 | | |
2293 | | // If the element type is itself an array, drill down. |
2294 | 124 | unsigned arrayDepth = 0; |
2295 | 143 | while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) { |
2296 | | // VLAs don't require a GEP index to walk into. |
2297 | 19 | if (!isa<VariableArrayType>(arrayType)) |
2298 | 19 | arrayDepth++; |
2299 | 19 | type = arrayType->getElementType(); |
2300 | 19 | } |
2301 | | |
2302 | 124 | if (arrayDepth) { |
2303 | 19 | llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0); |
2304 | | |
2305 | 19 | SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero); |
2306 | 19 | begin = CGF.Builder.CreateInBoundsGEP( |
2307 | 19 | elemTy, begin, gepIndices, "pad.arraybegin"); |
2308 | 19 | end = CGF.Builder.CreateInBoundsGEP( |
2309 | 19 | elemTy, end, gepIndices, "pad.arrayend"); |
2310 | 19 | } |
2311 | | |
2312 | | // Destroy the array. We don't ever need an EH cleanup because we |
2313 | | // assume that we're in an EH cleanup ourselves, so a throwing |
2314 | | // destructor causes an immediate terminate. |
2315 | 124 | CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer, |
2316 | 124 | /*checkZeroLength*/ true, /*useEHCleanup*/ false); |
2317 | 124 | } |
2318 | | |
2319 | | namespace { |
2320 | | /// RegularPartialArrayDestroy - a cleanup which performs a partial |
2321 | | /// array destroy where the end pointer is regularly determined and |
2322 | | /// does not need to be loaded from a local. |
2323 | | class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup { |
2324 | | llvm::Value *ArrayBegin; |
2325 | | llvm::Value *ArrayEnd; |
2326 | | QualType ElementType; |
2327 | | CodeGenFunction::Destroyer *Destroyer; |
2328 | | CharUnits ElementAlign; |
2329 | | public: |
2330 | | RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd, |
2331 | | QualType elementType, CharUnits elementAlign, |
2332 | | CodeGenFunction::Destroyer *destroyer) |
2333 | | : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd), |
2334 | | ElementType(elementType), Destroyer(destroyer), |
2335 | 1.90k | ElementAlign(elementAlign) {} |
2336 | | |
2337 | 46 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2338 | 46 | emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd, |
2339 | 46 | ElementType, ElementAlign, Destroyer); |
2340 | 46 | } |
2341 | | }; |
2342 | | |
2343 | | /// IrregularPartialArrayDestroy - a cleanup which performs a |
2344 | | /// partial array destroy where the end pointer is irregularly |
2345 | | /// determined and must be loaded from a local. |
2346 | | class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup { |
2347 | | llvm::Value *ArrayBegin; |
2348 | | Address ArrayEndPointer; |
2349 | | QualType ElementType; |
2350 | | CodeGenFunction::Destroyer *Destroyer; |
2351 | | CharUnits ElementAlign; |
2352 | | public: |
2353 | | IrregularPartialArrayDestroy(llvm::Value *arrayBegin, |
2354 | | Address arrayEndPointer, |
2355 | | QualType elementType, |
2356 | | CharUnits elementAlign, |
2357 | | CodeGenFunction::Destroyer *destroyer) |
2358 | | : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer), |
2359 | | ElementType(elementType), Destroyer(destroyer), |
2360 | 78 | ElementAlign(elementAlign) {} |
2361 | | |
2362 | 78 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2363 | 78 | llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer); |
2364 | 78 | emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd, |
2365 | 78 | ElementType, ElementAlign, Destroyer); |
2366 | 78 | } |
2367 | | }; |
2368 | | } // end anonymous namespace |
2369 | | |
2370 | | /// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy |
2371 | | /// already-constructed elements of the given array. The cleanup |
2372 | | /// may be popped with DeactivateCleanupBlock or PopCleanupBlock. |
2373 | | /// |
2374 | | /// \param elementType - the immediate element type of the array; |
2375 | | /// possibly still an array type |
2376 | | void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin, |
2377 | | Address arrayEndPointer, |
2378 | | QualType elementType, |
2379 | | CharUnits elementAlign, |
2380 | 78 | Destroyer *destroyer) { |
2381 | 78 | pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup, |
2382 | 78 | arrayBegin, arrayEndPointer, |
2383 | 78 | elementType, elementAlign, |
2384 | 78 | destroyer); |
2385 | 78 | } |
2386 | | |
2387 | | /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy |
2388 | | /// already-constructed elements of the given array. The cleanup |
2389 | | /// may be popped with DeactivateCleanupBlock or PopCleanupBlock. |
2390 | | /// |
2391 | | /// \param elementType - the immediate element type of the array; |
2392 | | /// possibly still an array type |
2393 | | void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin, |
2394 | | llvm::Value *arrayEnd, |
2395 | | QualType elementType, |
2396 | | CharUnits elementAlign, |
2397 | 1.90k | Destroyer *destroyer) { |
2398 | 1.90k | pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup, |
2399 | 1.90k | arrayBegin, arrayEnd, |
2400 | 1.90k | elementType, elementAlign, |
2401 | 1.90k | destroyer); |
2402 | 1.90k | } |
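The two push helpers differ only in how the cleanup learns the end pointer. A hedged usage sketch, kept in comments since the concrete call sites live elsewhere in CodeGen:

    // Regular: the end pointer is an SSA value fixed when the cleanup
    // is pushed (the bound is known up front):
    //
    //   CGF.pushRegularPartialArrayCleanup(begin, end, elemTy, elemAlign,
    //                                      destroyer);
    //   ... construct elements in [begin, end) ...
    //   CGF.PopCleanupBlock();   // or DeactivateCleanupBlock(...)
    //
    // Irregular: the frontier advances as elements are constructed, so
    // the cleanup reloads the current end from a local at unwind time:
    //
    //   CGF.pushIrregularPartialArrayCleanup(begin, endPointerSlot,
    //                                        elemTy, elemAlign, destroyer);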
2403 | | |
2404 | | /// Lazily declare the @llvm.lifetime.start intrinsic. |
2405 | 4.78k | llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() { |
2406 | 4.78k | if (LifetimeStartFn) |
2407 | 4.26k | return LifetimeStartFn; |
2408 | 521 | LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(), |
2409 | 521 | llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy); |
2410 | 521 | return LifetimeStartFn; |
2411 | 4.78k | } |
2412 | | |
2413 | | /// Lazily declare the @llvm.lifetime.end intrinsic. |
2414 | 4.93k | llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() { |
2415 | 4.93k | if (LifetimeEndFn) |
2416 | 4.41k | return LifetimeEndFn; |
2417 | 518 | LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(), |
2418 | 518 | llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy); |
2419 | 518 | return LifetimeEndFn; |
2420 | 4.93k | } |
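A hedged sketch of how these getters are consumed: CodeGen brackets an alloca's live range with the two intrinsics so the optimizer can reuse stack slots. SizeV and Addr below are stand-ins for the size constant and the alloca pointer cast to AllocaInt8PtrTy.

    // Sketch (in comments; the real call sites are helpers such as
    // CodeGenFunction::EmitLifetimeStart/EmitLifetimeEnd in this file):
    //
    //   Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
    //   ... emit the variable's scope ...
    //   Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {SizeV, Addr});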
2421 | | |
2422 | | namespace { |
2423 | | /// A cleanup to perform a release of an object at the end of a |
2424 | | /// function. This is used to balance out the incoming +1 of an
2425 | | /// ns_consumed argument when we can't reasonably do that just by |
2426 | | /// not doing the initial retain for a __block argument. |
2427 | | struct ConsumeARCParameter final : EHScopeStack::Cleanup { |
2428 | | ConsumeARCParameter(llvm::Value *param, |
2429 | | ARCPreciseLifetime_t precise) |
2430 | 0 | : Param(param), Precise(precise) {} |
2431 | | |
2432 | | llvm::Value *Param; |
2433 | | ARCPreciseLifetime_t Precise; |
2434 | | |
2435 | 0 | void Emit(CodeGenFunction &CGF, Flags flags) override { |
2436 | 0 | CGF.EmitARCRelease(Param, Precise); |
2437 | 0 | } |
2438 | | }; |
2439 | | } // end anonymous namespace |
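An illustration of the +1 convention this cleanup balances, shown in comments because the example is Objective-C while this file is C++:

    // Hedged Objective-C example (not from this file):
    //
    //   void take(__attribute__((ns_consumed)) id obj);
    //
    // The caller hands 'obj' over at +1. When CodeGen cannot consume
    // that reference simply by skipping the initial retain, it pushes
    // ConsumeARCParameter so the value is released on every exit path.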
2440 | | |
2441 | | /// Emit an alloca (or GlobalValue depending on target) |
2442 | | /// for the specified parameter and set up LocalDeclMap. |
2443 | | void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg, |
2444 | 529k | unsigned ArgNo) { |
2445 | 529k | bool NoDebugInfo = false; |
2446 | | // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl? |
2447 | 529k | assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) && |
2448 | 529k | "Invalid argument to EmitParmDecl"); |
2449 | | |
2450 | 0 | Arg.getAnyValue()->setName(D.getName()); |
2451 | | |
2452 | 529k | QualType Ty = D.getType(); |
2453 | | |
2454 | | // Use better IR generation for certain implicit parameters. |
2455 | 529k | if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) { |
2456 | | // The only implicit argument a block has is its literal. |
2457 | | // This may be passed as an inalloca'ed value on Windows x86. |
2458 | 193k | if (BlockInfo) { |
2459 | 1.17k | llvm::Value *V = Arg.isIndirect() |
2460 | 1.17k | ? Builder.CreateLoad(Arg.getIndirectAddress())1 |
2461 | 1.17k | : Arg.getDirectValue()1.17k ; |
2462 | 1.17k | setBlockContextParameter(IPD, ArgNo, V); |
2463 | 1.17k | return; |
2464 | 1.17k | } |
2465 | | // Suppress debug info for ThreadPrivateVar parameters; otherwise it hides
2466 | | // the debug info of the underlying TLS variables.
2467 | 192k | NoDebugInfo = |
2468 | 192k | (IPD->getParameterKind() == ImplicitParamDecl::ThreadPrivateVar); |
2469 | 192k | } |
2470 | | |
2471 | 528k | Address DeclPtr = Address::invalid(); |
2472 | 528k | Address AllocaPtr = Address::invalid(); |
2473 | 528k | bool DoStore = false; |
2474 | 528k | bool IsScalar = hasScalarEvaluationKind(Ty); |
2475 | | // If we already have a pointer to the argument, reuse the input pointer. |
2476 | 528k | if (Arg.isIndirect()) { |
2477 | | // If we have a prettier pointer type at this point, bitcast to that. |
2478 | 14.7k | DeclPtr = Arg.getIndirectAddress(); |
2479 | 14.7k | DeclPtr = Builder.CreateElementBitCast(DeclPtr, ConvertTypeForMem(Ty), |
2480 | 14.7k | D.getName()); |
2481 | | // Indirect argument is in alloca address space, which may be different |
2482 | | // from the default address space. |
2483 | 14.7k | auto AllocaAS = CGM.getASTAllocaAddressSpace(); |
2484 | 14.7k | auto *V = DeclPtr.getPointer(); |
2485 | 14.7k | AllocaPtr = DeclPtr; |
2486 | 14.7k | auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private161 : AllocaAS14.6k ; |
2487 | 14.7k | auto DestLangAS = |
2488 | 14.7k | getLangOpts().OpenCL ? LangAS::opencl_private161 : LangAS::Default14.6k ; |
2489 | 14.7k | if (SrcLangAS != DestLangAS) { |
2490 | 88 | assert(getContext().getTargetAddressSpace(SrcLangAS) == |
2491 | 88 | CGM.getDataLayout().getAllocaAddrSpace()); |
2492 | 0 | auto DestAS = getContext().getTargetAddressSpace(DestLangAS); |
2493 | 88 | auto *T = DeclPtr.getElementType()->getPointerTo(DestAS); |
2494 | 88 | DeclPtr = DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast( |
2495 | 88 | *this, V, SrcLangAS, DestLangAS, T, true)); |
2496 | 88 | } |
2497 | | |
2498 | | // Push a destructor cleanup for this parameter if the ABI requires it. |
2499 | | // Don't push a cleanup in a thunk for a method that will also emit a |
2500 | | // cleanup. |
2501 | 14.7k | if (Ty->isRecordType() && !CurFuncIsThunk13.8k && |
2502 | 14.7k | Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()13.8k ) { |
2503 | 419 | if (QualType::DestructionKind DtorKind = |
2504 | 419 | D.needsDestruction(getContext())) { |
2505 | 125 | assert((DtorKind == QualType::DK_cxx_destructor || |
2506 | 125 | DtorKind == QualType::DK_nontrivial_c_struct) && |
2507 | 125 | "unexpected destructor type"); |
2508 | 0 | pushDestroy(DtorKind, DeclPtr, Ty); |
2509 | 125 | CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] = |
2510 | 125 | EHStack.stable_begin(); |
2511 | 125 | } |
2512 | 419 | } |
2513 | 513k | } else { |
2514 | | // Check if the parameter address is controlled by OpenMP runtime. |
2515 | 513k | Address OpenMPLocalAddr = |
2516 | 513k | getLangOpts().OpenMP |
2517 | 513k | ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)116k |
2518 | 513k | : Address::invalid()397k ; |
2519 | 513k | if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()116k ) { |
2520 | 19 | DeclPtr = OpenMPLocalAddr; |
2521 | 19 | AllocaPtr = DeclPtr; |
2522 | 513k | } else { |
2523 | | // Otherwise, create a temporary to hold the value. |
2524 | 513k | DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D), |
2525 | 513k | D.getName() + ".addr", &AllocaPtr); |
2526 | 513k | } |
2527 | 513k | DoStore = true; |
2528 | 513k | } |
2529 | | |
2530 | 528k | llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue()513k : nullptr14.7k ); |
2531 | | |
2532 | 528k | LValue lv = MakeAddrLValue(DeclPtr, Ty); |
2533 | 528k | if (IsScalar) { |
2534 | 513k | Qualifiers qs = Ty.getQualifiers(); |
2535 | 513k | if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) { |
2536 | | // We honor __attribute__((ns_consumed)) for types with lifetime. |
2537 | | // For __strong, it's handled by just skipping the initial retain; |
2538 | | // otherwise we have to balance out the initial +1 with an extra |
2539 | | // cleanup to do the release at the end of the function. |
2540 | 590 | bool isConsumed = D.hasAttr<NSConsumedAttr>(); |
2541 | | |
2542 | | // If a parameter is pseudo-strong then we can omit the implicit retain. |
2543 | 590 | if (D.isARCPseudoStrong()) { |
2544 | 270 | assert(lt == Qualifiers::OCL_Strong && |
2545 | 270 | "pseudo-strong variable isn't strong?"); |
2546 | 0 | assert(qs.hasConst() && "pseudo-strong variable should be const!"); |
2547 | 0 | lt = Qualifiers::OCL_ExplicitNone; |
2548 | 270 | } |
2549 | | |
2550 | | // Load objects passed indirectly. |
2551 | 590 | if (Arg.isIndirect() && !ArgVal8 ) |
2552 | 8 | ArgVal = Builder.CreateLoad(DeclPtr); |
2553 | | |
2554 | 590 | if (lt == Qualifiers::OCL_Strong) { |
2555 | 307 | if (!isConsumed) { |
2556 | 277 | if (CGM.getCodeGenOpts().OptimizationLevel == 0) { |
2557 | | // Use objc_storeStrong(&dest, value) to retain the
2558 | | // object. But first, store a null into 'dest' because |
2559 | | // objc_storeStrong attempts to release its old value. |
2560 | 203 | llvm::Value *Null = CGM.EmitNullConstant(D.getType()); |
2561 | 203 | EmitStoreOfScalar(Null, lv, /* isInitialization */ true); |
2562 | 203 | EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true); |
2563 | 203 | DoStore = false; |
2564 | 203 | } |
2565 | 74 | else |
2566 | | // Don't use objc_retainBlock for block pointers, because we |
2567 | | // don't want to Block_copy something just because we got it |
2568 | | // as a parameter. |
2569 | 74 | ArgVal = EmitARCRetainNonBlock(ArgVal); |
2570 | 277 | } |
2571 | 307 | } else { |
2572 | | // Push the cleanup for a consumed parameter. |
2573 | 283 | if (isConsumed) { |
2574 | 0 | ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>() |
2575 | 0 | ? ARCPreciseLifetime : ARCImpreciseLifetime); |
2576 | 0 | EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal, |
2577 | 0 | precise); |
2578 | 0 | } |
2579 | | |
2580 | 283 | if (lt == Qualifiers::OCL_Weak) { |
2581 | 7 | EmitARCInitWeak(DeclPtr, ArgVal); |
2582 | 7 | DoStore = false; // The weak init is a store, no need to do two. |
2583 | 7 | } |
2584 | 283 | } |
2585 | | |
2586 | | // Enter the cleanup scope. |
2587 | 590 | EmitAutoVarWithLifetime(*this, D, DeclPtr, lt); |
2588 | 590 | } |
2589 | 513k | } |
2590 | | |
2591 | | // Store the initial value into the alloca. |
2592 | 528k | if (DoStore) |
2593 | 513k | EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true); |
2594 | | |
2595 | 528k | setAddrOfLocalVar(&D, DeclPtr); |
2596 | | |
2597 | | // Emit debug info for param declarations in non-thunk functions. |
2598 | 528k | if (CGDebugInfo *DI = getDebugInfo()) { |
2599 | 168k | if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk167k && |
2600 | 168k | !NoDebugInfo167k ) { |
2601 | 167k | llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable( |
2602 | 167k | &D, AllocaPtr.getPointer(), ArgNo, Builder); |
2603 | 167k | if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D)) |
2604 | 99.7k | DI->getParamDbgMappings().insert({Var, DILocalVar}); |
2605 | 167k | } |
2606 | 168k | } |
2607 | | |
2608 | 528k | if (D.hasAttr<AnnotateAttr>()) |
2609 | 3 | EmitVarAnnotations(&D, DeclPtr.getPointer()); |
2610 | | |
2611 | | // We can only check return value nullability if all arguments to the |
2612 | | // function satisfy their nullability preconditions. This makes it necessary |
2613 | | // to emit null checks for args in the function body itself. |
2614 | 528k | if (requiresReturnValueNullabilityCheck()) { |
2615 | 28 | auto Nullability = Ty->getNullability(getContext()); |
2616 | 28 | if (Nullability && *Nullability == NullabilityKind::NonNull10 ) { |
2617 | 8 | SanitizerScope SanScope(this); |
2618 | 8 | RetValNullabilityPrecondition = |
2619 | 8 | Builder.CreateAnd(RetValNullabilityPrecondition, |
2620 | 8 | Builder.CreateIsNotNull(Arg.getAnyValue())); |
2621 | 8 | } |
2622 | 28 | } |
2623 | 528k | } |
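A minimal sketch of the callee-destroyed-parameter path above, assuming an ABI where isParamDestroyedInCallee() is true (e.g., the Microsoft C++ ABI):

    struct Handle {
      ~Handle();   // non-trivial destructor
    };

    void consume(Handle h) {
      // EmitParmDecl pushed a destructor cleanup for 'h', so under such
      // an ABI h.~Handle() runs here in the callee, not in the caller.
    }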
2624 | | |
2625 | | void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D, |
2626 | 198 | CodeGenFunction *CGF) { |
2627 | 198 | if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()84 )) |
2628 | 84 | return; |
2629 | 114 | getOpenMPRuntime().emitUserDefinedReduction(CGF, D); |
2630 | 114 | } |
2631 | | |
2632 | | void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D, |
2633 | 80 | CodeGenFunction *CGF) { |
2634 | 80 | if (!LangOpts.OpenMP || LangOpts.OpenMPSimd || |
2635 | 80 | (42 !LangOpts.EmitAllDecls42 && !D->isUsed()18 )) |
2636 | 56 | return; |
2637 | 24 | getOpenMPRuntime().emitUserDefinedMapper(D, CGF); |
2638 | 24 | } |
2639 | | |
2640 | 38 | void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) { |
2641 | 38 | getOpenMPRuntime().processRequiresDirective(D); |
2642 | 38 | } |
2643 | | |
2644 | 92 | void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) { |
2645 | 103 | for (const Expr *E : D->varlists()) { |
2646 | 103 | const auto *DE = cast<DeclRefExpr>(E); |
2647 | 103 | const auto *VD = cast<VarDecl>(DE->getDecl()); |
2648 | | |
2649 | | // Skip all but globals. |
2650 | 103 | if (!VD->hasGlobalStorage()) |
2651 | 0 | continue; |
2652 | | |
2653 | | // Check whether the global has been materialized yet. If not, we are done,
2654 | | // as any later generation will utilize the OMPAllocateDeclAttr. However, if
2655 | | // we already emitted the global, we might have done so before the
2656 | | // OMPAllocateDeclAttr was attached, potentially leaving it in the wrong
2657 | | // address space. While not pretty, common practice is to remove the old IR
2658 | | // global and generate a new one, so we do that here too. Uses are replaced
2659 | | // properly.
2660 | 103 | StringRef MangledName = getMangledName(VD); |
2661 | 103 | llvm::GlobalValue *Entry = GetGlobalValue(MangledName); |
2662 | 103 | if (!Entry) |
2663 | 75 | continue; |
2664 | | |
2665 | | // We can also keep the existing global if the address space is what we |
2666 | | // expect it to be, if not, it is replaced. |
2667 | 28 | QualType ASTTy = VD->getType(); |
2668 | 28 | clang::LangAS GVAS = GetGlobalVarAddressSpace(VD); |
2669 | 28 | auto TargetAS = getContext().getTargetAddressSpace(GVAS); |
2670 | 28 | if (Entry->getType()->getAddressSpace() == TargetAS) |
2671 | 28 | continue; |
2672 | | |
2673 | | // Make a new global with the correct type / address space. |
2674 | 0 | llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy); |
2675 | 0 | llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS); |
2676 | | |
2677 | | // Replace all uses of the old global with a cast. Since we mutate the type |
2678 | | // in place we need an intermediate that takes the spot of the old entry
2679 | | // until we can create the cast. |
2680 | 0 | llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable( |
2681 | 0 | getModule(), Entry->getValueType(), false, |
2682 | 0 | llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr, |
2683 | 0 | llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace()); |
2684 | 0 | Entry->replaceAllUsesWith(DummyGV); |
2685 | |
2686 | 0 | Entry->mutateType(PTy); |
2687 | 0 | llvm::Constant *NewPtrForOldDecl = |
2688 | 0 | llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast( |
2689 | 0 | Entry, DummyGV->getType()); |
2690 | | |
2691 | | // Now we have a casted version of the changed global, the dummy can be |
2692 | | // replaced and deleted. |
2693 | 0 | DummyGV->replaceAllUsesWith(NewPtrForOldDecl); |
2694 | 0 | DummyGV->eraseFromParent(); |
2695 | 0 | } |
2696 | 92 | } |
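A minimal sketch of input that reaches this code, assuming an allocator that places the variable in a non-default address space on an offload target; the allocator choice is illustrative:

    int data[64];
    #pragma omp allocate(data) allocator(omp_cgroup_mem_alloc)
    // If 'data' was emitted before the OMPAllocateDeclAttr was attached,
    // EmitOMPAllocateDecl recreates the global in the expected address
    // space, routing old uses through the temporary dummy global.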
2697 | | |
2698 | | llvm::Optional<CharUnits> |
2699 | 24.1k | CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) { |
2700 | 24.1k | if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) { |
2701 | 199 | if (Expr *Alignment = AA->getAlignment()) { |
2702 | 70 | unsigned UserAlign = |
2703 | 70 | Alignment->EvaluateKnownConstInt(getContext()).getExtValue(); |
2704 | 70 | CharUnits NaturalAlign = |
2705 | 70 | getNaturalTypeAlignment(VD->getType().getNonReferenceType()); |
2706 | | |
2707 | | // OpenMP 5.1, page 185, lines 7-10:
2708 | | // Each item in the align modifier list must be aligned to the maximum |
2709 | | // of the specified alignment and the type's natural alignment. |
2710 | 70 | return CharUnits::fromQuantity( |
2711 | 70 | std::max<unsigned>(UserAlign, NaturalAlign.getQuantity())); |
2712 | 70 | } |
2713 | 199 | } |
2714 | 24.0k | return llvm::None; |
2715 | 24.1k | } |
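A minimal sketch of the resulting arithmetic: the returned alignment is the maximum of the align clause value and the type's natural alignment.

    double d;                           // natural alignment typically 8
    #pragma omp allocate(d) align(32)   // getOMPAllocateAlignment -> 32

    float f;                            // natural alignment typically 4
    #pragma omp allocate(f) align(2)    // max(2, 4) -> 4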