/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
Line | Count | Source |
1 | | //===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This file contains code dealing with the IR generation for cleanups |
10 | | // and related information. |
11 | | // |
12 | | // A "cleanup" is a piece of code which needs to be executed whenever |
13 | | // control transfers out of a particular scope. This can be |
14 | | // conditionalized to occur only on exceptional control flow, only on |
15 | | // normal control flow, or both. |
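| | // |
| | // Illustrative example (not from the original file; f and g are |
| | // hypothetical): in |
| | //   { std::string s = f(); g(); } |
| | // the destructor of 's' is a cleanup that must run on normal |
| | // fallthrough after g(), on any goto/return leaving the scope, and on |
| | // the exceptional path if g() throws. |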
16 | | // |
17 | | //===----------------------------------------------------------------------===// |
18 | | |
19 | | #include "CGCleanup.h" |
20 | | #include "CodeGenFunction.h" |
21 | | #include "llvm/Support/SaveAndRestore.h" |
22 | | |
23 | | using namespace clang; |
24 | | using namespace CodeGen; |
25 | | |
26 | 0 | bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) { |
27 | 0 | if (rv.isScalar()) |
28 | 0 | return DominatingLLVMValue::needsSaving(rv.getScalarVal()); |
29 | 0 | if (rv.isAggregate()) |
30 | 0 | return DominatingLLVMValue::needsSaving(rv.getAggregatePointer()); |
31 | 0 | return true; |
32 | 0 | } |
33 | | |
34 | | DominatingValue<RValue>::saved_type |
35 | 16 | DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) { |
36 | 16 | if (rv.isScalar()) { |
37 | 16 | llvm::Value *V = rv.getScalarVal(); |
38 | | |
39 | | // These automatically dominate and don't need to be saved. |
40 | 16 | if (!DominatingLLVMValue::needsSaving(V)) |
41 | 10 | return saved_type(V, nullptr, ScalarLiteral); |
42 | | |
43 | | // Everything else needs an alloca. |
44 | 6 | Address addr = |
45 | 6 | CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue"); |
46 | 6 | CGF.Builder.CreateStore(V, addr); |
47 | 6 | return saved_type(addr.getPointer(), nullptr, ScalarAddress); |
48 | 16 | } |
49 | | |
50 | 0 | if (rv.isComplex()) { |
51 | 0 | CodeGenFunction::ComplexPairTy V = rv.getComplexVal(); |
52 | 0 | llvm::Type *ComplexTy = |
53 | 0 | llvm::StructType::get(V.first->getType(), V.second->getType()); |
54 | 0 | Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex"); |
55 | 0 | CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0)); |
56 | 0 | CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1)); |
57 | 0 | return saved_type(addr.getPointer(), nullptr, ComplexAddress); |
58 | 0 | } |
59 | | |
60 | 0 | assert(rv.isAggregate()); |
61 | 0 | Address V = rv.getAggregateAddress(); // TODO: volatile? |
62 | 0 | if (!DominatingLLVMValue::needsSaving(V.getPointer())) |
63 | 0 | return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral, |
64 | 0 | V.getAlignment().getQuantity()); |
65 | | |
66 | 0 | Address addr = |
67 | 0 | CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue"); |
68 | 0 | CGF.Builder.CreateStore(V.getPointer(), addr); |
69 | 0 | return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress, |
70 | 0 | V.getAlignment().getQuantity()); |
71 | 0 | } |
72 | | |
73 | | /// Given a saved r-value produced by SaveRValue, emit the code |
74 | | /// necessary to restore it to usability at the current insertion |
75 | | /// point. |
76 | 10 | RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) { |
77 | 10 | auto getSavingAddress = [&](llvm::Value *value) { |
78 | 6 | auto *AI = cast<llvm::AllocaInst>(value); |
79 | 6 | return Address(value, AI->getAllocatedType(), |
80 | 6 | CharUnits::fromQuantity(AI->getAlign().value())); |
81 | 6 | }; |
82 | 10 | switch (K) { |
83 | 4 | case ScalarLiteral: |
84 | 4 | return RValue::get(Value); |
85 | 6 | case ScalarAddress: |
86 | 6 | return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value))); |
87 | 0 | case AggregateLiteral: |
88 | 0 | return RValue::getAggregate( |
89 | 0 | Address(Value, ElementType, CharUnits::fromQuantity(Align))); |
90 | 0 | case AggregateAddress: { |
91 | 0 | auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value)); |
92 | 0 | return RValue::getAggregate( |
93 | 0 | Address(addr, ElementType, CharUnits::fromQuantity(Align))); |
94 | 0 | } |
95 | 0 | case ComplexAddress: { |
96 | 0 | Address address = getSavingAddress(Value); |
97 | 0 | llvm::Value *real = |
98 | 0 | CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 0)); |
99 | 0 | llvm::Value *imag = |
100 | 0 | CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(address, 1)); |
101 | 0 | return RValue::getComplex(real, imag); |
102 | 0 | } |
103 | 10 | } |
104 | | |
105 | 0 | llvm_unreachable("bad saved r-value kind"); |
106 | 0 | } |
107 | | |
108 | | /// Push an entry of the given size onto this protected-scope stack. |
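| | /// The buffer is filled downward: StartOfData moves from EndOfBuffer |
| | /// toward StartOfBuffer as entries are pushed, so the innermost scope |
| | /// always sits at StartOfData. |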
109 | 167k | char *EHScopeStack::allocate(size_t Size) { |
110 | 167k | Size = llvm::alignTo(Size, ScopeStackAlignment); |
111 | 167k | if (!StartOfBuffer) { |
112 | 94.3k | unsigned Capacity = 1024; |
113 | 94.3k | while (Capacity < Size) Capacity *= 2; |
114 | 94.3k | StartOfBuffer = new char[Capacity]; |
115 | 94.3k | StartOfData = EndOfBuffer = StartOfBuffer + Capacity; |
116 | 94.3k | } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) { |
117 | 157 | unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer; |
118 | 157 | unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer); |
119 | | |
120 | 157 | unsigned NewCapacity = CurrentCapacity; |
121 | 157 | do { |
122 | 157 | NewCapacity *= 2; |
123 | 157 | } while (NewCapacity < UsedCapacity + Size); |
124 | | |
125 | 157 | char *NewStartOfBuffer = new char[NewCapacity]; |
126 | 157 | char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity; |
127 | 157 | char *NewStartOfData = NewEndOfBuffer - UsedCapacity; |
128 | 157 | memcpy(NewStartOfData, StartOfData, UsedCapacity); |
129 | 157 | delete [] StartOfBuffer; |
130 | 157 | StartOfBuffer = NewStartOfBuffer; |
131 | 157 | EndOfBuffer = NewEndOfBuffer; |
132 | 157 | StartOfData = NewStartOfData; |
133 | 157 | } |
134 | | |
135 | 167k | assert(StartOfBuffer + Size <= StartOfData); |
136 | 0 | StartOfData -= Size; |
137 | 167k | return StartOfData; |
138 | 167k | } |
139 | | |
140 | 167k | void EHScopeStack::deallocate(size_t Size) { |
141 | 167k | StartOfData += llvm::alignTo(Size, ScopeStackAlignment); |
142 | 167k | } |
143 | | |
144 | | bool EHScopeStack::containsOnlyLifetimeMarkers( |
145 | 7.01k | EHScopeStack::stable_iterator Old) const { |
146 | 9.22k | for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) { |
147 | 7.80k | EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it); |
148 | 7.80k | if (!cleanup || !cleanup->isLifetimeMarker()) |
149 | 5.59k | return false; |
150 | 7.80k | } |
151 | | |
152 | 1.42k | return true; |
153 | 7.01k | } |
154 | | |
155 | 296k | bool EHScopeStack::requiresLandingPad() const { |
156 | 300k | for (stable_iterator si = getInnermostEHScope(); si != stable_end(); ) { |
157 | | // Skip lifetime markers. |
158 | 105k | if (auto *cleanup = dyn_cast<EHCleanupScope>(&*find(si))) |
159 | 63.3k | if (cleanup->isLifetimeMarker()) { |
160 | 3.28k | si = cleanup->getEnclosingEHScope(); |
161 | 3.28k | continue; |
162 | 3.28k | } |
163 | 102k | return true; |
164 | 105k | } |
165 | | |
166 | 194k | return false; |
167 | 296k | } |
168 | | |
169 | | EHScopeStack::stable_iterator |
170 | 176k | EHScopeStack::getInnermostActiveNormalCleanup() const { |
171 | 176k | for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end(); |
172 | 176k | si != se; ) { |
173 | 8.84k | EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si)); |
174 | 8.84k | if (cleanup.isActive()) return si; |
175 | 0 | si = cleanup.getEnclosingNormalCleanup(); |
176 | 0 | } |
177 | 167k | return stable_end(); |
178 | 176k | } |
179 | | |
180 | | |
181 | 48.6k | void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { |
182 | 48.6k | char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size)); |
183 | 48.6k | bool IsNormalCleanup = Kind & NormalCleanup; |
184 | 48.6k | bool IsEHCleanup = Kind & EHCleanup; |
185 | 48.6k | bool IsLifetimeMarker = Kind & LifetimeMarker; |
186 | | |
187 | | // Per C++ [except.terminate], it is implementation-defined whether none, |
188 | | // some, or all cleanups are called before std::terminate. Thus, when |
189 | | // terminate is the current EH scope, we may skip adding any EH cleanup |
190 | | // scopes. |
191 | 48.6k | if (InnermostEHScope != stable_end() && |
192 | 48.6k | find(InnermostEHScope)->getKind() == EHScope::Terminate) |
193 | 22.6k | IsEHCleanup = false; |
194 | | |
195 | 48.6k | EHCleanupScope *Scope = |
196 | 48.6k | new (Buffer) EHCleanupScope(IsNormalCleanup, |
197 | 48.6k | IsEHCleanup, |
198 | 48.6k | Size, |
199 | 48.6k | BranchFixups.size(), |
200 | 48.6k | InnermostNormalCleanup, |
201 | 48.6k | InnermostEHScope); |
202 | 48.6k | if (IsNormalCleanup) |
203 | 42.4k | InnermostNormalCleanup = stable_begin(); |
204 | 48.6k | if (IsEHCleanup) |
205 | 22.1k | InnermostEHScope = stable_begin(); |
206 | 48.6k | if (IsLifetimeMarker) |
207 | 4.77k | Scope->setLifetimeMarker(); |
208 | | |
209 | | // With Windows -EHa, invoke llvm.seh.scope.begin() for EHCleanup |
210 | 48.6k | if (CGF->getLangOpts().EHAsynch && IsEHCleanup && !IsLifetimeMarker && |
211 | 48.6k | CGF->getTarget().getCXXABI().isMicrosoft()) |
212 | 19 | CGF->EmitSehCppScopeBegin(); |
213 | | |
214 | 48.6k | return Scope->getCleanupBuffer(); |
215 | 48.6k | } |
216 | | |
217 | 48.6k | void EHScopeStack::popCleanup() { |
218 | 48.6k | assert(!empty() && "popping exception stack when not empty"); |
219 | | |
220 | 0 | assert(isa<EHCleanupScope>(*begin())); |
221 | 0 | EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin()); |
222 | 48.6k | InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); |
223 | 48.6k | InnermostEHScope = Cleanup.getEnclosingEHScope(); |
224 | 48.6k | deallocate(Cleanup.getAllocatedSize()); |
225 | | |
226 | | // Destroy the cleanup. |
227 | 48.6k | Cleanup.Destroy(); |
228 | | |
229 | | // Check whether we can shrink the branch-fixups stack. |
230 | 48.6k | if (!BranchFixups.empty()) { |
231 | | // If we no longer have any normal cleanups, all the fixups are |
232 | | // complete. |
233 | 11 | if (!hasNormalCleanups()) |
234 | 0 | BranchFixups.clear(); |
235 | | |
236 | | // Otherwise we can still trim out unnecessary nulls. |
237 | 11 | else |
238 | 11 | popNullFixups(); |
239 | 11 | } |
240 | 48.6k | } |
241 | | |
242 | 97 | EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) { |
243 | 97 | assert(getInnermostEHScope() == stable_end()); |
244 | 0 | char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters)); |
245 | 97 | EHFilterScope *filter = new (buffer) EHFilterScope(numFilters); |
246 | 97 | InnermostEHScope = stable_begin(); |
247 | 97 | return filter; |
248 | 97 | } |
249 | | |
250 | 97 | void EHScopeStack::popFilter() { |
251 | 97 | assert(!empty() && "popping exception stack when not empty"); |
252 | | |
253 | 0 | EHFilterScope &filter = cast<EHFilterScope>(*begin()); |
254 | 97 | deallocate(EHFilterScope::getSizeForNumFilters(filter.getNumFilters())); |
255 | | |
256 | 97 | InnermostEHScope = filter.getEnclosingEHScope(); |
257 | 97 | } |
258 | | |
259 | 571 | EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) { |
260 | 571 | char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers)); |
261 | 571 | EHCatchScope *scope = |
262 | 571 | new (buffer) EHCatchScope(numHandlers, InnermostEHScope); |
263 | 571 | InnermostEHScope = stable_begin(); |
264 | 571 | return scope; |
265 | 571 | } |
266 | | |
267 | 118k | void EHScopeStack::pushTerminate() { |
268 | 118k | char *Buffer = allocate(EHTerminateScope::getSize()); |
269 | 118k | new (Buffer) EHTerminateScope(InnermostEHScope); |
270 | 118k | InnermostEHScope = stable_begin(); |
271 | 118k | } |
272 | | |
273 | | /// Remove any 'null' fixups on the stack. However, we can't pop more |
274 | | /// fixups than the fixup depth on the innermost normal cleanup, or |
275 | | /// else fixups that we try to add to that cleanup will end up in the |
276 | | /// wrong place. We *could* try to shrink fixup depths, but that's |
277 | | /// actually a lot of work for little benefit. |
278 | 16 | void EHScopeStack::popNullFixups() { |
279 | | // We expect this to only be called when there's still an innermost |
280 | | // normal cleanup; otherwise there really shouldn't be any fixups. |
281 | 16 | assert(hasNormalCleanups()); |
282 | | |
283 | 0 | EHScopeStack::iterator it = find(InnermostNormalCleanup); |
284 | 16 | unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth(); |
285 | 16 | assert(BranchFixups.size() >= MinSize && "fixup stack out of order"); |
286 | | |
287 | 3.02k | while (BranchFixups.size() > MinSize && |
288 | 3.02k | BranchFixups.back().Destination == nullptr) |
289 | 3.00k | BranchFixups.pop_back(); |
290 | 16 | } |
291 | | |
292 | 206 | Address CodeGenFunction::createCleanupActiveFlag() { |
293 | | // Create a variable to decide whether the cleanup needs to be run. |
294 | 206 | Address active = CreateTempAllocaWithoutCast( |
295 | 206 | Builder.getInt1Ty(), CharUnits::One(), "cleanup.cond"); |
296 | | |
297 | | // Initialize it to false at a site that's guaranteed to be run |
298 | | // before each evaluation. |
299 | 206 | setBeforeOutermostConditional(Builder.getFalse(), active); |
300 | | |
301 | | // Initialize it to true at the current location. |
302 | 206 | Builder.CreateStore(Builder.getTrue(), active); |
303 | | |
304 | 206 | return active; |
305 | 206 | } |
306 | | |
307 | 221 | void CodeGenFunction::initFullExprCleanupWithFlag(Address ActiveFlag) { |
308 | | // Set that as the active flag in the cleanup. |
309 | 221 | EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin()); |
310 | 221 | assert(!cleanup.hasActiveFlag() && "cleanup already has active flag?"); |
311 | 0 | cleanup.setActiveFlag(ActiveFlag); |
312 | | |
313 | 221 | if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup(); |
314 | 221 | if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup(); |
315 | 221 | } |
316 | | |
317 | 0 | void EHScopeStack::Cleanup::anchor() {} |
318 | | |
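| | /// Emit a store of \p value to \p addr, placed immediately before |
| | /// \p beforeInst and carrying the address's alignment. |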
319 | | static void createStoreInstBefore(llvm::Value *value, Address addr, |
320 | 4.75k | llvm::Instruction *beforeInst) { |
321 | 4.75k | auto store = new llvm::StoreInst(value, addr.getPointer(), beforeInst); |
322 | 4.75k | store->setAlignment(addr.getAlignment().getAsAlign()); |
323 | 4.75k | } |
324 | | |
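| | /// Emit a load from \p addr, placed immediately before \p beforeInst; |
| | /// a null \p beforeInst leaves the new load detached, to be inserted |
| | /// by the caller. |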
325 | | static llvm::LoadInst *createLoadInstBefore(Address addr, const Twine &name, |
326 | 609 | llvm::Instruction *beforeInst) { |
327 | 609 | return new llvm::LoadInst(addr.getElementType(), addr.getPointer(), name, |
328 | 609 | false, addr.getAlignment().getAsAlign(), |
329 | 609 | beforeInst); |
330 | 609 | } |
331 | | |
332 | | /// All the branch fixups on the EH stack have propagated out past the |
333 | | /// outermost normal cleanup; resolve them all by adding cases to the |
334 | | /// given switch instruction. |
335 | | static void ResolveAllBranchFixups(CodeGenFunction &CGF, |
336 | | llvm::SwitchInst *Switch, |
337 | 4 | llvm::BasicBlock *CleanupEntry) { |
338 | 4 | llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded; |
339 | | |
340 | 8 | for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) { |
341 | | // Skip this fixup if its destination isn't set. |
342 | 4 | BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I); |
343 | 4 | if (Fixup.Destination == nullptr) continue; |
344 | | |
345 | | // If there isn't an OptimisticBranchBlock, then InitialBranch is |
346 | | // still pointing directly to its destination; forward it to the |
347 | | // appropriate cleanup entry. This is required in the specific |
348 | | // case of |
349 | | // { std::string s; goto lbl; } |
350 | | // lbl: |
351 | | // i.e. where there's an unresolved fixup inside a single cleanup |
352 | | // entry which we're currently popping. |
353 | 4 | if (Fixup.OptimisticBranchBlock == nullptr) { |
354 | 2 | createStoreInstBefore(CGF.Builder.getInt32(Fixup.DestinationIndex), |
355 | 2 | CGF.getNormalCleanupDestSlot(), |
356 | 2 | Fixup.InitialBranch); |
357 | 2 | Fixup.InitialBranch->setSuccessor(0, CleanupEntry); |
358 | 2 | } |
359 | | |
360 | | // Don't add this case to the switch statement twice. |
361 | 4 | if (!CasesAdded.insert(Fixup.Destination).second) |
362 | 0 | continue; |
363 | | |
364 | 4 | Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex), |
365 | 4 | Fixup.Destination); |
366 | 4 | } |
367 | | |
368 | 4 | CGF.EHStack.clearFixups(); |
369 | 4 | } |
370 | | |
371 | | /// Transitions the terminator of the given exit-block of a cleanup to |
372 | | /// be a cleanup switch. |
373 | | static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF, |
374 | 4 | llvm::BasicBlock *Block) { |
375 | | // If it's a branch, turn it into a switch whose default |
376 | | // destination is its original target. |
377 | 4 | llvm::Instruction *Term = Block->getTerminator(); |
378 | 4 | assert(Term && "can't transition block without terminator"); |
379 | | |
380 | 4 | if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { |
381 | 4 | assert(Br->isUnconditional()); |
382 | 0 | auto Load = createLoadInstBefore(CGF.getNormalCleanupDestSlot(), |
383 | 4 | "cleanup.dest", Term); |
384 | 4 | llvm::SwitchInst *Switch = |
385 | 4 | llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block); |
386 | 4 | Br->eraseFromParent(); |
387 | 4 | return Switch; |
388 | 4 | } else { |
389 | 0 | return cast<llvm::SwitchInst>(Term); |
390 | 0 | } |
391 | 4 | } |
392 | | |
393 | 154 | void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) { |
394 | 154 | assert(Block && "resolving a null target block"); |
395 | 154 | if (!EHStack.getNumBranchFixups()) return; |
396 | | |
397 | 5 | assert(EHStack.hasNormalCleanups() && |
398 | 5 | "branch fixups exist with no normal cleanups on stack"); |
399 | | |
400 | 0 | llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks; |
401 | 5 | bool ResolvedAny = false; |
402 | | |
403 | 3.00k | for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) { |
404 | | // Skip this fixup if its destination doesn't match. |
405 | 3.00k | BranchFixup &Fixup = EHStack.getBranchFixup(I); |
406 | 3.00k | if (Fixup.Destination != Block) continue; |
407 | | |
408 | 3.00k | Fixup.Destination = nullptr; |
409 | 3.00k | ResolvedAny = true; |
410 | | |
411 | | // If it doesn't have an optimistic branch block, LatestBranch is |
412 | | // already pointing to the right place. |
413 | 3.00k | llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock; |
414 | 3.00k | if (!BranchBB) |
415 | 3.00k | continue; |
416 | | |
417 | | // Don't process the same optimistic branch block twice. |
418 | 4 | if (!ModifiedOptimisticBlocks.insert(BranchBB).second) |
419 | 0 | continue; |
420 | | |
421 | 4 | llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB); |
422 | | |
423 | | // Add a case to the switch. |
424 | 4 | Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block); |
425 | 4 | } |
426 | | |
427 | 5 | if (ResolvedAny) |
428 | 5 | EHStack.popNullFixups(); |
429 | 5 | } |
430 | | |
431 | | /// Pops cleanup blocks until the given savepoint is reached. |
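| | /// Values in ValuesToReload that are defined by instructions (and are |
| | /// not static allocas) are spilled to a temporary right after their |
| | /// definition and reloaded at the current insertion point, since the |
| | /// emitted cleanup blocks may break dominance. |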
432 | | void CodeGenFunction::PopCleanupBlocks( |
433 | | EHScopeStack::stable_iterator Old, |
434 | 1.00M | std::initializer_list<llvm::Value **> ValuesToReload) { |
435 | 1.00M | assert(Old.isValid()); |
436 | | |
437 | 0 | bool HadBranches = false; |
438 | 1.05M | while (EHStack.stable_begin() != Old) { |
439 | 43.8k | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); |
440 | 43.8k | HadBranches |= Scope.hasBranches(); |
441 | | |
442 | | // As long as Old strictly encloses the scope's enclosing normal |
443 | | // cleanup, we're going to emit another normal cleanup which |
444 | | // fallthrough can propagate through. |
445 | 43.8k | bool FallThroughIsBranchThrough = |
446 | 43.8k | Old.strictlyEncloses(Scope.getEnclosingNormalCleanup()); |
447 | | |
448 | 43.8k | PopCleanupBlock(FallThroughIsBranchThrough); |
449 | 43.8k | } |
450 | | |
451 | | // If we didn't have any branches, the insertion point before cleanups must |
452 | | // dominate the current insertion point and we don't need to reload any |
453 | | // values. |
454 | 1.00M | if (!HadBranches) |
455 | 1.00M | return; |
456 | | |
457 | | // Spill and reload all values that the caller wants to be live at the current |
458 | | // insertion point. |
459 | 4.57k | for (llvm::Value **ReloadedValue : ValuesToReload) { |
460 | 288 | auto *Inst = dyn_cast_or_null<llvm::Instruction>(*ReloadedValue); |
461 | 288 | if (!Inst) |
462 | 239 | continue; |
463 | | |
464 | | // Don't spill static allocas, they dominate all cleanups. These are created |
465 | | // by binding a reference to a local variable or temporary. |
466 | 49 | auto *AI = dyn_cast<llvm::AllocaInst>(Inst); |
467 | 49 | if (AI && AI->isStaticAlloca()) |
468 | 3 | continue; |
469 | | |
470 | 46 | Address Tmp = |
471 | 46 | CreateDefaultAlignTempAlloca(Inst->getType(), "tmp.exprcleanup"); |
472 | | |
473 | | // Find an insertion point after Inst and spill it to the temporary. |
474 | 46 | llvm::BasicBlock::iterator InsertBefore; |
475 | 46 | if (auto *Invoke = dyn_cast<llvm::InvokeInst>(Inst)) |
476 | 0 | InsertBefore = Invoke->getNormalDest()->getFirstInsertionPt(); |
477 | 46 | else |
478 | 46 | InsertBefore = std::next(Inst->getIterator()); |
479 | 46 | CGBuilderTy(CGM, &*InsertBefore).CreateStore(Inst, Tmp); |
480 | | |
481 | | // Reload the value at the current insertion point. |
482 | 46 | *ReloadedValue = Builder.CreateLoad(Tmp); |
483 | 46 | } |
484 | 4.57k | } |
485 | | |
486 | | /// Pops cleanup blocks until the given savepoint is reached, then add the |
487 | | /// cleanups from the given savepoint in the lifetime-extended cleanups stack. |
488 | | void CodeGenFunction::PopCleanupBlocks( |
489 | | EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize, |
490 | 999k | std::initializer_list<llvm::Value **> ValuesToReload) { |
491 | 999k | PopCleanupBlocks(Old, ValuesToReload); |
492 | | |
493 | | // Move our deferred cleanups onto the EH stack. |
494 | 999k | for (size_t I = OldLifetimeExtendedSize, |
495 | 1.00M | E = LifetimeExtendedCleanupStack.size(); I != E; /**/) { |
496 | | // Alignment should be guaranteed by the vptrs in the individual cleanups. |
497 | 570 | assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) && |
498 | 570 | "misaligned cleanup stack entry"); |
499 | | |
500 | 0 | LifetimeExtendedCleanupHeader &Header = |
501 | 570 | reinterpret_cast<LifetimeExtendedCleanupHeader&>( |
502 | 570 | LifetimeExtendedCleanupStack[I]); |
503 | 570 | I += sizeof(Header); |
504 | | |
505 | 570 | EHStack.pushCopyOfCleanup(Header.getKind(), |
506 | 570 | &LifetimeExtendedCleanupStack[I], |
507 | 570 | Header.getSize()); |
508 | 570 | I += Header.getSize(); |
509 | | |
510 | 570 | if (Header.isConditional()) { |
511 | 29 | Address ActiveFlag = |
512 | 29 | reinterpret_cast<Address &>(LifetimeExtendedCleanupStack[I]); |
513 | 29 | initFullExprCleanupWithFlag(ActiveFlag); |
514 | 29 | I += sizeof(ActiveFlag); |
515 | 29 | } |
516 | 570 | } |
517 | 999k | LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize); |
518 | 999k | } |
519 | | |
520 | | static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF, |
521 | 14.6k | EHCleanupScope &Scope) { |
522 | 14.6k | assert(Scope.isNormalCleanup()); |
523 | 0 | llvm::BasicBlock *Entry = Scope.getNormalBlock(); |
524 | 14.6k | if (!Entry) { |
525 | 7.14k | Entry = CGF.createBasicBlock("cleanup"); |
526 | 7.14k | Scope.setNormalBlock(Entry); |
527 | 7.14k | } |
528 | 14.6k | return Entry; |
529 | 14.6k | } |
530 | | |
531 | | /// Attempts to reduce a cleanup's entry block to a fallthrough. This |
532 | | /// is basically llvm::MergeBlockIntoPredecessor, except |
533 | | /// simplified/optimized for the tighter constraints on cleanup blocks. |
534 | | /// |
535 | | /// Returns the new block, whatever it is. |
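| | /// |
| | /// The merge is performed only when the entry has a single predecessor |
| | /// that ends in an unconditional branch to it. |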
536 | | static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF, |
537 | 14.6k | llvm::BasicBlock *Entry) { |
538 | 14.6k | llvm::BasicBlock *Pred = Entry->getSinglePredecessor(); |
539 | 14.6k | if (!Pred) return Entry; |
540 | | |
541 | 12.4k | llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator()); |
542 | 12.4k | if (!Br || Br->isConditional()) return Entry; |
543 | 12.3k | assert(Br->getSuccessor(0) == Entry); |
544 | | |
545 | | // If we were previously inserting at the end of the cleanup entry |
546 | | // block, we'll need to continue inserting at the end of the |
547 | | // predecessor. |
548 | 0 | bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry; |
549 | 12.3k | assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end()); |
550 | | |
551 | | // Kill the branch. |
552 | 0 | Br->eraseFromParent(); |
553 | | |
554 | | // Replace all uses of the entry with the predecessor, in case there |
555 | | // are phis in the cleanup. |
556 | 12.3k | Entry->replaceAllUsesWith(Pred); |
557 | | |
558 | | // Merge the blocks. |
559 | 12.3k | Pred->getInstList().splice(Pred->end(), Entry->getInstList()); |
560 | | |
561 | | // Kill the entry block. |
562 | 12.3k | Entry->eraseFromParent(); |
563 | | |
564 | 12.3k | if (WasInsertBlock) |
565 | 109 | CGF.Builder.SetInsertPoint(Pred); |
566 | | |
567 | 12.3k | return Pred; |
568 | 12.4k | } |
569 | | |
570 | | static void EmitCleanup(CodeGenFunction &CGF, |
571 | | EHScopeStack::Cleanup *Fn, |
572 | | EHScopeStack::Cleanup::Flags flags, |
573 | 49.7k | Address ActiveFlag) { |
574 | | // If there's an active flag, load it and skip the cleanup if it's |
575 | | // false. |
576 | 49.7k | llvm::BasicBlock *ContBB = nullptr; |
577 | 49.7k | if (ActiveFlag.isValid()) { |
578 | 270 | ContBB = CGF.createBasicBlock("cleanup.done"); |
579 | 270 | llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action"); |
580 | 270 | llvm::Value *IsActive |
581 | 270 | = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active"); |
582 | 270 | CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB); |
583 | 270 | CGF.EmitBlock(CleanupBB); |
584 | 270 | } |
585 | | |
586 | | // Ask the cleanup to emit itself. |
587 | 49.7k | Fn->Emit(CGF, flags); |
588 | 49.7k | assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); |
589 | | |
590 | | // Emit the continuation block if there was an active flag. |
591 | 49.7k | if (ActiveFlag.isValid()) |
592 | 270 | CGF.EmitBlock(ContBB); |
593 | 49.7k | } |
594 | | |
595 | | static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit, |
596 | | llvm::BasicBlock *From, |
597 | 0 | llvm::BasicBlock *To) { |
598 | | // Exit is the exit block of a cleanup, so it always terminates in |
599 | | // an unconditional branch or a switch. |
600 | 0 | llvm::Instruction *Term = Exit->getTerminator(); |
601 | |
602 | 0 | if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { |
603 | 0 | assert(Br->isUnconditional() && Br->getSuccessor(0) == From); |
604 | 0 | Br->setSuccessor(0, To); |
605 | 0 | } else { |
606 | 0 | llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term); |
607 | 0 | for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I) |
608 | 0 | if (Switch->getSuccessor(I) == From) |
609 | 0 | Switch->setSuccessor(I, To); |
610 | 0 | } |
611 | 0 | } |
612 | | |
613 | | /// We don't need a normal entry block for the given cleanup. |
614 | | /// Optimistic fixup branches can cause these blocks to come into |
615 | | /// existence anyway; if so, destroy it. |
616 | | /// |
617 | | /// The validity of this transformation is very much specific to the |
618 | | /// exact ways in which we form branches to cleanup entries. |
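| | /// |
| | /// Concretely, the only expected uses are fixup switches over the |
| | /// cleanup-dest slot, which are rewritten here into direct branches. |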
619 | | static void destroyOptimisticNormalEntry(CodeGenFunction &CGF, |
620 | 41.5k | EHCleanupScope &scope) { |
621 | 41.5k | llvm::BasicBlock *entry = scope.getNormalBlock(); |
622 | 41.5k | if (!entry) return; |
623 | | |
624 | | // Replace all the uses with unreachable. |
625 | 4 | llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock(); |
626 | 4 | for (llvm::BasicBlock::use_iterator |
627 | 8 | i = entry->use_begin(), e = entry->use_end(); i != e; ) { |
628 | 4 | llvm::Use &use = *i; |
629 | 4 | ++i; |
630 | | |
631 | 4 | use.set(unreachableBB); |
632 | | |
633 | | // The only uses should be fixup switches. |
634 | 4 | llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser()); |
635 | 4 | if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) { |
636 | | // Replace the switch with a branch. |
637 | 4 | llvm::BranchInst::Create(si->case_begin()->getCaseSuccessor(), si); |
638 | | |
639 | | // The switch operand is a load from the cleanup-dest alloca. |
640 | 4 | llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition()); |
641 | | |
642 | | // Destroy the switch. |
643 | 4 | si->eraseFromParent(); |
644 | | |
645 | | // Destroy the load. |
646 | 4 | assert(condition->getOperand(0) == CGF.NormalCleanupDest.getPointer()); |
647 | 0 | assert(condition->use_empty()); |
648 | 0 | condition->eraseFromParent(); |
649 | 4 | } |
650 | 4 | } |
651 | | |
652 | 4 | assert(entry->use_empty()); |
653 | 0 | delete entry; |
654 | 4 | } |
655 | | |
656 | | /// Pops a cleanup block. If the block includes a normal cleanup, the |
657 | | /// current insertion point is threaded through the cleanup, as are |
658 | | /// any branch fixups on the cleanup. |
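| | /// |
| | /// A normal cleanup is emitted when there are pending fixups, existing |
| | /// branch-throughs/branch-afters, or an active fallthrough; an EH |
| | /// cleanup is emitted only when a lazy EH dispatch block was created. |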
659 | 48.6k | void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { |
660 | 48.6k | assert(!EHStack.empty() && "cleanup stack is empty!"); |
661 | 0 | assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!"); |
662 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); |
663 | 48.6k | assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); |
664 | | |
665 | | // Remember activation information. |
666 | 0 | bool IsActive = Scope.isActive(); |
667 | 48.6k | Address NormalActiveFlag = |
668 | 48.6k | Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() |
669 | 48.6k | : Address::invalid(); |
670 | 48.6k | Address EHActiveFlag = |
671 | 48.6k | Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() |
672 | 48.6k | : Address::invalid(); |
673 | | |
674 | | // Check whether we need an EH cleanup. This is only true if we've |
675 | | // generated a lazy EH cleanup block. |
676 | 48.6k | llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock(); |
677 | 48.6k | assert(Scope.hasEHBranches() == (EHEntry != nullptr)); |
678 | 0 | bool RequiresEHCleanup = (EHEntry != nullptr); |
679 | 48.6k | EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope(); |
680 | | |
681 | | // Check the three conditions which might require a normal cleanup: |
682 | | |
683 | | // - whether there are branch fix-ups through this cleanup |
684 | 48.6k | unsigned FixupDepth = Scope.getFixupDepth(); |
685 | 48.6k | bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth; |
686 | | |
687 | | // - whether there are branch-throughs or branch-afters |
688 | 48.6k | bool HasExistingBranches = Scope.hasBranches(); |
689 | | |
690 | | // - whether there's a fallthrough |
691 | 48.6k | llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock(); |
692 | 48.6k | bool HasFallthrough = (FallthroughSource != nullptr && IsActive); |
693 | | |
694 | | // Branch-through fall-throughs leave the insertion point set to the |
695 | | // end of the last cleanup, which points to the current scope. The |
696 | | // rest of IR gen doesn't need to worry about this; it only happens |
697 | | // during the execution of PopCleanupBlocks(). |
698 | 48.6k | bool HasPrebranchedFallthrough = |
699 | 48.6k | (FallthroughSource && FallthroughSource->getTerminator()); |
700 | | |
701 | | // If this is a normal cleanup, then having a prebranched |
702 | | // fallthrough implies that the fallthrough source unconditionally |
703 | | // jumps here. |
704 | 48.6k | assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough || |
705 | 48.6k | (Scope.getNormalBlock() && |
706 | 48.6k | FallthroughSource->getTerminator()->getSuccessor(0) |
707 | 48.6k | == Scope.getNormalBlock())); |
708 | | |
709 | 0 | bool RequiresNormalCleanup = false; |
710 | 48.6k | if (Scope.isNormalCleanup() && |
711 | 48.6k | (HasFixups || HasExistingBranches || HasFallthrough)) { |
712 | 42.3k | RequiresNormalCleanup = true; |
713 | 42.3k | } |
714 | | |
715 | | // If we have a prebranched fallthrough into an inactive normal |
716 | | // cleanup, rewrite it so that it leads to the appropriate place. |
717 | 48.6k | if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) { |
718 | 0 | llvm::BasicBlock *prebranchDest; |
719 | | |
720 | | // If the prebranch is semantically branching through the next |
721 | | // cleanup, just forward it to the next block, leaving the |
722 | | // insertion point in the prebranched block. |
723 | 0 | if (FallthroughIsBranchThrough) { |
724 | 0 | EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup()); |
725 | 0 | prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing)); |
726 | | |
727 | | // Otherwise, we need to make a new block. If the normal cleanup |
728 | | // isn't being used at all, we could actually reuse the normal |
729 | | // entry block, but this is simpler, and it avoids conflicts with |
730 | | // dead optimistic fixup branches. |
731 | 0 | } else { |
732 | 0 | prebranchDest = createBasicBlock("forwarded-prebranch"); |
733 | 0 | EmitBlock(prebranchDest); |
734 | 0 | } |
735 | |
736 | 0 | llvm::BasicBlock *normalEntry = Scope.getNormalBlock(); |
737 | 0 | assert(normalEntry && !normalEntry->use_empty()); |
738 | | |
739 | 0 | ForwardPrebranchedFallthrough(FallthroughSource, |
740 | 0 | normalEntry, prebranchDest); |
741 | 0 | } |
742 | | |
743 | | // If we don't need the cleanup at all, we're done. |
744 | 48.6k | if (!RequiresNormalCleanup && !RequiresEHCleanup) { |
745 | 4.99k | destroyOptimisticNormalEntry(*this, Scope); |
746 | 4.99k | EHStack.popCleanup(); // safe because there are no fixups |
747 | 4.99k | assert(EHStack.getNumBranchFixups() == 0 || |
748 | 4.99k | EHStack.hasNormalCleanups()); |
749 | 0 | return; |
750 | 4.99k | } |
751 | | |
752 | | // Copy the cleanup emission data out. This uses either a stack |
753 | | // array or malloc'd memory, depending on the size, which is |
754 | | // behavior that SmallVector would provide, if we could use it |
755 | | // here. Unfortunately, if you ask for a SmallVector<char>, the |
756 | | // alignment isn't sufficient. |
757 | 43.6k | auto *CleanupSource = reinterpret_cast<char *>(Scope.getCleanupBuffer()); |
758 | 43.6k | alignas(EHScopeStack::ScopeStackAlignment) char |
759 | 43.6k | CleanupBufferStack[8 * sizeof(void *)]; |
760 | 43.6k | std::unique_ptr<char[]> CleanupBufferHeap; |
761 | 43.6k | size_t CleanupSize = Scope.getCleanupSize(); |
762 | 43.6k | EHScopeStack::Cleanup *Fn; |
763 | | |
764 | 43.6k | if (CleanupSize <= sizeof(CleanupBufferStack)) { |
765 | 43.5k | memcpy(CleanupBufferStack, CleanupSource, CleanupSize); |
766 | 43.5k | Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferStack); |
767 | 43.5k | } else { |
768 | 81 | CleanupBufferHeap.reset(new char[CleanupSize]); |
769 | 81 | memcpy(CleanupBufferHeap.get(), CleanupSource, CleanupSize); |
770 | 81 | Fn = reinterpret_cast<EHScopeStack::Cleanup *>(CleanupBufferHeap.get()); |
771 | 81 | } |
772 | | |
773 | 43.6k | EHScopeStack::Cleanup::Flags cleanupFlags; |
774 | 43.6k | if (Scope.isNormalCleanup()) |
775 | 42.4k | cleanupFlags.setIsNormalCleanupKind(); |
776 | 43.6k | if (Scope.isEHCleanup()) |
777 | 18.9k | cleanupFlags.setIsEHCleanupKind(); |
778 | | |
779 | | // Under -EHa, invoke seh.scope.end() to mark scope end before dtor |
780 | 43.6k | bool IsEHa = getLangOpts().EHAsynch && !Scope.isLifetimeMarker(); |
781 | 43.6k | const EHPersonality &Personality = EHPersonality::get(*this); |
782 | 43.6k | if (!RequiresNormalCleanup) { |
783 | | // Mark CPP scope end for passed-by-value Arg temp |
784 | | // per Windows ABI which is "normally" Cleanup in callee |
785 | 1.35k | if (IsEHa && getInvokeDest()) { |
786 | 4 | if (Personality.isMSVCXXPersonality()) |
787 | 4 | EmitSehCppScopeEnd(); |
788 | 4 | } |
789 | 1.35k | destroyOptimisticNormalEntry(*this, Scope); |
790 | 1.35k | EHStack.popCleanup(); |
791 | 42.3k | } else { |
792 | | // If we have a fallthrough and no other need for the cleanup, |
793 | | // emit it directly. |
794 | 42.3k | if (HasFallthrough && !HasPrebranchedFallthrough36.0k && !HasFixups35.7k && |
795 | 42.3k | !HasExistingBranches35.7k ) { |
796 | | |
797 | | // mark SEH scope end for fall-through flow |
798 | 35.1k | if (IsEHa && getInvokeDest()11 ) { |
799 | 10 | if (Personality.isMSVCXXPersonality()) |
800 | 8 | EmitSehCppScopeEnd(); |
801 | 2 | else |
802 | 2 | EmitSehTryScopeEnd(); |
803 | 10 | } |
804 | | |
805 | 35.1k | destroyOptimisticNormalEntry(*this, Scope); |
806 | 35.1k | EHStack.popCleanup(); |
807 | | |
808 | 35.1k | EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); |
809 | | |
810 | | // Otherwise, the best approach is to thread everything through |
811 | | // the cleanup block and then try to clean up after ourselves. |
812 | 35.1k | } else { |
813 | | // Force the entry block to exist. |
814 | 7.13k | llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope); |
815 | | |
816 | | // I. Set up the fallthrough edge in. |
817 | | |
818 | 7.13k | CGBuilderTy::InsertPoint savedInactiveFallthroughIP; |
819 | | |
820 | | // If there's a fallthrough, we need to store the cleanup |
821 | | // destination index. For fall-throughs this is always zero. |
822 | 7.13k | if (HasFallthrough) { |
823 | 842 | if (!HasPrebranchedFallthrough) |
824 | 590 | Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot()); |
825 | | |
826 | | // Otherwise, save and clear the IP if we don't have fallthrough |
827 | | // because the cleanup is inactive. |
828 | 6.29k | } else if (FallthroughSource) { |
829 | 0 | assert(!IsActive && "source without fallthrough for active cleanup"); |
830 | 0 | savedInactiveFallthroughIP = Builder.saveAndClearIP(); |
831 | 0 | } |
832 | | |
833 | | // II. Emit the entry block. This implicitly branches to it if |
834 | | // we have fallthrough. All the fixups and existing branches |
835 | | // should already be branched to it. |
836 | 0 | EmitBlock(NormalEntry); |
837 | | |
838 | | // intercept normal cleanup to mark SEH scope end |
839 | 7.13k | if (IsEHa) { |
840 | 5 | if (Personality.isMSVCXXPersonality()) |
841 | 5 | EmitSehCppScopeEnd(); |
842 | 0 | else |
843 | 0 | EmitSehTryScopeEnd(); |
844 | 5 | } |
845 | | |
846 | | // III. Figure out where we're going and build the cleanup |
847 | | // epilogue. |
848 | | |
849 | 7.13k | bool HasEnclosingCleanups = |
850 | 7.13k | (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); |
851 | | |
852 | | // Compute the branch-through dest if we need it: |
853 | | // - if there are branch-throughs threaded through the scope |
854 | | // - if fall-through is a branch-through |
855 | | // - if there are fixups that will be optimistically forwarded |
856 | | // to the enclosing cleanup |
857 | 7.13k | llvm::BasicBlock *BranchThroughDest = nullptr; |
858 | 7.13k | if (Scope.hasBranchThroughs() || |
859 | 7.13k | (FallthroughSource && FallthroughIsBranchThrough) || |
860 | 7.13k | (HasFixups && HasEnclosingCleanups)) { |
861 | 2.84k | assert(HasEnclosingCleanups); |
862 | 0 | EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup()); |
863 | 2.84k | BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S)); |
864 | 2.84k | } |
865 | | |
866 | 0 | llvm::BasicBlock *FallthroughDest = nullptr; |
867 | 7.13k | SmallVector<llvm::Instruction*, 2> InstsToAppend; |
868 | | |
869 | | // If there's exactly one branch-after and no other threads, |
870 | | // we can route it without a switch. |
871 | 7.13k | if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && |
872 | 7.13k | Scope.getNumBranchAfters() == 1) { |
873 | 4.00k | assert(!BranchThroughDest || !IsActive); |
874 | | |
875 | | // Clean up the possibly dead store to the cleanup dest slot. |
876 | 0 | llvm::Instruction *NormalCleanupDestSlot = |
877 | 4.00k | cast<llvm::Instruction>(getNormalCleanupDestSlot().getPointer()); |
878 | 4.00k | if (NormalCleanupDestSlot->hasOneUse()) { |
879 | 3.88k | NormalCleanupDestSlot->user_back()->eraseFromParent(); |
880 | 3.88k | NormalCleanupDestSlot->eraseFromParent(); |
881 | 3.88k | NormalCleanupDest = Address::invalid(); |
882 | 3.88k | } |
883 | | |
884 | 4.00k | llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0); |
885 | 4.00k | InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter)); |
886 | | |
887 | | // Build a switch-out if we need it: |
888 | | // - if there are branch-afters threaded through the scope |
889 | | // - if fall-through is a branch-after |
890 | | // - if there are fixups that have nowhere left to go and |
891 | | // so must be immediately resolved |
892 | 4.00k | } else if (3.13k Scope.getNumBranchAfters()3.13k || |
893 | 3.13k | (2.84k HasFallthrough2.84k && !FallthroughIsBranchThrough559 ) || |
894 | 3.13k | (2.53k HasFixups2.53k && !HasEnclosingCleanups12 )) { |
895 | | |
896 | 605 | llvm::BasicBlock *Default = |
897 | 605 | (BranchThroughDest ? BranchThroughDest312 : getUnreachableBlock()293 ); |
898 | | |
899 | | // TODO: base this on the number of branch-afters and fixups |
900 | 605 | const unsigned SwitchCapacity = 10; |
901 | | |
902 | | // pass the abnormal exit flag to Fn (SEH cleanup) |
903 | 605 | cleanupFlags.setHasExitSwitch(); |
904 | | |
905 | 605 | llvm::LoadInst *Load = |
906 | 605 | createLoadInstBefore(getNormalCleanupDestSlot(), "cleanup.dest", |
907 | 605 | nullptr); |
908 | 605 | llvm::SwitchInst *Switch = |
909 | 605 | llvm::SwitchInst::Create(Load, Default, SwitchCapacity); |
910 | | |
911 | 605 | InstsToAppend.push_back(Load); |
912 | 605 | InstsToAppend.push_back(Switch); |
913 | | |
914 | | // Branch-after fallthrough. |
915 | 605 | if (FallthroughSource && !FallthroughIsBranchThrough) { |
916 | 590 | FallthroughDest = createBasicBlock("cleanup.cont"); |
917 | 590 | if (HasFallthrough) |
918 | 590 | Switch->addCase(Builder.getInt32(0), FallthroughDest); |
919 | 590 | } |
920 | | |
921 | 923 | for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) { |
922 | 318 | Switch->addCase(Scope.getBranchAfterIndex(I), |
923 | 318 | Scope.getBranchAfterBlock(I)); |
924 | 318 | } |
925 | | |
926 | | // If there aren't any enclosing cleanups, we can resolve all |
927 | | // the fixups now. |
928 | 605 | if (HasFixups && !HasEnclosingCleanups) |
929 | 4 | ResolveAllBranchFixups(*this, Switch, NormalEntry); |
930 | 2.53k | } else { |
931 | | // We should always have a branch-through destination in this case. |
932 | 2.53k | assert(BranchThroughDest); |
933 | 0 | InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest)); |
934 | 2.53k | } |
935 | | |
936 | | // IV. Pop the cleanup and emit it. |
937 | 0 | EHStack.popCleanup(); |
938 | 7.13k | assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups); |
939 | | |
940 | 0 | EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); |
941 | | |
942 | | // Append the prepared cleanup prologue from above. |
943 | 7.13k | llvm::BasicBlock *NormalExit = Builder.GetInsertBlock(); |
944 | 14.8k | for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I) |
945 | 7.74k | NormalExit->getInstList().push_back(InstsToAppend[I]); |
946 | | |
947 | | // Optimistically hope that any fixups will continue falling through. |
948 | 7.13k | for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); |
949 | 7.14k | I < E; ++I) { |
950 | 10 | BranchFixup &Fixup = EHStack.getBranchFixup(I); |
951 | 10 | if (!Fixup.Destination) continue; |
952 | 10 | if (!Fixup.OptimisticBranchBlock) { |
953 | 6 | createStoreInstBefore(Builder.getInt32(Fixup.DestinationIndex), |
954 | 6 | getNormalCleanupDestSlot(), |
955 | 6 | Fixup.InitialBranch); |
956 | 6 | Fixup.InitialBranch->setSuccessor(0, NormalEntry); |
957 | 6 | } |
958 | 10 | Fixup.OptimisticBranchBlock = NormalExit; |
959 | 10 | } |
960 | | |
961 | | // V. Set up the fallthrough edge out. |
962 | | |
963 | | // Case 1: a fallthrough source exists but doesn't branch to the |
964 | | // cleanup because the cleanup is inactive. |
965 | 7.13k | if (!HasFallthrough && FallthroughSource) { |
966 | | // Prebranched fallthrough was forwarded earlier. |
967 | | // Non-prebranched fallthrough doesn't need to be forwarded. |
968 | | // Either way, all we need to do is restore the IP we cleared before. |
969 | 0 | assert(!IsActive); |
970 | 0 | Builder.restoreIP(savedInactiveFallthroughIP); |
971 | | |
972 | | // Case 2: a fallthrough source exists and should branch to the |
973 | | // cleanup, but we're not supposed to branch through to the next |
974 | | // cleanup. |
975 | 7.13k | } else if (HasFallthrough && FallthroughDest) { |
976 | 590 | assert(!FallthroughIsBranchThrough); |
977 | 0 | EmitBlock(FallthroughDest); |
978 | | |
979 | | // Case 3: a fallthrough source exists and should branch to the |
980 | | // cleanup and then through to the next. |
981 | 6.54k | } else if (HasFallthrough) { |
982 | | // Everything is already set up for this. |
983 | | |
984 | | // Case 4: no fallthrough source exists. |
985 | 6.29k | } else { |
986 | 6.29k | Builder.ClearInsertionPoint(); |
987 | 6.29k | } |
988 | | |
989 | | // VI. Assorted cleaning. |
990 | | |
991 | | // Check whether we can merge NormalEntry into a single predecessor. |
992 | | // This might invalidate (non-IR) pointers to NormalEntry. |
993 | 0 | llvm::BasicBlock *NewNormalEntry = |
994 | 7.13k | SimplifyCleanupEntry(*this, NormalEntry); |
995 | | |
996 | | // If it did invalidate those pointers, and NormalEntry was the same |
997 | | // as NormalExit, go back and patch up the fixups. |
998 | 7.13k | if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit) |
999 | 5.18k | for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); |
1000 | 5.19k | I < E; ++I) |
1001 | 5 | EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry; |
1002 | 7.13k | } |
1003 | 42.3k | } |
1004 | | |
1005 | 0 | assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0); |
1006 | | |
1007 | | // Emit the EH cleanup if required. |
1008 | 43.6k | if (RequiresEHCleanup) { |
1009 | 7.47k | CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); |
1010 | | |
1011 | 7.47k | EmitBlock(EHEntry); |
1012 | | |
1013 | 7.47k | llvm::BasicBlock *NextAction = getEHDispatchBlock(EHParent); |
1014 | | |
1015 | | // Push a terminate scope or cleanupendpad scope around the potentially |
1016 | | // throwing cleanups. For funclet EH personalities, the cleanupendpad models |
1017 | | // program termination when cleanups throw. |
1018 | 7.47k | bool PushedTerminate = false; |
1019 | 7.47k | SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad( |
1020 | 7.47k | CurrentFuncletPad); |
1021 | 7.47k | llvm::CleanupPadInst *CPI = nullptr; |
1022 | | |
1023 | 7.47k | const EHPersonality &Personality = EHPersonality::get(*this); |
1024 | 7.47k | if (Personality.usesFuncletPads()) { |
1025 | 203 | llvm::Value *ParentPad = CurrentFuncletPad; |
1026 | 203 | if (!ParentPad) |
1027 | 198 | ParentPad = llvm::ConstantTokenNone::get(CGM.getLLVMContext()); |
1028 | 203 | CurrentFuncletPad = CPI = Builder.CreateCleanupPad(ParentPad); |
1029 | 203 | } |
1030 | | |
1031 | | // Non-MSVC personalities need to terminate when an EH cleanup throws. |
1032 | 7.47k | if (!Personality.isMSVCPersonality()) { |
1033 | 7.27k | EHStack.pushTerminate(); |
1034 | 7.27k | PushedTerminate = true; |
1035 | 7.27k | } |
1036 | | |
1037 | | // We only actually emit the cleanup code if the cleanup is either |
1038 | | // active or was used before it was deactivated. |
1039 | 7.47k | if (EHActiveFlag.isValid() || IsActive) { |
1040 | 7.47k | cleanupFlags.setIsForEHCleanup(); |
1041 | 7.47k | EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag); |
1042 | 7.47k | } |
1043 | | |
1044 | 7.47k | if (CPI) |
1045 | 203 | Builder.CreateCleanupRet(CPI, NextAction); |
1046 | 7.27k | else |
1047 | 7.27k | Builder.CreateBr(NextAction); |
1048 | | |
1049 | | // Leave the terminate scope. |
1050 | 7.47k | if (PushedTerminate) |
1051 | 7.27k | EHStack.popTerminate(); |
1052 | | |
1053 | 7.47k | Builder.restoreIP(SavedIP); |
1054 | | |
1055 | 7.47k | SimplifyCleanupEntry(*this, EHEntry); |
1056 | 7.47k | } |
1057 | 43.6k | } |
1058 | | |
1059 | | /// isObviouslyBranchWithoutCleanups - Return true if a branch to the |
1060 | | /// specified destination obviously has no cleanups to run. 'false' is always |
1061 | | /// a conservatively correct answer for this method. |
1062 | 15 | bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const { |
1063 | 15 | assert(Dest.getScopeDepth().encloses(EHStack.stable_begin()) |
1064 | 15 | && "stale jump destination"); |
1065 | | |
1066 | | // Calculate the innermost active normal cleanup. |
1067 | 0 | EHScopeStack::stable_iterator TopCleanup = |
1068 | 15 | EHStack.getInnermostActiveNormalCleanup(); |
1069 | | |
1070 | | // If we're not in an active normal cleanup scope, or if the |
1071 | | // destination scope is within the innermost active normal cleanup |
1072 | | // scope, we don't need to worry about fixups. |
1073 | 15 | if (TopCleanup == EHStack.stable_end() || |
1074 | 15 | TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid |
1075 | 15 | return true; |
1076 | | |
1077 | | // Otherwise, we might need some cleanups. |
1078 | 0 | return false; |
1079 | 15 | } |
1080 | | |
1081 | | |
1082 | | /// Terminate the current block by emitting a branch which might leave |
1083 | | /// the current cleanup-protected scope. The target scope may not yet |
1084 | | /// be known, in which case this will require a fixup. |
1085 | | /// |
1086 | | /// As a side-effect, this method clears the insertion point. |
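| | /// |
| | /// Threading sketch (hypothetical example; A and B are types with |
| | /// destructors): for |
| | ///   { A a; { B b; goto out; } }  out: ; |
| | /// the destination index is stored into the cleanup-dest slot, the |
| | /// branch is retargeted at b's cleanup entry, b's scope records a |
| | /// branch-through, and a's scope records a branch-after to 'out'. |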
1087 | 176k | void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) { |
1088 | 176k | assert(Dest.getScopeDepth().encloses(EHStack.stable_begin()) |
1089 | 176k | && "stale jump destination"); |
1090 | | |
1091 | 176k | if (!HaveInsertPoint()) |
1092 | 212 | return; |
1093 | | |
1094 | | // Create the branch. |
1095 | 176k | llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock()); |
1096 | | |
1097 | | // Calculate the innermost active normal cleanup. |
1098 | 176k | EHScopeStack::stable_iterator |
1099 | 176k | TopCleanup = EHStack.getInnermostActiveNormalCleanup(); |
1100 | | |
1101 | | // If we're not in an active normal cleanup scope, or if the |
1102 | | // destination scope is within the innermost active normal cleanup |
1103 | | // scope, we don't need to worry about fixups. |
1104 | 176k | if (TopCleanup == EHStack.stable_end() || |
1105 | 176k | TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid |
1106 | 168k | Builder.ClearInsertionPoint(); |
1107 | 168k | return; |
1108 | 168k | } |
1109 | | |
1110 | | // If we can't resolve the destination cleanup scope, just add this |
1111 | | // to the current cleanup scope as a branch fixup. |
1112 | 7.69k | if (!Dest.getScopeDepth().isValid()) { |
1113 | 3.00k | BranchFixup &Fixup = EHStack.addBranchFixup(); |
1114 | 3.00k | Fixup.Destination = Dest.getBlock(); |
1115 | 3.00k | Fixup.DestinationIndex = Dest.getDestIndex(); |
1116 | 3.00k | Fixup.InitialBranch = BI; |
1117 | 3.00k | Fixup.OptimisticBranchBlock = nullptr; |
1118 | | |
1119 | 3.00k | Builder.ClearInsertionPoint(); |
1120 | 3.00k | return; |
1121 | 3.00k | } |
1122 | | |
1123 | | // Otherwise, thread through all the normal cleanups in scope. |
1124 | | |
1125 | | // Store the index at the start. |
1126 | 4.68k | llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex()); |
1127 | 4.68k | createStoreInstBefore(Index, getNormalCleanupDestSlot(), BI); |
1128 | | |
1129 | | // Adjust BI to point to the first cleanup block. |
1130 | 4.68k | { |
1131 | 4.68k | EHCleanupScope &Scope = |
1132 | 4.68k | cast<EHCleanupScope>(*EHStack.find(TopCleanup)); |
1133 | 4.68k | BI->setSuccessor(0, CreateNormalEntry(*this, Scope)); |
1134 | 4.68k | } |
1135 | | |
1136 | | // Add this destination to all the scopes involved. |
1137 | 4.68k | EHScopeStack::stable_iterator I = TopCleanup; |
1138 | 4.68k | EHScopeStack::stable_iterator E = Dest.getScopeDepth(); |
1139 | 4.68k | if (E.strictlyEncloses(I)) { |
1140 | 7.53k | while (true) { |
1141 | 7.53k | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I)); |
1142 | 7.53k | assert(Scope.isNormalCleanup()); |
1143 | 0 | I = Scope.getEnclosingNormalCleanup(); |
1144 | | |
1145 | | // If this is the last cleanup we're propagating through, tell it |
1146 | | // that there's a resolved jump moving through it. |
1147 | 7.53k | if (!E.strictlyEncloses(I)) { |
1148 | 4.45k | Scope.addBranchAfter(Index, Dest.getBlock()); |
1149 | 4.45k | break; |
1150 | 4.45k | } |
1151 | | |
1152 | | // Otherwise, tell the scope that there's a jump propagating |
1153 | | // through it. If this isn't new information, all the rest of |
1154 | | // the work has been done before. |
1155 | 3.07k | if (!Scope.addBranchThrough(Dest.getBlock())) |
1156 | 234 | break; |
1157 | 3.07k | } |
1158 | 4.68k | } |
1159 | | |
1160 | 4.68k | Builder.ClearInsertionPoint(); |
1161 | 4.68k | } |
1162 | | |
1163 | | static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack, |
1164 | 2 | EHScopeStack::stable_iterator C) { |
1165 | | // If we needed a normal block for any reason, that counts. |
1166 | 2 | if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock()) |
1167 | 0 | return true; |
1168 | | |
1169 | | // Check whether any enclosed cleanups were needed. |
1170 | 2 | for (EHScopeStack::stable_iterator |
1171 | 2 | I = EHStack.getInnermostNormalCleanup(); |
1172 | 3 | I != C; ) { |
1173 | 1 | assert(C.strictlyEncloses(I)); |
1174 | 0 | EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I)); |
1175 | 1 | if (S.getNormalBlock()) return true; |
1176 | 1 | I = S.getEnclosingNormalCleanup(); |
1177 | 1 | } |
1178 | | |
1179 | 2 | return false; |
1180 | 2 | } |
1181 | | |
1182 | | static bool IsUsedAsEHCleanup(EHScopeStack &EHStack, |
1183 | 101 | EHScopeStack::stable_iterator cleanup) { |
1184 | | // If we needed an EH block for any reason, that counts. |
1185 | 101 | if (EHStack.find(cleanup)->hasEHBranches()) |
1186 | 61 | return true; |
1187 | | |
1188 | | // Check whether any enclosed cleanups were needed. |
1189 | 40 | for (EHScopeStack::stable_iterator |
1190 | 57 | i = EHStack.getInnermostEHScope(); i != cleanup; ) { |
1191 | 21 | assert(cleanup.strictlyEncloses(i)); |
1192 | | |
1193 | 0 | EHScope &scope = *EHStack.find(i); |
1194 | 21 | if (scope.hasEHBranches()) |
1195 | 4 | return true; |
1196 | | |
1197 | 17 | i = scope.getEnclosingEHScope(); |
1198 | 17 | } |
1199 | | |
1200 | 36 | return false; |
1201 | 40 | } |
1202 | | |
1203 | | enum ForActivation_t { |
1204 | | ForActivation, |
1205 | | ForDeactivation |
1206 | | }; |
1207 | | |
1208 | | /// The given cleanup block is changing activation state. Configure a |
1209 | | /// cleanup variable if necessary. |
1210 | | /// |
1211 | | /// It would be good if we had some way of determining if there were |
1212 | | /// extra uses *after* the change-over point. |
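| | /// |
| | /// An i1 "cleanup.isactive" flag is required only if the cleanup has |
| | /// already been used as a normal or EH cleanup, or if it is being |
| | /// activated inside a conditional branch. |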
1213 | | static void SetupCleanupBlockActivation(CodeGenFunction &CGF, |
1214 | | EHScopeStack::stable_iterator C, |
1215 | | ForActivation_t kind, |
1216 | 102 | llvm::Instruction *dominatingIP) { |
1217 | 102 | EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C)); |
1218 | | |
1219 | | // We always need the flag if we're activating the cleanup in a |
1220 | | // conditional context, because we have to assume that the current |
1221 | | // location doesn't necessarily dominate the cleanup's code. |
1222 | 102 | bool isActivatedInConditional = |
1223 | 102 | (kind == ForActivation && CGF.isInConditionalBranch()); |
1224 | | |
1225 | 102 | bool needFlag = false; |
1226 | | |
1227 | | // Calculate whether the cleanup was used: |
1228 | | |
1229 | | // - as a normal cleanup |
1230 | 102 | if (Scope.isNormalCleanup() && |
1231 | 102 | (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) { |
1232 | 0 | Scope.setTestFlagInNormalCleanup(); |
1233 | 0 | needFlag = true; |
1234 | 0 | } |
1235 | | |
1236 | | // - as an EH cleanup |
1237 | 102 | if (Scope.isEHCleanup() && |
1238 | 102 | (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) { |
1239 | 65 | Scope.setTestFlagInEHCleanup(); |
1240 | 65 | needFlag = true; |
1241 | 65 | } |
1242 | | |
1243 | | // If it hasn't yet been used as either, we're done. |
1244 | 102 | if (!needFlag) return; |
1245 | | |
1246 | 65 | Address var = Scope.getActiveFlag(); |
1247 | 65 | if (!var.isValid()) { |
1248 | 57 | var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), CharUnits::One(), |
1249 | 57 | "cleanup.isactive"); |
1250 | 57 | Scope.setActiveFlag(var); |
1251 | | |
1252 | 57 | assert(dominatingIP && "no existing variable and no dominating IP!"); |
1253 | | |
1254 | | // Initialize to true or false depending on whether it was |
1255 | | // active up to this point. |
1256 | 0 | llvm::Constant *value = CGF.Builder.getInt1(kind == ForDeactivation); |
1257 | | |
1258 | | // If we're in a conditional block, ignore the dominating IP and |
1259 | | // use the outermost conditional branch. |
1260 | 57 | if (CGF.isInConditionalBranch()) { |
1261 | 0 | CGF.setBeforeOutermostConditional(value, var); |
1262 | 57 | } else { |
1263 | 57 | createStoreInstBefore(value, var, dominatingIP); |
1264 | 57 | } |
1265 | 57 | } |
1266 | | |
1267 | 0 | CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var); |
1268 | 65 | } |
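A representative client of this flag machinery (hypothetical source; the real call sites live elsewhere in CodeGen): a new-expression pushes an active cleanup that frees the allocation if the constructor throws, then deactivates it once construction succeeds. If the cleanup has already collected EH branches by that point, the routine above converts it to test an i1 "cleanup.isactive" flag rather than discarding it.

    struct Widget { Widget(int); };
    int seed();                     // may throw

    Widget *make() {
      // CodeGen pushes a delete-on-throw cleanup (active), emits the ctor
      // call, then calls DeactivateCleanupBlock once the object is fully
      // constructed.
      return new Widget(seed());
    }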
1269 | | |
1270 | | /// Activate a cleanup that was created in an inactivated state. |
1271 | | void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C, |
1272 | 0 | llvm::Instruction *dominatingIP) { |
1273 | 0 | assert(C != EHStack.stable_end() && "activating bottom of stack?"); |
1274 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); |
1275 | 0 | assert(!Scope.isActive() && "double activation"); |
1276 | | |
1277 | 0 | SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP); |
1278 | |
1279 | 0 | Scope.setActive(true); |
1280 | 0 | } |
1281 | | |
1282 | | /// Deactivate a cleanup that was created in an active state. |
1283 | | void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, |
1284 | 1.79k | llvm::Instruction *dominatingIP) { |
1285 | 1.79k | assert(C != EHStack.stable_end() && "deactivating bottom of stack?"); |
1286 | 0 | EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); |
1287 | 1.79k | assert(Scope.isActive() && "double deactivation"); |
1288 | | |
1289 | | // If it's the top of the stack, just pop it, but do so only if it belongs |
1290 | | // to the current RunCleanupsScope. |
1291 | 1.79k | if (C == EHStack.stable_begin() && |
1292 | 1.79k | CurrentCleanupScopeDepth.strictlyEncloses(C)) { |
1293 | | // Checking EHAsynch here is not strictly necessary; it is done |
1294 | | // to guarantee zero impact when the EHAsynch option is off. |
1295 | 1.69k | if (!Scope.isNormalCleanup() && getLangOpts().EHAsynch) { |
1296 | 2 | PopCleanupBlock(); |
1297 | 1.69k | } else { |
1298 | | // If it's a normal cleanup, we need to pretend that the |
1299 | | // fallthrough is unreachable. |
1300 | 1.69k | CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); |
1301 | 1.69k | PopCleanupBlock(); |
1302 | 1.69k | Builder.restoreIP(SavedIP); |
1303 | 1.69k | } |
1304 | 1.69k | return; |
1305 | 1.69k | } |
1306 | | |
1307 | | // Otherwise, follow the general case. |
1308 | 102 | SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP); |
1309 | | |
1310 | 102 | Scope.setActive(false); |
1311 | 102 | } |
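A minimal usage sketch of the fast path versus the general case, assuming a hypothetical MyCleanup type and that the code sits in the clang::CodeGen namespace like the rest of this file (the entry points and the Cleanup interface are real; the surrounding scaffolding is illustrative):

    struct MyCleanup final : EHScopeStack::Cleanup {
      void Emit(CodeGenFunction &CGF, Flags) override {
        // undo whatever the guarded region did
      }
    };

    void emitGuarded(CodeGenFunction &CGF, llvm::Instruction *DomIP) {
      CGF.EHStack.pushCleanup<MyCleanup>(EHCleanup);
      EHScopeStack::stable_iterator C = CGF.EHStack.stable_begin();
      // ... emit possibly-throwing IR ...
      // Still innermost and within the same RunCleanupsScope: the call
      // below just pops the cleanup.  Otherwise it falls through to the
      // active-flag logic shown above.
      CGF.DeactivateCleanupBlock(C, DomIP);
    }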
1312 | | |
1313 | 9.92k | Address CodeGenFunction::getNormalCleanupDestSlot() { |
1314 | 9.92k | if (!NormalCleanupDest.isValid()) |
1315 | 4.27k | NormalCleanupDest = |
1316 | 4.27k | CreateDefaultAlignTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot"); |
1317 | 9.92k | return NormalCleanupDest; |
1318 | 9.92k | } |
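Why the slot is an i32 rather than a flag (hypothetical source): when several distinct jumps leave through one shared cleanup, each jump stores its own destination index to cleanup.dest.slot, and the emitted cleanup block reloads it and switches on it to pick the real successor after the destructor runs.

    struct Guard { ~Guard(); };

    int sum(const int *p, int n) {
      int total = 0;
      for (int i = 0; i != n; ++i) {
        Guard g;
        if (p[i] < 0) break;      // one destination through ~Guard()
        if (p[i] == 0) continue;  // a second destination through ~Guard()
        total += p[i];
      }
      return total;
    }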
1319 | | |
1320 | | /// Emits all the code to cause the given temporary to be cleaned up. |
1321 | | void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary, |
1322 | | QualType TempType, |
1323 | 816 | Address Ptr) { |
1324 | 816 | pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject, |
1325 | 816 | /*useEHCleanup*/ true); |
1326 | 816 | } |
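The canonical trigger (hypothetical source): a temporary materialized within a full-expression lives until the end of that full-expression, and the pushDestroy above registers its destructor to run on both the fall-through and the unwind path.

    struct S { S(); ~S(); };
    int use(const S &s);

    int m() {
      return use(S());   // ~S() via a NormalAndEHCleanup after the call
    }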
1327 | | |
1328 | | // The "funclet" operand bundle must be set properly for the noThrow |
1329 | | // intrinsic (see CGCall.cpp). |
1330 | | static void EmitSehScope(CodeGenFunction &CGF, |
1331 | 38 | llvm::FunctionCallee &SehCppScope) { |
1332 | 38 | llvm::BasicBlock *InvokeDest = CGF.getInvokeDest(); |
1333 | 38 | assert(CGF.Builder.GetInsertBlock() && InvokeDest); |
1334 | 0 | llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont"); |
1335 | 38 | SmallVector<llvm::OperandBundleDef, 1> BundleList = |
1336 | 38 | CGF.getBundlesForFunclet(SehCppScope.getCallee()); |
1337 | 38 | if (CGF.CurrentFuncletPad) |
1338 | 0 | BundleList.emplace_back("funclet", CGF.CurrentFuncletPad); |
1339 | 38 | CGF.Builder.CreateInvoke(SehCppScope, Cont, InvokeDest, None, BundleList); |
1340 | 38 | CGF.EmitBlock(Cont); |
1341 | 38 | } |
1342 | | |
1343 | | // Invoke an llvm.seh.scope.begin at the beginning of a C++ scope for -EHa |
1344 | 19 | void CodeGenFunction::EmitSehCppScopeBegin() { |
1345 | 19 | assert(getLangOpts().EHAsynch); |
1346 | 0 | llvm::FunctionType *FTy = |
1347 | 19 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1348 | 19 | llvm::FunctionCallee SehCppScope = |
1349 | 19 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.begin"); |
1350 | 19 | EmitSehScope(*this, SehCppScope); |
1351 | 19 | } |
1352 | | |
1353 | | // Invoke an llvm.seh.scope.end at the end of a C++ scope for -EHa; |
1354 | | // llvm.seh.scope.end is emitted before popCleanup, so it is "invoked" |
1355 | 17 | void CodeGenFunction::EmitSehCppScopeEnd() { |
1356 | 17 | assert(getLangOpts().EHAsynch); |
1357 | 0 | llvm::FunctionType *FTy = |
1358 | 17 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1359 | 17 | llvm::FunctionCallee SehCppScope = |
1360 | 17 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.end"); |
1361 | 17 | EmitSehScope(*this, SehCppScope); |
1362 | 17 | } |
1363 | | |
1364 | | // Invoke an llvm.seh.try.begin at the beginning of an SEH scope for -EHa |
1365 | 0 | void CodeGenFunction::EmitSehTryScopeBegin() { |
1366 | 0 | assert(getLangOpts().EHAsynch); |
1367 | 0 | llvm::FunctionType *FTy = |
1368 | 0 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1369 | 0 | llvm::FunctionCallee SehCppScope = |
1370 | 0 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.begin"); |
1371 | 0 | EmitSehScope(*this, SehCppScope); |
1372 | 0 | } |
1373 | | |
1374 | | // Invoke an llvm.seh.try.end at the end of an SEH scope for -EHa |
1375 | 2 | void CodeGenFunction::EmitSehTryScopeEnd() { |
1376 | 2 | assert(getLangOpts().EHAsynch); |
1377 | 0 | llvm::FunctionType *FTy = |
1378 | 2 | llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); |
1379 | 2 | llvm::FunctionCallee SehCppScope = |
1380 | 2 | CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.end"); |
1381 | 2 | EmitSehScope(*this, SehCppScope); |
1382 | 2 | } |