/Users/buildslave/jenkins/workspace/coverage/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
Line | Count | Source (jump to first uncovered line) |
1 | | //===----- CGCoroutine.cpp - Emit LLVM Code for C++ coroutines ------------===// |
2 | | // |
3 | | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | | // See https://llvm.org/LICENSE.txt for license information. |
5 | | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | | // |
7 | | //===----------------------------------------------------------------------===// |
8 | | // |
9 | | // This contains code dealing with C++ code generation of coroutines. |
10 | | // |
11 | | //===----------------------------------------------------------------------===// |
12 | | |
13 | | #include "CGCleanup.h" |
14 | | #include "CodeGenFunction.h" |
15 | | #include "llvm/ADT/ScopeExit.h" |
16 | | #include "clang/AST/StmtCXX.h" |
17 | | #include "clang/AST/StmtVisitor.h" |
18 | | |
19 | | using namespace clang; |
20 | | using namespace CodeGen; |
21 | | |
22 | | using llvm::Value; |
23 | | using llvm::BasicBlock; |
24 | | |
namespace {
// Kinds of suspend points encountered while emitting a coroutine body; used
// only to synthesize readable labels (basic block / temp names) in the IR.
enum class AwaitKind { Init, Normal, Yield, Final };
// Printable prefix for each kind; indexed by static_cast<unsigned>(AwaitKind),
// so the order here must match the enumerator order above.
static constexpr llvm::StringLiteral AwaitKindStr[] = {"init", "await", "yield",
                                                       "final"};
}
30 | | |
// Per-coroutine emission state. Created lazily by createCoroData() and owned
// by CodeGenFunction::CGCoroInfo; kept private to this file.
struct clang::CodeGen::CGCoroData {
  // What is the current await expression kind and how many
  // await/yield expressions were encountered so far.
  // These are used to generate pretty labels for await expressions in LLVM IR.
  AwaitKind CurrentAwaitKind = AwaitKind::Init;
  unsigned AwaitNum = 0;
  unsigned YieldNum = 0;

  // How many co_return statements are in the coroutine. Used to decide whether
  // we need to add a co_return; equivalent at the end of the user authored
  // body.
  unsigned CoreturnCount = 0;

  // A branch to this block is emitted when the coroutine needs to suspend.
  llvm::BasicBlock *SuspendBB = nullptr;

  // The promise type's 'unhandled_exception' handler, if it defines one.
  Stmt *ExceptionHandler = nullptr;

  // A temporary i1 alloca that stores whether 'await_resume' threw an
  // exception. If it did, 'true' is stored in this variable, and the coroutine
  // body must be skipped. If the promise type does not define an exception
  // handler, this is null.
  llvm::Value *ResumeEHVar = nullptr;

  // Stores the jump destination just before the coroutine memory is freed.
  // This is the destination that every suspend point jumps to for the cleanup
  // branch.
  CodeGenFunction::JumpDest CleanupJD;

  // Stores the jump destination just before the final suspend. The co_return
  // statements jump to this point after calling the return_xxx promise member.
  CodeGenFunction::JumpDest FinalJD;

  // Stores the llvm.coro.id emitted in the function so that we can supply it
  // as the first argument to coro.begin, coro.alloc and coro.free intrinsics.
  // Note: llvm.coro.id returns a token that cannot be directly expressed in a
  // builtin.
  llvm::CallInst *CoroId = nullptr;

  // Stores the llvm.coro.begin emitted in the function so that we can replace
  // all coro.frame intrinsics with direct SSA value of coro.begin that returns
  // the address of the coroutine frame of the current coroutine.
  llvm::CallInst *CoroBegin = nullptr;

  // Stores the last emitted coro.free for the deallocate expressions, we use it
  // to wrap dealloc code with if(auto mem = coro.free) dealloc(mem).
  llvm::CallInst *LastCoroFree = nullptr;

  // If coro.id came from the builtin, remember the expression to give better
  // diagnostics. If CoroIdExpr is nullptr, the coro.id was created by
  // EmitCoroutineBody.
  CallExpr const *CoroIdExpr = nullptr;
};
84 | | |
85 | | // Defining these here allows to keep CGCoroData private to this file. |
86 | 324k | clang::CodeGen::CodeGenFunction::CGCoroInfo::CGCoroInfo() {} |
87 | 324k | CodeGenFunction::CGCoroInfo::~CGCoroInfo() {} |
88 | | |
89 | | static void createCoroData(CodeGenFunction &CGF, |
90 | | CodeGenFunction::CGCoroInfo &CurCoro, |
91 | | llvm::CallInst *CoroId, |
92 | 113 | CallExpr const *CoroIdExpr = nullptr) { |
93 | 113 | if (CurCoro.Data) { |
94 | 1 | if (CurCoro.Data->CoroIdExpr) |
95 | 1 | CGF.CGM.Error(CoroIdExpr->getBeginLoc(), |
96 | 1 | "only one __builtin_coro_id can be used in a function"); |
97 | 0 | else if (CoroIdExpr) |
98 | 0 | CGF.CGM.Error(CoroIdExpr->getBeginLoc(), |
99 | 0 | "__builtin_coro_id shall not be used in a C++ coroutine"); |
100 | 0 | else |
101 | 0 | llvm_unreachable("EmitCoroutineBodyStatement called twice?"); |
102 | | |
103 | 1 | return; |
104 | 1 | } |
105 | | |
106 | 112 | CurCoro.Data = std::unique_ptr<CGCoroData>(new CGCoroData); |
107 | 112 | CurCoro.Data->CoroId = CoroId; |
108 | 112 | CurCoro.Data->CoroIdExpr = CoroIdExpr; |
109 | 112 | } |
110 | | |
111 | | // Synthesize a pretty name for a suspend point. |
112 | 280 | static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) { |
113 | 280 | unsigned No = 0; |
114 | 280 | switch (Kind) { |
115 | 110 | case AwaitKind::Init: |
116 | 216 | case AwaitKind::Final: |
117 | 216 | break; |
118 | 52 | case AwaitKind::Normal: |
119 | 52 | No = ++Coro.AwaitNum; |
120 | 52 | break; |
121 | 12 | case AwaitKind::Yield: |
122 | 12 | No = ++Coro.YieldNum; |
123 | 12 | break; |
124 | 280 | } |
125 | 280 | SmallString<32> Prefix(AwaitKindStr[static_cast<unsigned>(Kind)]); |
126 | 280 | if (No > 1) { |
127 | 20 | Twine(No).toVector(Prefix); |
128 | 20 | } |
129 | 280 | return Prefix; |
130 | 280 | } |
131 | | |
132 | 23 | static bool memberCallExpressionCanThrow(const Expr *E) { |
133 | 23 | if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E)) |
134 | 23 | if (const auto *Proto = |
135 | 23 | CE->getMethodDecl()->getType()->getAs<FunctionProtoType>()) |
136 | 23 | if (isNoexceptExceptionSpec(Proto->getExceptionSpecType()) && |
137 | 23 | Proto->canThrow() == CT_Cannot22 ) |
138 | 22 | return false; |
139 | 1 | return true; |
140 | 23 | } |
141 | | |
142 | | // Emit suspend expression which roughly looks like: |
143 | | // |
144 | | // auto && x = CommonExpr(); |
145 | | // if (!x.await_ready()) { |
146 | | // llvm_coro_save(); |
147 | | // x.await_suspend(...); (*) |
148 | | // llvm_coro_suspend(); (**) |
149 | | // } |
150 | | // x.await_resume(); |
151 | | // |
152 | | // where the result of the entire expression is the result of x.await_resume() |
153 | | // |
154 | | // (*) If x.await_suspend return type is bool, it allows to veto a suspend: |
155 | | // if (x.await_suspend(...)) |
156 | | // llvm_coro_suspend(); |
157 | | // |
158 | | // (**) llvm_coro_suspend() encodes three possible continuations as |
159 | | // a switch instruction: |
160 | | // |
161 | | // %where-to = call i8 @llvm.coro.suspend(...) |
162 | | // switch i8 %where-to, label %coro.ret [ ; jump to epilogue to suspend |
163 | | // i8 0, label %yield.ready ; go here when resumed |
164 | | // i8 1, label %yield.cleanup ; go here when destroyed |
165 | | // ] |
166 | | // |
167 | | // See llvm's docs/Coroutines.rst for more details. |
168 | | // |
namespace {
// Result of emitSuspendExpression: exactly one of LV/RV is meaningful,
// selected by the caller's forLValue flag.
struct LValueOrRValue {
  LValue LV;
  RValue RV;
};
}
// Emits the ready/suspend/resume machinery for one co_await/co_yield (see the
// block comment above for the overall shape). Returns the result of
// await_resume() as either an lvalue or an rvalue, per forLValue.
static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro,
                                            CoroutineSuspendExpr const &S,
                                            AwaitKind Kind, AggValueSlot aggSlot,
                                            bool ignoreResult, bool forLValue) {
  auto *E = S.getCommonExpr();

  // Bind the common (operand) expression to the opaque value so the ready/
  // suspend/resume sub-expressions all refer to the same materialization;
  // the scope-exit guard unbinds it on every path out of this function.
  auto Binder =
      CodeGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E);
  auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); });

  auto Prefix = buildSuspendPrefixStr(Coro, Kind);
  BasicBlock *ReadyBlock = CGF.createBasicBlock(Prefix + Twine(".ready"));
  BasicBlock *SuspendBlock = CGF.createBasicBlock(Prefix + Twine(".suspend"));
  BasicBlock *CleanupBlock = CGF.createBasicBlock(Prefix + Twine(".cleanup"));

  // If expression is ready, no need to suspend.
  CGF.EmitBranchOnBoolExpr(S.getReadyExpr(), ReadyBlock, SuspendBlock, 0);

  // Otherwise, emit suspend logic.
  CGF.EmitBlock(SuspendBlock);

  auto &Builder = CGF.Builder;
  llvm::Function *CoroSave = CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_save);
  auto *NullPtr = llvm::ConstantPointerNull::get(CGF.CGM.Int8PtrTy);
  auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});

  CGF.CurCoro.InSuspendBlock = true;
  auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
  CGF.CurCoro.InSuspendBlock = false;

  if (SuspendRet != nullptr && SuspendRet->getType()->isIntegerTy(1)) {
    // Veto suspension if requested by bool returning await_suspend.
    BasicBlock *RealSuspendBlock =
        CGF.createBasicBlock(Prefix + Twine(".suspend.bool"));
    CGF.Builder.CreateCondBr(SuspendRet, RealSuspendBlock, ReadyBlock);
    CGF.EmitBlock(RealSuspendBlock);
  }

  // Emit the suspend point.
  const bool IsFinalSuspend = (Kind == AwaitKind::Final);
  llvm::Function *CoroSuspend =
      CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_suspend);
  auto *SuspendResult = Builder.CreateCall(
      CoroSuspend, {SaveCall, Builder.getInt1(IsFinalSuspend)});

  // Create a switch capturing three possible continuations: default jumps to
  // the function epilogue (suspend), 0 means resumed, 1 means destroyed.
  auto *Switch = Builder.CreateSwitch(SuspendResult, Coro.SuspendBB, 2);
  Switch->addCase(Builder.getInt8(0), ReadyBlock);
  Switch->addCase(Builder.getInt8(1), CleanupBlock);

  // Emit cleanup for this suspend point.
  CGF.EmitBlock(CleanupBlock);
  CGF.EmitBranchThroughCleanup(Coro.CleanupJD);

  // Emit await_resume expression.
  CGF.EmitBlock(ReadyBlock);

  // Exception handling requires additional IR. If the 'await_resume' function
  // is marked as 'noexcept', we avoid generating this additional IR.
  CXXTryStmt *TryStmt = nullptr;
  if (Coro.ExceptionHandler && Kind == AwaitKind::Init &&
      memberCallExpressionCanThrow(S.getResumeExpr())) {
    // Synthesize: try { await_resume(); } catch (...) { ExceptionHandler; }
    // with an i1 flag that records whether await_resume threw, so the body
    // can be skipped afterwards (see EmitCoroutineBody).
    Coro.ResumeEHVar =
        CGF.CreateTempAlloca(Builder.getInt1Ty(), Prefix + Twine("resume.eh"));
    Builder.CreateFlagStore(true, Coro.ResumeEHVar);

    auto Loc = S.getResumeExpr()->getExprLoc();
    auto *Catch = new (CGF.getContext())
        CXXCatchStmt(Loc, /*exDecl=*/nullptr, Coro.ExceptionHandler);
    auto *TryBody = CompoundStmt::Create(CGF.getContext(), S.getResumeExpr(),
                                         FPOptionsOverride(), Loc, Loc);
    TryStmt = CXXTryStmt::Create(CGF.getContext(), Loc, TryBody, Catch);
    CGF.EnterCXXTryStmt(*TryStmt);
  }

  LValueOrRValue Res;
  if (forLValue)
    Res.LV = CGF.EmitLValue(S.getResumeExpr());
  else
    Res.RV = CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);

  if (TryStmt) {
    // Reaching here means await_resume did not throw; clear the flag.
    Builder.CreateFlagStore(false, Coro.ResumeEHVar);
    CGF.ExitCXXTryStmt(*TryStmt);
  }

  return Res;
}
263 | | |
264 | | RValue CodeGenFunction::EmitCoawaitExpr(const CoawaitExpr &E, |
265 | | AggValueSlot aggSlot, |
266 | 266 | bool ignoreResult) { |
267 | 266 | return emitSuspendExpression(*this, *CurCoro.Data, E, |
268 | 266 | CurCoro.Data->CurrentAwaitKind, aggSlot, |
269 | 266 | ignoreResult, /*forLValue*/false).RV; |
270 | 266 | } |
271 | | RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E, |
272 | | AggValueSlot aggSlot, |
273 | 11 | bool ignoreResult) { |
274 | 11 | return emitSuspendExpression(*this, *CurCoro.Data, E, AwaitKind::Yield, |
275 | 11 | aggSlot, ignoreResult, /*forLValue*/false).RV; |
276 | 11 | } |
277 | | |
278 | 102 | void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) { |
279 | 102 | ++CurCoro.Data->CoreturnCount; |
280 | 102 | const Expr *RV = S.getOperand(); |
281 | 102 | if (RV && RV->getType()->isVoidType()28 && !isa<InitListExpr>(RV)2 ) { |
282 | | // Make sure to evaluate the non initlist expression of a co_return |
283 | | // with a void expression for side effects. |
284 | 1 | RunCleanupsScope cleanupScope(*this); |
285 | 1 | EmitIgnoredExpr(RV); |
286 | 1 | } |
287 | 102 | EmitStmt(S.getPromiseCall()); |
288 | 102 | EmitBranchThroughCleanup(CurCoro.Data->FinalJD); |
289 | 102 | } |
290 | | |
291 | | |
292 | | #ifndef NDEBUG |
293 | | static QualType getCoroutineSuspendExprReturnType(const ASTContext &Ctx, |
294 | 3 | const CoroutineSuspendExpr *E) { |
295 | 3 | const auto *RE = E->getResumeExpr(); |
296 | | // Is it possible for RE to be a CXXBindTemporaryExpr wrapping |
297 | | // a MemberCallExpr? |
298 | 3 | assert(isa<CallExpr>(RE) && "unexpected suspend expression type"); |
299 | 3 | return cast<CallExpr>(RE)->getCallReturnType(Ctx); |
300 | 3 | } |
301 | | #endif |
302 | | |
303 | | LValue |
304 | 2 | CodeGenFunction::EmitCoawaitLValue(const CoawaitExpr *E) { |
305 | 2 | assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() && |
306 | 2 | "Can't have a scalar return unless the return type is a " |
307 | 2 | "reference type!"); |
308 | 2 | return emitSuspendExpression(*this, *CurCoro.Data, *E, |
309 | 2 | CurCoro.Data->CurrentAwaitKind, AggValueSlot::ignored(), |
310 | 2 | /*ignoreResult*/false, /*forLValue*/true).LV; |
311 | 2 | } |
312 | | |
313 | | LValue |
314 | 1 | CodeGenFunction::EmitCoyieldLValue(const CoyieldExpr *E) { |
315 | 1 | assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() && |
316 | 1 | "Can't have a scalar return unless the return type is a " |
317 | 1 | "reference type!"); |
318 | 1 | return emitSuspendExpression(*this, *CurCoro.Data, *E, |
319 | 1 | AwaitKind::Yield, AggValueSlot::ignored(), |
320 | 1 | /*ignoreResult*/false, /*forLValue*/true).LV; |
321 | 1 | } |
322 | | |
323 | | // Hunts for the parameter reference in the parameter copy/move declaration. |
324 | | namespace { |
325 | | struct GetParamRef : public StmtVisitor<GetParamRef> { |
326 | | public: |
327 | | DeclRefExpr *Expr = nullptr; |
328 | 54 | GetParamRef() {} |
329 | 54 | void VisitDeclRefExpr(DeclRefExpr *E) { |
330 | 54 | assert(Expr == nullptr && "multilple declref in param move"); |
331 | 54 | Expr = E; |
332 | 54 | } |
333 | 65 | void VisitStmt(Stmt *S) { |
334 | 65 | for (auto *C : S->children()) { |
335 | 65 | if (C) |
336 | 65 | Visit(C); |
337 | 65 | } |
338 | 65 | } |
339 | | }; |
340 | | } |
341 | | |
342 | | // This class replaces references to parameters to their copies by changing |
343 | | // the addresses in CGF.LocalDeclMap and restoring back the original values in |
344 | | // its destructor. |
345 | | |
namespace {
struct ParamReferenceReplacerRAII {
  // Saved (parameter decl -> original address) entries to restore later.
  CodeGenFunction::DeclMapTy SavedLocals;
  CodeGenFunction::DeclMapTy& LocalDeclMap;

  ParamReferenceReplacerRAII(CodeGenFunction::DeclMapTy &LocalDeclMap)
      : LocalDeclMap(LocalDeclMap) {}

  // Redirect the original parameter's LocalDeclMap entry to the address of
  // the copy declared by PM, remembering the old address in SavedLocals.
  void addCopy(DeclStmt const *PM) {
    // Figure out what param it refers to.

    assert(PM->isSingleDecl());
    VarDecl const*VD = static_cast<VarDecl const*>(PM->getSingleDecl());
    Expr const *InitExpr = VD->getInit();
    GetParamRef Visitor;
    Visitor.Visit(const_cast<Expr*>(InitExpr));
    assert(Visitor.Expr);
    DeclRefExpr *DREOrig = Visitor.Expr;
    auto *PD = DREOrig->getDecl();

    auto it = LocalDeclMap.find(PD);
    assert(it != LocalDeclMap.end() && "parameter is not found");
    SavedLocals.insert({ PD, it->second });

    auto copyIt = LocalDeclMap.find(VD);
    assert(copyIt != LocalDeclMap.end() && "parameter copy is not found");
    it->second = copyIt->getSecond();
  }

  ~ParamReferenceReplacerRAII() {
    // NOTE(review): DenseMap::insert does not overwrite an existing key, and
    // the parameter keys are still present (addCopy only rewrote their mapped
    // values), so this loop appears to be a no-op as a "restore". Verify the
    // intended semantics — an explicit assignment may have been meant.
    for (auto&& SavedLocal : SavedLocals) {
      LocalDeclMap.insert({SavedLocal.first, SavedLocal.second});
    }
  }
};
}
382 | | |
383 | | // For WinEH exception representation backend needs to know what funclet coro.end |
384 | | // belongs to. That information is passed in a funclet bundle. |
385 | | static SmallVector<llvm::OperandBundleDef, 1> |
386 | 19 | getBundlesForCoroEnd(CodeGenFunction &CGF) { |
387 | 19 | SmallVector<llvm::OperandBundleDef, 1> BundleList; |
388 | | |
389 | 19 | if (llvm::Instruction *EHPad = CGF.CurrentFuncletPad) |
390 | 3 | BundleList.emplace_back("funclet", EHPad); |
391 | | |
392 | 19 | return BundleList; |
393 | 19 | } |
394 | | |
namespace {
// We will insert coro.end to cut any of the destructors for objects that
// do not need to be destroyed once the coroutine is resumed.
// See llvm/docs/Coroutines.rst for more details about coro.end.
struct CallCoroEnd final : public EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    auto &CGM = CGF.CGM;
    auto *NullPtr = llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
    llvm::Function *CoroEndFn = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
    // See if we have a funclet bundle to associate coro.end with. (WinEH)
    auto Bundles = getBundlesForCoroEnd(CGF);
    // The 'true' argument marks this coro.end as being on the unwind path
    // (this cleanup runs during exception propagation).
    auto *CoroEnd =
        CGF.Builder.CreateCall(CoroEndFn,
                               {NullPtr, CGF.Builder.getTrue(),
                                llvm::ConstantTokenNone::get(CoroEndFn->getContext())},
                               Bundles);
    if (Bundles.empty()) {
      // Otherwise, (landingpad model), create a conditional branch that leads
      // either to a cleanup block or a block with EH resume instruction.
      auto *ResumeBB = CGF.getEHResumeBlock(/*isCleanup=*/true);
      auto *CleanupContBB = CGF.createBasicBlock("cleanup.cont");
      CGF.Builder.CreateCondBr(CoroEnd, ResumeBB, CleanupContBB);
      CGF.EmitBlock(CleanupContBB);
    }
  }
};
}
422 | | |
423 | | namespace { |
424 | | // Make sure to call coro.delete on scope exit. |
425 | | struct CallCoroDelete final : public EHScopeStack::Cleanup { |
426 | | Stmt *Deallocate; |
427 | | |
428 | | // Emit "if (coro.free(CoroId, CoroBegin)) Deallocate;" |
429 | | |
430 | | // Note: That deallocation will be emitted twice: once for a normal exit and |
431 | | // once for exceptional exit. This usage is safe because Deallocate does not |
432 | | // contain any declarations. The SubStmtBuilder::makeNewAndDeleteExpr() |
433 | | // builds a single call to a deallocation function which is safe to emit |
434 | | // multiple times. |
435 | 134 | void Emit(CodeGenFunction &CGF, Flags) override { |
436 | | // Remember the current point, as we are going to emit deallocation code |
437 | | // first to get to coro.free instruction that is an argument to a delete |
438 | | // call. |
439 | 134 | BasicBlock *SaveInsertBlock = CGF.Builder.GetInsertBlock(); |
440 | | |
441 | 134 | auto *FreeBB = CGF.createBasicBlock("coro.free"); |
442 | 134 | CGF.EmitBlock(FreeBB); |
443 | 134 | CGF.EmitStmt(Deallocate); |
444 | | |
445 | 134 | auto *AfterFreeBB = CGF.createBasicBlock("after.coro.free"); |
446 | 134 | CGF.EmitBlock(AfterFreeBB); |
447 | | |
448 | | // We should have captured coro.free from the emission of deallocate. |
449 | 134 | auto *CoroFree = CGF.CurCoro.Data->LastCoroFree; |
450 | 134 | if (!CoroFree) { |
451 | 0 | CGF.CGM.Error(Deallocate->getBeginLoc(), |
452 | 0 | "Deallocation expressoin does not refer to coro.free"); |
453 | 0 | return; |
454 | 0 | } |
455 | | |
456 | | // Get back to the block we were originally and move coro.free there. |
457 | 134 | auto *InsertPt = SaveInsertBlock->getTerminator(); |
458 | 134 | CoroFree->moveBefore(InsertPt); |
459 | 134 | CGF.Builder.SetInsertPoint(InsertPt); |
460 | | |
461 | | // Add if (auto *mem = coro.free) Deallocate; |
462 | 134 | auto *NullPtr = llvm::ConstantPointerNull::get(CGF.Int8PtrTy); |
463 | 134 | auto *Cond = CGF.Builder.CreateICmpNE(CoroFree, NullPtr); |
464 | 134 | CGF.Builder.CreateCondBr(Cond, FreeBB, AfterFreeBB); |
465 | | |
466 | | // No longer need old terminator. |
467 | 134 | InsertPt->eraseFromParent(); |
468 | 134 | CGF.Builder.SetInsertPoint(AfterFreeBB); |
469 | 134 | } |
470 | 110 | explicit CallCoroDelete(Stmt *DeallocStmt) : Deallocate(DeallocStmt) {} |
471 | | }; |
472 | | } |
473 | | |
namespace {
// Manages emission of the "get return object" (GRO) for a coroutine: either
// constructs it directly in the return slot (RVO) or, when the GRO type
// differs from the coroutine's return type, in a temporary guarded by an
// "is initialized" flag so its cleanup only runs once it really exists.
struct GetReturnObjectManager {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  const CoroutineBodyStmt &S;
  // When true, performs RVO for the return object.
  bool DirectEmit = false;

  // i1 flag alloca gating the GRO's cleanup (delayed emission only).
  Address GroActiveFlag;
  CodeGenFunction::AutoVarEmission GroEmission;

  GetReturnObjectManager(CodeGenFunction &CGF, const CoroutineBodyStmt &S)
      : CGF(CGF), Builder(CGF.Builder), S(S), GroActiveFlag(Address::invalid()),
        GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {
    // The call to get_return_object is sequenced before the call to
    // initial_suspend and is invoked at most once, but there are caveats
    // regarding on whether the prvalue result object may be initialized
    // directly/eager or delayed, depending on the types involved.
    //
    // More info at https://github.com/cplusplus/papers/issues/1414
    //
    // The general cases:
    // 1. Same type of get_return_object and coroutine return type (direct
    // emission):
    //  - Constructed in the return slot.
    // 2. Different types (delayed emission):
    //  - Constructed temporary object prior to initial suspend initialized with
    //  a call to get_return_object()
    //  - When coroutine needs to return to the caller and needs to construct
    //  return value for the coroutine it is initialized with expiring value of
    //  the temporary obtained above.
    //
    // Direct emission for void returning coroutines or GROs.
    DirectEmit = [&]() {
      auto *RVI = S.getReturnValueInit();
      assert(RVI && "expected RVI");
      auto GroType = RVI->getType();
      return CGF.getContext().hasSameType(GroType, CGF.FnRetTy);
    }();
  }

  // The gro variable has to outlive coroutine frame and coroutine promise, but,
  // it can only be initialized after coroutine promise was created, thus, we
  // split its emission in two parts. EmitGroAlloca emits an alloca and sets up
  // cleanups. Later when coroutine promise is available we initialize the gro
  // and sets the flag that the cleanup is now active.
  void EmitGroAlloca() {
    if (DirectEmit)
      return;

    auto *GroDeclStmt = dyn_cast_or_null<DeclStmt>(S.getResultDecl());
    if (!GroDeclStmt) {
      // If get_return_object returns void, no need to do an alloca.
      return;
    }

    auto *GroVarDecl = cast<VarDecl>(GroDeclStmt->getSingleDecl());

    // Set GRO flag that it is not initialized yet
    GroActiveFlag = CGF.CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(),
                                         "gro.active");
    Builder.CreateStore(Builder.getFalse(), GroActiveFlag);

    GroEmission = CGF.EmitAutoVarAlloca(*GroVarDecl);
    auto *GroAlloca = dyn_cast_or_null<llvm::AllocaInst>(
        GroEmission.getOriginalAllocatedAddress().getPointer());
    assert(GroAlloca && "expected alloca to be emitted");
    // Tag the alloca so the coroutine frame optimizer never spills the GRO
    // into the frame (it must outlive the frame).
    GroAlloca->setMetadata(llvm::LLVMContext::MD_coro_outside_frame,
                           llvm::MDNode::get(CGF.CGM.getLLVMContext(), {}));

    // Remember the top of EHStack before emitting the cleanup.
    auto old_top = CGF.EHStack.stable_begin();
    CGF.EmitAutoVarCleanups(GroEmission);
    auto top = CGF.EHStack.stable_begin();

    // Make the cleanup conditional on gro.active
    for (auto b = CGF.EHStack.find(top), e = CGF.EHStack.find(old_top); b != e;
         b++) {
      if (auto *Cleanup = dyn_cast<EHCleanupScope>(&*b)) {
        assert(!Cleanup->hasActiveFlag() && "cleanup already has active flag?");
        Cleanup->setActiveFlag(GroActiveFlag);
        Cleanup->setTestFlagInEHCleanup();
        Cleanup->setTestFlagInNormalCleanup();
      }
    }
  }

  void EmitGroInit() {
    if (DirectEmit) {
      // ReturnValue should be valid as long as the coroutine's return type
      // is not void. The assertion could help us to reduce the check later.
      assert(CGF.ReturnValue.isValid() == (bool)S.getReturnStmt());
      // Now we have the promise, initialize the GRO.
      // We need to emit `get_return_object` first. According to:
      // [dcl.fct.def.coroutine]p7
      // The call to get_return_object is sequenced before the call to
      // initial_suspend and is invoked at most once.
      //
      // So we couldn't emit return value when we emit return statement,
      // otherwise the call to get_return_object wouldn't be in front
      // of initial_suspend.
      if (CGF.ReturnValue.isValid()) {
        CGF.EmitAnyExprToMem(S.getReturnValue(), CGF.ReturnValue,
                             S.getReturnValue()->getType().getQualifiers(),
                             /*IsInit*/ true);
      }
      return;
    }

    if (!GroActiveFlag.isValid()) {
      // No Gro variable was allocated. Simply emit the call to
      // get_return_object.
      CGF.EmitStmt(S.getResultDecl());
      return;
    }

    CGF.EmitAutoVarInit(GroEmission);
    // The GRO now exists; arm its cleanup.
    Builder.CreateStore(Builder.getTrue(), GroActiveFlag);
  }
};
} // namespace
595 | | |
596 | | static void emitBodyAndFallthrough(CodeGenFunction &CGF, |
597 | 110 | const CoroutineBodyStmt &S, Stmt *Body) { |
598 | 110 | CGF.EmitStmt(Body); |
599 | 110 | const bool CanFallthrough = CGF.Builder.GetInsertBlock(); |
600 | 110 | if (CanFallthrough) |
601 | 32 | if (Stmt *OnFallthrough = S.getFallthroughHandler()) |
602 | 32 | CGF.EmitStmt(OnFallthrough); |
603 | 110 | } |
604 | | |
605 | 110 | void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) { |
606 | 110 | auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getPtrTy()); |
607 | 110 | auto &TI = CGM.getContext().getTargetInfo(); |
608 | 110 | unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth(); |
609 | | |
610 | 110 | auto *EntryBB = Builder.GetInsertBlock(); |
611 | 110 | auto *AllocBB = createBasicBlock("coro.alloc"); |
612 | 110 | auto *InitBB = createBasicBlock("coro.init"); |
613 | 110 | auto *FinalBB = createBasicBlock("coro.final"); |
614 | 110 | auto *RetBB = createBasicBlock("coro.ret"); |
615 | | |
616 | 110 | auto *CoroId = Builder.CreateCall( |
617 | 110 | CGM.getIntrinsic(llvm::Intrinsic::coro_id), |
618 | 110 | {Builder.getInt32(NewAlign), NullPtr, NullPtr, NullPtr}); |
619 | 110 | createCoroData(*this, CurCoro, CoroId); |
620 | 110 | CurCoro.Data->SuspendBB = RetBB; |
621 | 110 | assert(ShouldEmitLifetimeMarkers && |
622 | 110 | "Must emit lifetime intrinsics for coroutines"); |
623 | | |
624 | | // Backend is allowed to elide memory allocations, to help it, emit |
625 | | // auto mem = coro.alloc() ? 0 : ... allocation code ...; |
626 | 110 | auto *CoroAlloc = Builder.CreateCall( |
627 | 110 | CGM.getIntrinsic(llvm::Intrinsic::coro_alloc), {CoroId}); |
628 | | |
629 | 110 | Builder.CreateCondBr(CoroAlloc, AllocBB, InitBB); |
630 | | |
631 | 110 | EmitBlock(AllocBB); |
632 | 110 | auto *AllocateCall = EmitScalarExpr(S.getAllocate()); |
633 | 110 | auto *AllocOrInvokeContBB = Builder.GetInsertBlock(); |
634 | | |
635 | | // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided. |
636 | 110 | if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) { |
637 | 3 | auto *RetOnFailureBB = createBasicBlock("coro.ret.on.failure"); |
638 | | |
639 | | // See if allocation was successful. |
640 | 3 | auto *NullPtr = llvm::ConstantPointerNull::get(Int8PtrTy); |
641 | 3 | auto *Cond = Builder.CreateICmpNE(AllocateCall, NullPtr); |
642 | | // Expect the allocation to be successful. |
643 | 3 | emitCondLikelihoodViaExpectIntrinsic(Cond, Stmt::LH_Likely); |
644 | 3 | Builder.CreateCondBr(Cond, InitBB, RetOnFailureBB); |
645 | | |
646 | | // If not, return OnAllocFailure object. |
647 | 3 | EmitBlock(RetOnFailureBB); |
648 | 3 | EmitStmt(RetOnAllocFailure); |
649 | 3 | } |
650 | 107 | else { |
651 | 107 | Builder.CreateBr(InitBB); |
652 | 107 | } |
653 | | |
654 | 110 | EmitBlock(InitBB); |
655 | | |
656 | | // Pass the result of the allocation to coro.begin. |
657 | 110 | auto *Phi = Builder.CreatePHI(VoidPtrTy, 2); |
658 | 110 | Phi->addIncoming(NullPtr, EntryBB); |
659 | 110 | Phi->addIncoming(AllocateCall, AllocOrInvokeContBB); |
660 | 110 | auto *CoroBegin = Builder.CreateCall( |
661 | 110 | CGM.getIntrinsic(llvm::Intrinsic::coro_begin), {CoroId, Phi}); |
662 | 110 | CurCoro.Data->CoroBegin = CoroBegin; |
663 | | |
664 | 110 | GetReturnObjectManager GroManager(*this, S); |
665 | 110 | GroManager.EmitGroAlloca(); |
666 | | |
667 | 110 | CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(RetBB); |
668 | 110 | { |
669 | 110 | CGDebugInfo *DI = getDebugInfo(); |
670 | 110 | ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap); |
671 | 110 | CodeGenFunction::RunCleanupsScope ResumeScope(*this); |
672 | 110 | EHStack.pushCleanup<CallCoroDelete>(NormalAndEHCleanup, S.getDeallocate()); |
673 | | |
674 | | // Create mapping between parameters and copy-params for coroutine function. |
675 | 110 | llvm::ArrayRef<const Stmt *> ParamMoves = S.getParamMoves(); |
676 | 110 | assert( |
677 | 110 | (ParamMoves.size() == 0 || (ParamMoves.size() == FnArgs.size())) && |
678 | 110 | "ParamMoves and FnArgs should be the same size for coroutine function"); |
679 | 110 | if (ParamMoves.size() == FnArgs.size() && DI) |
680 | 3 | for (const auto Pair : llvm::zip(FnArgs, ParamMoves)) |
681 | 3 | DI->getCoroutineParameterMappings().insert( |
682 | 3 | {std::get<0>(Pair), std::get<1>(Pair)}); |
683 | | |
684 | | // Create parameter copies. We do it before creating a promise, since an |
685 | | // evolution of coroutine TS may allow promise constructor to observe |
686 | | // parameter copies. |
687 | 110 | for (auto *PM : S.getParamMoves()) { |
688 | 54 | EmitStmt(PM); |
689 | 54 | ParamReplacer.addCopy(cast<DeclStmt>(PM)); |
690 | | // TODO: if(CoroParam(...)) need to surround ctor and dtor |
691 | | // for the copy, so that llvm can elide it if the copy is |
692 | | // not needed. |
693 | 54 | } |
694 | | |
695 | 110 | EmitStmt(S.getPromiseDeclStmt()); |
696 | | |
697 | 110 | Address PromiseAddr = GetAddrOfLocalVar(S.getPromiseDecl()); |
698 | 110 | auto *PromiseAddrVoidPtr = |
699 | 110 | new llvm::BitCastInst(PromiseAddr.getPointer(), VoidPtrTy, "", CoroId); |
700 | | // Update CoroId to refer to the promise. We could not do it earlier because |
701 | | // promise local variable was not emitted yet. |
702 | 110 | CoroId->setArgOperand(1, PromiseAddrVoidPtr); |
703 | | |
704 | | // Now we have the promise, initialize the GRO |
705 | 110 | GroManager.EmitGroInit(); |
706 | | |
707 | 110 | EHStack.pushCleanup<CallCoroEnd>(EHCleanup); |
708 | | |
709 | 110 | CurCoro.Data->CurrentAwaitKind = AwaitKind::Init; |
710 | 110 | CurCoro.Data->ExceptionHandler = S.getExceptionHandler(); |
711 | 110 | EmitStmt(S.getInitSuspendStmt()); |
712 | 110 | CurCoro.Data->FinalJD = getJumpDestInCurrentScope(FinalBB); |
713 | | |
714 | 110 | CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal; |
715 | | |
716 | 110 | if (CurCoro.Data->ExceptionHandler) { |
717 | | // If we generated IR to record whether an exception was thrown from |
718 | | // 'await_resume', then use that IR to determine whether the coroutine |
719 | | // body should be skipped. |
720 | | // If we didn't generate the IR (perhaps because 'await_resume' was marked |
721 | | // as 'noexcept'), then we skip this check. |
722 | 23 | BasicBlock *ContBB = nullptr; |
723 | 23 | if (CurCoro.Data->ResumeEHVar) { |
724 | 1 | BasicBlock *BodyBB = createBasicBlock("coro.resumed.body"); |
725 | 1 | ContBB = createBasicBlock("coro.resumed.cont"); |
726 | 1 | Value *SkipBody = Builder.CreateFlagLoad(CurCoro.Data->ResumeEHVar, |
727 | 1 | "coro.resumed.eh"); |
728 | 1 | Builder.CreateCondBr(SkipBody, ContBB, BodyBB); |
729 | 1 | EmitBlock(BodyBB); |
730 | 1 | } |
731 | | |
732 | 23 | auto Loc = S.getBeginLoc(); |
733 | 23 | CXXCatchStmt Catch(Loc, /*exDecl=*/nullptr, |
734 | 23 | CurCoro.Data->ExceptionHandler); |
735 | 23 | auto *TryStmt = |
736 | 23 | CXXTryStmt::Create(getContext(), Loc, S.getBody(), &Catch); |
737 | | |
738 | 23 | EnterCXXTryStmt(*TryStmt); |
739 | 23 | emitBodyAndFallthrough(*this, S, TryStmt->getTryBlock()); |
740 | 23 | ExitCXXTryStmt(*TryStmt); |
741 | | |
742 | 23 | if (ContBB) |
743 | 1 | EmitBlock(ContBB); |
744 | 23 | } |
745 | 87 | else { |
746 | 87 | emitBodyAndFallthrough(*this, S, S.getBody()); |
747 | 87 | } |
748 | | |
749 | | // See if we need to generate final suspend. |
750 | 110 | const bool CanFallthrough = Builder.GetInsertBlock(); |
751 | 110 | const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0; |
752 | 110 | if (CanFallthrough || HasCoreturns88 ) { |
753 | 106 | EmitBlock(FinalBB); |
754 | 106 | CurCoro.Data->CurrentAwaitKind = AwaitKind::Final; |
755 | 106 | EmitStmt(S.getFinalSuspendStmt()); |
756 | 106 | } else { |
757 | | // We don't need FinalBB. Emit it to make sure the block is deleted. |
758 | 4 | EmitBlock(FinalBB, /*IsFinished=*/true); |
759 | 4 | } |
760 | 110 | } |
761 | | |
762 | 0 | EmitBlock(RetBB); |
763 | | // Emit coro.end before getReturnStmt (and parameter destructors), since |
764 | | // resume and destroy parts of the coroutine should not include them. |
765 | 110 | llvm::Function *CoroEnd = CGM.getIntrinsic(llvm::Intrinsic::coro_end); |
766 | 110 | Builder.CreateCall(CoroEnd, |
767 | 110 | {NullPtr, Builder.getFalse(), |
768 | 110 | llvm::ConstantTokenNone::get(CoroEnd->getContext())}); |
769 | | |
770 | 110 | if (Stmt *Ret = S.getReturnStmt()) { |
771 | | // Since we already emitted the return value above, so we shouldn't |
772 | | // emit it again here. |
773 | 76 | if (GroManager.DirectEmit) |
774 | 71 | cast<ReturnStmt>(Ret)->setRetValue(nullptr); |
775 | 76 | EmitStmt(Ret); |
776 | 76 | } |
777 | | |
778 | | // LLVM require the frontend to mark the coroutine. |
779 | 110 | CurFn->setPresplitCoroutine(); |
780 | | |
781 | 110 | if (CXXRecordDecl *RD = FnRetTy->getAsCXXRecordDecl(); |
782 | 110 | RD && RD->hasAttr<CoroOnlyDestroyWhenCompleteAttr>()72 ) |
783 | 2 | CurFn->setCoroDestroyOnlyWhenComplete(); |
784 | 110 | } |
785 | | |
786 | | // Emit coroutine intrinsic and patch up arguments of the token type. |
787 | | RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E, |
788 | 788 | unsigned int IID) { |
789 | 788 | SmallVector<llvm::Value *, 8> Args; |
790 | 788 | switch (IID) { |
791 | 89 | default: |
792 | 89 | break; |
793 | | // The coro.frame builtin is replaced with an SSA value of the coro.begin |
794 | | // intrinsic. |
795 | 420 | case llvm::Intrinsic::coro_frame: { |
796 | 420 | if (CurCoro.Data && CurCoro.Data->CoroBegin) { |
797 | 420 | return RValue::get(CurCoro.Data->CoroBegin); |
798 | 420 | } |
799 | 0 | CGM.Error(E->getBeginLoc(), "this builtin expect that __builtin_coro_begin " |
800 | 0 | "has been used earlier in this function"); |
801 | 0 | auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getPtrTy()); |
802 | 0 | return RValue::get(NullPtr); |
803 | 420 | } |
804 | 117 | case llvm::Intrinsic::coro_size: { |
805 | 117 | auto &Context = getContext(); |
806 | 117 | CanQualType SizeTy = Context.getSizeType(); |
807 | 117 | llvm::IntegerType *T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); |
808 | 117 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_size, T); |
809 | 117 | return RValue::get(Builder.CreateCall(F)); |
810 | 420 | } |
811 | 21 | case llvm::Intrinsic::coro_align: { |
812 | 21 | auto &Context = getContext(); |
813 | 21 | CanQualType SizeTy = Context.getSizeType(); |
814 | 21 | llvm::IntegerType *T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); |
815 | 21 | llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::coro_align, T); |
816 | 21 | return RValue::get(Builder.CreateCall(F)); |
817 | 420 | } |
818 | | // The following three intrinsics take a token parameter referring to a token |
819 | | // returned by earlier call to @llvm.coro.id. Since we cannot represent it in |
820 | | // builtins, we patch it up here. |
821 | 2 | case llvm::Intrinsic::coro_alloc: |
822 | 4 | case llvm::Intrinsic::coro_begin: |
823 | 140 | case llvm::Intrinsic::coro_free: { |
824 | 140 | if (CurCoro.Data && CurCoro.Data->CoroId137 ) { |
825 | 137 | Args.push_back(CurCoro.Data->CoroId); |
826 | 137 | break; |
827 | 137 | } |
828 | 3 | CGM.Error(E->getBeginLoc(), "this builtin expect that __builtin_coro_id has" |
829 | 3 | " been used earlier in this function"); |
830 | | // Fallthrough to the next case to add TokenNone as the first argument. |
831 | 3 | [[fallthrough]]; |
832 | 3 | } |
833 | | // @llvm.coro.suspend takes a token parameter. Add token 'none' as the first |
834 | | // argument. |
835 | 4 | case llvm::Intrinsic::coro_suspend: |
836 | 4 | Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext())); |
837 | 4 | break; |
838 | 788 | } |
839 | 230 | for (const Expr *Arg : E->arguments()) |
840 | 290 | Args.push_back(EmitScalarExpr(Arg)); |
841 | | // @llvm.coro.end takes a token parameter. Add token 'none' as the last |
842 | | // argument. |
843 | 230 | if (IID == llvm::Intrinsic::coro_end) |
844 | 1 | Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext())); |
845 | | |
846 | 230 | llvm::Function *F = CGM.getIntrinsic(IID); |
847 | 230 | llvm::CallInst *Call = Builder.CreateCall(F, Args); |
848 | | |
849 | | // Note: The following code is to enable to emit coro.id and coro.begin by |
850 | | // hand to experiment with coroutines in C. |
851 | | // If we see @llvm.coro.id remember it in the CoroData. We will update |
852 | | // coro.alloc, coro.begin and coro.free intrinsics to refer to it. |
853 | 230 | if (IID == llvm::Intrinsic::coro_id) { |
854 | 3 | createCoroData(*this, CurCoro, Call, E); |
855 | 3 | } |
856 | 227 | else if (IID == llvm::Intrinsic::coro_begin) { |
857 | 2 | if (CurCoro.Data) |
858 | 1 | CurCoro.Data->CoroBegin = Call; |
859 | 2 | } |
860 | 225 | else if (IID == llvm::Intrinsic::coro_free) { |
861 | | // Remember the last coro_free as we need it to build the conditional |
862 | | // deletion of the coroutine frame. |
863 | 136 | if (CurCoro.Data) |
864 | 135 | CurCoro.Data->LastCoroFree = Call; |
865 | 136 | } |
866 | 230 | return RValue::get(Call); |
867 | 788 | } |