1 //===----- CGCoroutine.cpp - Emit LLVM Code for C++ coroutines ------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This contains code dealing with C++ code generation of coroutines.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCleanup.h"
15 #include "CodeGenFunction.h"
16 #include "llvm/ADT/ScopeExit.h"
17 #include "clang/AST/StmtCXX.h"
18 #include "clang/AST/StmtVisitor.h"
19 
20 using namespace clang;
21 using namespace CodeGen;
22 
23 using llvm::Value;
24 using llvm::BasicBlock;
25 
namespace {
// Classifies suspend points so we can emit descriptive basic block labels
// (e.g. "init.ready", "await2.suspend", "final.cleanup") in the IR.
enum class AwaitKind { Init, Normal, Yield, Final };
// Label prefixes for each suspend point, indexed by AwaitKind.
static constexpr llvm::StringLiteral AwaitKindStr[] = {"init", "await", "yield",
                                                       "final"};
}
31 
// Per-function state tracked while emitting a coroutine. Created lazily by
// createCoroData, either from EmitCoroutineBody or from the first
// __builtin_coro_id seen in the function.
struct clang::CodeGen::CGCoroData {
  // What is the current await expression kind and how many
  // await/yield expressions were encountered so far.
  // These are used to generate pretty labels for await expressions in LLVM IR.
  AwaitKind CurrentAwaitKind = AwaitKind::Init;
  unsigned AwaitNum = 0;
  unsigned YieldNum = 0;

  // How many co_return statements are in the coroutine. Used to decide whether
  // we need to add co_return; equivalent at the end of the user authored body.
  unsigned CoreturnCount = 0;

  // A branch to this block is emitted when coroutine needs to suspend.
  llvm::BasicBlock *SuspendBB = nullptr;

  // Stores the jump destination just before the coroutine memory is freed.
  // This is the destination that every suspend point jumps to for the cleanup
  // branch.
  CodeGenFunction::JumpDest CleanupJD;

  // Stores the jump destination just before the final suspend. The co_return
  // statements jumps to this point after calling return_xxx promise member.
  CodeGenFunction::JumpDest FinalJD;

  // Stores the llvm.coro.id emitted in the function so that we can supply it
  // as the first argument to coro.begin, coro.alloc and coro.free intrinsics.
  // Note: llvm.coro.id returns a token that cannot be directly expressed in a
  // builtin.
  llvm::CallInst *CoroId = nullptr;

  // Stores the llvm.coro.begin emitted in the function so that we can replace
  // all coro.frame intrinsics with direct SSA value of coro.begin that returns
  // the address of the coroutine frame of the current coroutine.
  llvm::CallInst *CoroBegin = nullptr;

  // Stores the last emitted coro.free for the deallocate expressions, we use it
  // to wrap dealloc code with if(auto mem = coro.free) dealloc(mem).
  llvm::CallInst *LastCoroFree = nullptr;

  // If coro.id came from the builtin, remember the expression to give better
  // diagnostic. If CoroIdExpr is nullptr, the coro.id was created by
  // EmitCoroutineBody.
  CallExpr const *CoroIdExpr = nullptr;
};
76 
77 // Defining these here allows to keep CGCoroData private to this file.
78 clang::CodeGen::CodeGenFunction::CGCoroInfo::CGCoroInfo() {}
79 CodeGenFunction::CGCoroInfo::~CGCoroInfo() {}
80 
81 static void createCoroData(CodeGenFunction &CGF,
82                            CodeGenFunction::CGCoroInfo &CurCoro,
83                            llvm::CallInst *CoroId,
84                            CallExpr const *CoroIdExpr = nullptr) {
85   if (CurCoro.Data) {
86     if (CurCoro.Data->CoroIdExpr)
87       CGF.CGM.Error(CoroIdExpr->getLocStart(),
88                     "only one __builtin_coro_id can be used in a function");
89     else if (CoroIdExpr)
90       CGF.CGM.Error(CoroIdExpr->getLocStart(),
91                     "__builtin_coro_id shall not be used in a C++ coroutine");
92     else
93       llvm_unreachable("EmitCoroutineBodyStatement called twice?");
94 
95     return;
96   }
97 
98   CurCoro.Data = std::unique_ptr<CGCoroData>(new CGCoroData);
99   CurCoro.Data->CoroId = CoroId;
100   CurCoro.Data->CoroIdExpr = CoroIdExpr;
101 }
102 
103 // Synthesize a pretty name for a suspend point.
104 static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) {
105   unsigned No = 0;
106   switch (Kind) {
107   case AwaitKind::Init:
108   case AwaitKind::Final:
109     break;
110   case AwaitKind::Normal:
111     No = ++Coro.AwaitNum;
112     break;
113   case AwaitKind::Yield:
114     No = ++Coro.YieldNum;
115     break;
116   }
117   SmallString<32> Prefix(AwaitKindStr[static_cast<unsigned>(Kind)]);
118   if (No > 1) {
119     Twine(No).toVector(Prefix);
120   }
121   return Prefix;
122 }
123 
124 // Emit suspend expression which roughly looks like:
125 //
126 //   auto && x = CommonExpr();
127 //   if (!x.await_ready()) {
128 //      llvm_coro_save();
129 //      x.await_suspend(...);     (*)
130 //      llvm_coro_suspend(); (**)
131 //   }
132 //   x.await_resume();
133 //
134 // where the result of the entire expression is the result of x.await_resume()
135 //
136 //   (*) If x.await_suspend return type is bool, it allows to veto a suspend:
137 //      if (x.await_suspend(...))
138 //        llvm_coro_suspend();
139 //
140 //  (**) llvm_coro_suspend() encodes three possible continuations as
141 //       a switch instruction:
142 //
143 //  %where-to = call i8 @llvm.coro.suspend(...)
144 //  switch i8 %where-to, label %coro.ret [ ; jump to epilogue to suspend
145 //    i8 0, label %yield.ready   ; go here when resumed
146 //    i8 1, label %yield.cleanup ; go here when destroyed
147 //  ]
148 //
149 //  See llvm's docs/Coroutines.rst for more details.
150 //
namespace {
  // Aggregate result of emitSuspendExpression: exactly one of the two members
  // is meaningful, selected by the caller via the forLValue flag.
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}
157 static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro,
158                                     CoroutineSuspendExpr const &S,
159                                     AwaitKind Kind, AggValueSlot aggSlot,
160                                     bool ignoreResult, bool forLValue) {
161   auto *E = S.getCommonExpr();
162 
163   auto Binder =
164       CodeGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E);
165   auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); });
166 
167   auto Prefix = buildSuspendPrefixStr(Coro, Kind);
168   BasicBlock *ReadyBlock = CGF.createBasicBlock(Prefix + Twine(".ready"));
169   BasicBlock *SuspendBlock = CGF.createBasicBlock(Prefix + Twine(".suspend"));
170   BasicBlock *CleanupBlock = CGF.createBasicBlock(Prefix + Twine(".cleanup"));
171 
172   // If expression is ready, no need to suspend.
173   CGF.EmitBranchOnBoolExpr(S.getReadyExpr(), ReadyBlock, SuspendBlock, 0);
174 
175   // Otherwise, emit suspend logic.
176   CGF.EmitBlock(SuspendBlock);
177 
178   auto &Builder = CGF.Builder;
179   llvm::Function *CoroSave = CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_save);
180   auto *NullPtr = llvm::ConstantPointerNull::get(CGF.CGM.Int8PtrTy);
181   auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});
182 
183   auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
184   if (SuspendRet != nullptr) {
185     // Veto suspension if requested by bool returning await_suspend.
186     assert(SuspendRet->getType()->isIntegerTy(1) &&
187            "Sema should have already checked that it is void or bool");
188     BasicBlock *RealSuspendBlock =
189         CGF.createBasicBlock(Prefix + Twine(".suspend.bool"));
190     CGF.Builder.CreateCondBr(SuspendRet, RealSuspendBlock, ReadyBlock);
191     SuspendBlock = RealSuspendBlock;
192     CGF.EmitBlock(RealSuspendBlock);
193   }
194 
195   // Emit the suspend point.
196   const bool IsFinalSuspend = (Kind == AwaitKind::Final);
197   llvm::Function *CoroSuspend =
198       CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_suspend);
199   auto *SuspendResult = Builder.CreateCall(
200       CoroSuspend, {SaveCall, Builder.getInt1(IsFinalSuspend)});
201 
202   // Create a switch capturing three possible continuations.
203   auto *Switch = Builder.CreateSwitch(SuspendResult, Coro.SuspendBB, 2);
204   Switch->addCase(Builder.getInt8(0), ReadyBlock);
205   Switch->addCase(Builder.getInt8(1), CleanupBlock);
206 
207   // Emit cleanup for this suspend point.
208   CGF.EmitBlock(CleanupBlock);
209   CGF.EmitBranchThroughCleanup(Coro.CleanupJD);
210 
211   // Emit await_resume expression.
212   CGF.EmitBlock(ReadyBlock);
213   LValueOrRValue Res;
214   if (forLValue)
215     Res.LV = CGF.EmitLValue(S.getResumeExpr());
216   else
217     Res.RV = CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);
218   return Res;
219 }
220 
221 RValue CodeGenFunction::EmitCoawaitExpr(const CoawaitExpr &E,
222                                         AggValueSlot aggSlot,
223                                         bool ignoreResult) {
224   return emitSuspendExpression(*this, *CurCoro.Data, E,
225                                CurCoro.Data->CurrentAwaitKind, aggSlot,
226                                ignoreResult, /*forLValue*/false).RV;
227 }
228 RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E,
229                                         AggValueSlot aggSlot,
230                                         bool ignoreResult) {
231   return emitSuspendExpression(*this, *CurCoro.Data, E, AwaitKind::Yield,
232                                aggSlot, ignoreResult, /*forLValue*/false).RV;
233 }
234 
// Emit a co_return statement: evaluate a void operand for its side effects,
// call the promise's return_xxx member, then branch to the final suspend.
void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) {
  // Count co_returns so EmitCoroutineBody knows whether falling off the end
  // of the user body needs the final-suspend block at all.
  ++CurCoro.Data->CoreturnCount;
  const Expr *RV = S.getOperand();
  if (RV && RV->getType()->isVoidType()) {
    // Make sure to evaluate the expression of a co_return with a void
    // expression for side effects.
    RunCleanupsScope cleanupScope(*this);
    EmitIgnoredExpr(RV);
  }
  // Emit the call to the promise's return_xxx member, then jump to the final
  // suspend point (see CGCoroData::FinalJD).
  EmitStmt(S.getPromiseCall());
  EmitBranchThroughCleanup(CurCoro.Data->FinalJD);
}
247 
248 
249 #ifndef NDEBUG
250 static QualType getCoroutineSuspendExprReturnType(const ASTContext &Ctx,
251   const CoroutineSuspendExpr *E) {
252   const auto *RE = E->getResumeExpr();
253   // Is it possible for RE to be a CXXBindTemporaryExpr wrapping
254   // a MemberCallExpr?
255   assert(isa<CallExpr>(RE) && "unexpected suspend expression type");
256   return cast<CallExpr>(RE)->getCallReturnType(Ctx);
257 }
258 #endif
259 
260 LValue
261 CodeGenFunction::EmitCoawaitLValue(const CoawaitExpr *E) {
262   assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() &&
263          "Can't have a scalar return unless the return type is a "
264          "reference type!");
265   return emitSuspendExpression(*this, *CurCoro.Data, *E,
266                                CurCoro.Data->CurrentAwaitKind, AggValueSlot::ignored(),
267                                /*ignoreResult*/false, /*forLValue*/true).LV;
268 }
269 
270 LValue
271 CodeGenFunction::EmitCoyieldLValue(const CoyieldExpr *E) {
272   assert(getCoroutineSuspendExprReturnType(getContext(), E)->isReferenceType() &&
273          "Can't have a scalar return unless the return type is a "
274          "reference type!");
275   return emitSuspendExpression(*this, *CurCoro.Data, *E,
276                                AwaitKind::Yield, AggValueSlot::ignored(),
277                                /*ignoreResult*/false, /*forLValue*/true).LV;
278 }
279 
280 // Hunts for the parameter reference in the parameter copy/move declaration.
281 namespace {
282 struct GetParamRef : public StmtVisitor<GetParamRef> {
283 public:
284   DeclRefExpr *Expr = nullptr;
285   GetParamRef() {}
286   void VisitDeclRefExpr(DeclRefExpr *E) {
287     assert(Expr == nullptr && "multilple declref in param move");
288     Expr = E;
289   }
290   void VisitStmt(Stmt *S) {
291     for (auto *C : S->children()) {
292       if (C)
293         Visit(C);
294     }
295   }
296 };
297 }
298 
299 // This class replaces references to parameters to their copies by changing
300 // the addresses in CGF.LocalDeclMap and restoring back the original values in
301 // its destructor.
302 
namespace {
  // RAII helper: while alive, redirects LocalDeclMap entries for coroutine
  // parameters to their copies, so that references to a parameter inside the
  // coroutine body resolve to the copy living in the coroutine frame. The
  // destructor attempts to restore the original addresses.
  struct ParamReferenceReplacerRAII {
    CodeGenFunction::DeclMapTy SavedLocals;
    CodeGenFunction::DeclMapTy& LocalDeclMap;

    ParamReferenceReplacerRAII(CodeGenFunction::DeclMapTy &LocalDeclMap)
        : LocalDeclMap(LocalDeclMap) {}

    // Record the original address of the parameter referenced by the copy
    // declaration PM, then redirect the parameter's map entry to the copy.
    void addCopy(DeclStmt const *PM) {
      // Figure out what param it refers to.

      assert(PM->isSingleDecl());
      VarDecl const*VD = static_cast<VarDecl const*>(PM->getSingleDecl());
      Expr const *InitExpr = VD->getInit();
      GetParamRef Visitor;
      Visitor.Visit(const_cast<Expr*>(InitExpr));
      assert(Visitor.Expr);
      auto *DREOrig = cast<DeclRefExpr>(Visitor.Expr);
      auto *PD = DREOrig->getDecl();

      auto it = LocalDeclMap.find(PD);
      assert(it != LocalDeclMap.end() && "parameter is not found");
      SavedLocals.insert({ PD, it->second });

      auto copyIt = LocalDeclMap.find(VD);
      assert(copyIt != LocalDeclMap.end() && "parameter copy is not found");
      it->second = copyIt->getSecond();
    }

    ~ParamReferenceReplacerRAII() {
      // NOTE(review): DenseMap::insert does not overwrite an existing key,
      // and the keys being restored are still present in LocalDeclMap
      // (addCopy only replaced their mapped values). As written this restore
      // appears to be a no-op -- confirm whether erase-then-insert (or direct
      // assignment) was intended.
      for (auto&& SavedLocal : SavedLocals) {
        LocalDeclMap.insert({SavedLocal.first, SavedLocal.second});
      }
    }
  };
}
339 
340 // For WinEH exception representation backend needs to know what funclet coro.end
341 // belongs to. That information is passed in a funclet bundle.
342 static SmallVector<llvm::OperandBundleDef, 1>
343 getBundlesForCoroEnd(CodeGenFunction &CGF) {
344   SmallVector<llvm::OperandBundleDef, 1> BundleList;
345 
346   if (llvm::Instruction *EHPad = CGF.CurrentFuncletPad)
347     BundleList.emplace_back("funclet", EHPad);
348 
349   return BundleList;
350 }
351 
namespace {
// We will insert coro.end to cut any of the destructors for objects that
// do not need to be destroyed once the coroutine is resumed.
// See llvm/docs/Coroutines.rst for more details about coro.end.
struct CallCoroEnd final : public EHScopeStack::Cleanup {
  // EH cleanup: emits llvm.coro.end with a 'true' second argument (the
  // unwind-path flag per Coroutines.rst), tagged with the current funclet
  // when targeting WinEH.
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    auto &CGM = CGF.CGM;
    auto *NullPtr = llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
    llvm::Function *CoroEndFn = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
    // See if we have a funclet bundle to associate coro.end with. (WinEH)
    auto Bundles = getBundlesForCoroEnd(CGF);
    auto *CoroEnd = CGF.Builder.CreateCall(
        CoroEndFn, {NullPtr, CGF.Builder.getTrue()}, Bundles);
    if (Bundles.empty()) {
      // Otherwise, (landingpad model), create a conditional branch that leads
      // either to a cleanup block or a block with EH resume instruction.
      auto *ResumeBB = CGF.getEHResumeBlock(/*cleanup=*/true);
      auto *CleanupContBB = CGF.createBasicBlock("cleanup.cont");
      CGF.Builder.CreateCondBr(CoroEnd, ResumeBB, CleanupContBB);
      CGF.EmitBlock(CleanupContBB);
    }
  }
};
}
376 
377 namespace {
378 // Make sure to call coro.delete on scope exit.
379 struct CallCoroDelete final : public EHScopeStack::Cleanup {
380   Stmt *Deallocate;
381 
382   // Emit "if (coro.free(CoroId, CoroBegin)) Deallocate;"
383 
384   // Note: That deallocation will be emitted twice: once for a normal exit and
385   // once for exceptional exit. This usage is safe because Deallocate does not
386   // contain any declarations. The SubStmtBuilder::makeNewAndDeleteExpr()
387   // builds a single call to a deallocation function which is safe to emit
388   // multiple times.
389   void Emit(CodeGenFunction &CGF, Flags) override {
390     // Remember the current point, as we are going to emit deallocation code
391     // first to get to coro.free instruction that is an argument to a delete
392     // call.
393     BasicBlock *SaveInsertBlock = CGF.Builder.GetInsertBlock();
394 
395     auto *FreeBB = CGF.createBasicBlock("coro.free");
396     CGF.EmitBlock(FreeBB);
397     CGF.EmitStmt(Deallocate);
398 
399     auto *AfterFreeBB = CGF.createBasicBlock("after.coro.free");
400     CGF.EmitBlock(AfterFreeBB);
401 
402     // We should have captured coro.free from the emission of deallocate.
403     auto *CoroFree = CGF.CurCoro.Data->LastCoroFree;
404     if (!CoroFree) {
405       CGF.CGM.Error(Deallocate->getLocStart(),
406                     "Deallocation expressoin does not refer to coro.free");
407       return;
408     }
409 
410     // Get back to the block we were originally and move coro.free there.
411     auto *InsertPt = SaveInsertBlock->getTerminator();
412     CoroFree->moveBefore(InsertPt);
413     CGF.Builder.SetInsertPoint(InsertPt);
414 
415     // Add if (auto *mem = coro.free) Deallocate;
416     auto *NullPtr = llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
417     auto *Cond = CGF.Builder.CreateICmpNE(CoroFree, NullPtr);
418     CGF.Builder.CreateCondBr(Cond, FreeBB, AfterFreeBB);
419 
420     // No longer need old terminator.
421     InsertPt->eraseFromParent();
422     CGF.Builder.SetInsertPoint(AfterFreeBB);
423   }
424   explicit CallCoroDelete(Stmt *DeallocStmt) : Deallocate(DeallocStmt) {}
425 };
426 }
427 
namespace {
// Manages emission of the get_return_object ("gro") variable, splitting it
// into an early alloca (EmitGroAlloca) and a later initialization
// (EmitGroInit) once the promise exists.
struct GetReturnObjectManager {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  const CoroutineBodyStmt &S;

  // Flag alloca guarding the gro cleanup; invalid when no gro variable.
  Address GroActiveFlag;
  CodeGenFunction::AutoVarEmission GroEmission;

  GetReturnObjectManager(CodeGenFunction &CGF, const CoroutineBodyStmt &S)
      : CGF(CGF), Builder(CGF.Builder), S(S), GroActiveFlag(Address::invalid()),
        GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {}

  // The gro variable has to outlive coroutine frame and coroutine promise, but,
  // it can only be initialized after coroutine promise was created, thus, we
  // split its emission in two parts. EmitGroAlloca emits an alloca and sets up
  // cleanups. Later when coroutine promise is available we initialize the gro
  // and sets the flag that the cleanup is now active.

  void EmitGroAlloca() {
    auto *GroDeclStmt = dyn_cast<DeclStmt>(S.getResultDecl());
    if (!GroDeclStmt) {
      // If get_return_object returns void, no need to do an alloca.
      return;
    }

    auto *GroVarDecl = cast<VarDecl>(GroDeclStmt->getSingleDecl());

    // Set GRO flag that it is not initialized yet
    GroActiveFlag =
      CGF.CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(), "gro.active");
    Builder.CreateStore(Builder.getFalse(), GroActiveFlag);

    GroEmission = CGF.EmitAutoVarAlloca(*GroVarDecl);

    // Remember the top of EHStack before emitting the cleanup.
    auto old_top = CGF.EHStack.stable_begin();
    CGF.EmitAutoVarCleanups(GroEmission);
    auto top = CGF.EHStack.stable_begin();

    // Make the cleanup conditional on gro.active: walk the cleanups pushed by
    // EmitAutoVarCleanups (the range between the two saved EHStack marks) and
    // attach the flag to each.
    for (auto b = CGF.EHStack.find(top), e = CGF.EHStack.find(old_top);
      b != e; b++) {
      if (auto *Cleanup = dyn_cast<EHCleanupScope>(&*b)) {
        assert(!Cleanup->hasActiveFlag() && "cleanup already has active flag?");
        Cleanup->setActiveFlag(GroActiveFlag);
        Cleanup->setTestFlagInEHCleanup();
        Cleanup->setTestFlagInNormalCleanup();
      }
    }
  }

  void EmitGroInit() {
    if (!GroActiveFlag.isValid()) {
      // No Gro variable was allocated. Simply emit the call to
      // get_return_object.
      CGF.EmitStmt(S.getResultDecl());
      return;
    }

    // Initialize the gro variable and arm its (previously inactive) cleanup.
    CGF.EmitAutoVarInit(GroEmission);
    Builder.CreateStore(Builder.getTrue(), GroActiveFlag);
  }
};
}
493 
494 static void emitBodyAndFallthrough(CodeGenFunction &CGF,
495                                    const CoroutineBodyStmt &S, Stmt *Body) {
496   CGF.EmitStmt(Body);
497   const bool CanFallthrough = CGF.Builder.GetInsertBlock();
498   if (CanFallthrough)
499     if (Stmt *OnFallthrough = S.getFallthroughHandler())
500       CGF.EmitStmt(OnFallthrough);
501 }
502 
// Emit the full body of a coroutine: frame allocation (guarded by
// llvm.coro.alloc so the backend may elide it), parameter copies, promise
// construction, the gro variable, initial/final suspends, the user body,
// and the closing llvm.coro.end / return-statement sequence.
void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
  auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
  auto &TI = CGM.getContext().getTargetInfo();
  // Alignment (in chars) passed to coro.id: what operator new guarantees.
  unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();

  auto *EntryBB = Builder.GetInsertBlock();
  auto *AllocBB = createBasicBlock("coro.alloc");
  auto *InitBB = createBasicBlock("coro.init");
  auto *FinalBB = createBasicBlock("coro.final");
  auto *RetBB = createBasicBlock("coro.ret");

  // The promise pointer argument is filled in later (below), once the
  // promise local variable has been emitted.
  auto *CoroId = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::coro_id),
      {Builder.getInt32(NewAlign), NullPtr, NullPtr, NullPtr});
  createCoroData(*this, CurCoro, CoroId);
  CurCoro.Data->SuspendBB = RetBB;

  // Backend is allowed to elide memory allocations, to help it, emit
  // auto mem = coro.alloc() ? 0 : ... allocation code ...;
  auto *CoroAlloc = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::coro_alloc), {CoroId});

  Builder.CreateCondBr(CoroAlloc, AllocBB, InitBB);

  EmitBlock(AllocBB);
  auto *AllocateCall = EmitScalarExpr(S.getAllocate());
  auto *AllocOrInvokeContBB = Builder.GetInsertBlock();

  // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
  if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) {
    auto *RetOnFailureBB = createBasicBlock("coro.ret.on.failure");

    // See if allocation was successful.
    auto *NullPtr = llvm::ConstantPointerNull::get(Int8PtrTy);
    auto *Cond = Builder.CreateICmpNE(AllocateCall, NullPtr);
    Builder.CreateCondBr(Cond, InitBB, RetOnFailureBB);

    // If not, return OnAllocFailure object.
    EmitBlock(RetOnFailureBB);
    EmitStmt(RetOnAllocFailure);
  }
  else {
    Builder.CreateBr(InitBB);
  }

  EmitBlock(InitBB);

  // Pass the result of the allocation to coro.begin. The PHI selects null
  // when coro.alloc said the allocation can be elided (EntryBB path).
  auto *Phi = Builder.CreatePHI(VoidPtrTy, 2);
  Phi->addIncoming(NullPtr, EntryBB);
  Phi->addIncoming(AllocateCall, AllocOrInvokeContBB);
  auto *CoroBegin = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::coro_begin), {CoroId, Phi});
  CurCoro.Data->CoroBegin = CoroBegin;

  GetReturnObjectManager GroManager(*this, S);
  GroManager.EmitGroAlloca();

  CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(RetBB);
  {
    ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap);
    CodeGenFunction::RunCleanupsScope ResumeScope(*this);
    EHStack.pushCleanup<CallCoroDelete>(NormalAndEHCleanup, S.getDeallocate());

    // Create parameter copies. We do it before creating a promise, since an
    // evolution of coroutine TS may allow promise constructor to observe
    // parameter copies.
    for (auto *PM : S.getParamMoves()) {
      EmitStmt(PM);
      ParamReplacer.addCopy(cast<DeclStmt>(PM));
      // TODO: if(CoroParam(...)) need to surround ctor and dtor
      // for the copy, so that llvm can elide it if the copy is
      // not needed.
    }

    EmitStmt(S.getPromiseDeclStmt());

    Address PromiseAddr = GetAddrOfLocalVar(S.getPromiseDecl());
    auto *PromiseAddrVoidPtr =
        new llvm::BitCastInst(PromiseAddr.getPointer(), VoidPtrTy, "", CoroId);
    // Update CoroId to refer to the promise. We could not do it earlier because
    // promise local variable was not emitted yet.
    CoroId->setArgOperand(1, PromiseAddrVoidPtr);

    // Now we have the promise, initialize the GRO
    GroManager.EmitGroInit();

    EHStack.pushCleanup<CallCoroEnd>(EHCleanup);

    CurCoro.Data->CurrentAwaitKind = AwaitKind::Init;
    EmitStmt(S.getInitSuspendStmt());
    CurCoro.Data->FinalJD = getJumpDestInCurrentScope(FinalBB);

    CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal;

    // If an exception handler was provided, wrap the user body in an
    // implicit try/catch that dispatches to it.
    if (auto *OnException = S.getExceptionHandler()) {
      auto Loc = S.getLocStart();
      CXXCatchStmt Catch(Loc, /*exDecl=*/nullptr, OnException);
      auto *TryStmt = CXXTryStmt::Create(getContext(), Loc, S.getBody(), &Catch);

      EnterCXXTryStmt(*TryStmt);
      emitBodyAndFallthrough(*this, S, TryStmt->getTryBlock());
      ExitCXXTryStmt(*TryStmt);
    }
    else {
      emitBodyAndFallthrough(*this, S, S.getBody());
    }

    // See if we need to generate final suspend.
    const bool CanFallthrough = Builder.GetInsertBlock();
    const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0;
    if (CanFallthrough || HasCoreturns) {
      EmitBlock(FinalBB);
      CurCoro.Data->CurrentAwaitKind = AwaitKind::Final;
      EmitStmt(S.getFinalSuspendStmt());
    } else {
      // We don't need FinalBB. Emit it to make sure the block is deleted.
      EmitBlock(FinalBB, /*IsFinished=*/true);
    }
  }

  EmitBlock(RetBB);
  // Emit coro.end before getReturnStmt (and parameter destructors), since
  // resume and destroy parts of the coroutine should not include them.
  llvm::Function *CoroEnd = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
  Builder.CreateCall(CoroEnd, {NullPtr, Builder.getFalse()});

  if (Stmt *Ret = S.getReturnStmt())
    EmitStmt(Ret);
}
633 
634 // Emit coroutine intrinsic and patch up arguments of the token type.
635 RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
636                                                unsigned int IID) {
637   SmallVector<llvm::Value *, 8> Args;
638   switch (IID) {
639   default:
640     break;
641   // The coro.frame builtin is replaced with an SSA value of the coro.begin
642   // intrinsic.
643   case llvm::Intrinsic::coro_frame: {
644     if (CurCoro.Data && CurCoro.Data->CoroBegin) {
645       return RValue::get(CurCoro.Data->CoroBegin);
646     }
647     CGM.Error(E->getLocStart(), "this builtin expect that __builtin_coro_begin "
648       "has been used earlier in this function");
649     auto NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
650     return RValue::get(NullPtr);
651   }
652   // The following three intrinsics take a token parameter referring to a token
653   // returned by earlier call to @llvm.coro.id. Since we cannot represent it in
654   // builtins, we patch it up here.
655   case llvm::Intrinsic::coro_alloc:
656   case llvm::Intrinsic::coro_begin:
657   case llvm::Intrinsic::coro_free: {
658     if (CurCoro.Data && CurCoro.Data->CoroId) {
659       Args.push_back(CurCoro.Data->CoroId);
660       break;
661     }
662     CGM.Error(E->getLocStart(), "this builtin expect that __builtin_coro_id has"
663                                 " been used earlier in this function");
664     // Fallthrough to the next case to add TokenNone as the first argument.
665     LLVM_FALLTHROUGH;
666   }
667   // @llvm.coro.suspend takes a token parameter. Add token 'none' as the first
668   // argument.
669   case llvm::Intrinsic::coro_suspend:
670     Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext()));
671     break;
672   }
673   for (auto &Arg : E->arguments())
674     Args.push_back(EmitScalarExpr(Arg));
675 
676   llvm::Value *F = CGM.getIntrinsic(IID);
677   llvm::CallInst *Call = Builder.CreateCall(F, Args);
678 
679   // Note: The following code is to enable to emit coro.id and coro.begin by
680   // hand to experiment with coroutines in C.
681   // If we see @llvm.coro.id remember it in the CoroData. We will update
682   // coro.alloc, coro.begin and coro.free intrinsics to refer to it.
683   if (IID == llvm::Intrinsic::coro_id) {
684     createCoroData(*this, CurCoro, Call, E);
685   }
686   else if (IID == llvm::Intrinsic::coro_begin) {
687     if (CurCoro.Data)
688       CurCoro.Data->CoroBegin = Call;
689   }
690   else if (IID == llvm::Intrinsic::coro_free) {
691     // Remember the last coro_free as we need it to build the conditional
692     // deletion of the coroutine frame.
693     if (CurCoro.Data)
694       CurCoro.Data->LastCoroFree = Call;
695   }
696   return RValue::get(Call);
697 }
698