1 //===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This pass builds the coroutine frame and outlines resume and destroy parts
9 // of the coroutine into separate functions.
10 //
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the coroutine,
16 // add them to the current SCC and restart the IPO pipeline to optimize the
17 // coroutine subfunctions we extracted before proceeding to the caller of the
18 // coroutine.
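//
// As a rough illustration (names are illustrative, not mandated): a switch-ABI
// coroutine @f is kept as the ramp function @f and gains outlined clones such
// as @f.resume, @f.destroy and @f.cleanup, each taking a pointer to the
// coroutine frame as its only argument.
//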
19 //===----------------------------------------------------------------------===//
20 
21 #include "llvm/Transforms/Coroutines/CoroSplit.h"
22 #include "CoroInstr.h"
23 #include "CoroInternal.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/StringRef.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/Analysis/CallGraph.h"
30 #include "llvm/Analysis/CallGraphSCCPass.h"
31 #include "llvm/IR/Argument.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/CFG.h"
35 #include "llvm/IR/CallingConv.h"
36 #include "llvm/IR/Constants.h"
37 #include "llvm/IR/DataLayout.h"
38 #include "llvm/IR/DerivedTypes.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GlobalValue.h"
41 #include "llvm/IR/GlobalVariable.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/IR/InstIterator.h"
44 #include "llvm/IR/InstrTypes.h"
45 #include "llvm/IR/Instruction.h"
46 #include "llvm/IR/Instructions.h"
47 #include "llvm/IR/IntrinsicInst.h"
48 #include "llvm/IR/LLVMContext.h"
49 #include "llvm/IR/LegacyPassManager.h"
50 #include "llvm/IR/Module.h"
51 #include "llvm/IR/Type.h"
52 #include "llvm/IR/Value.h"
53 #include "llvm/IR/Verifier.h"
54 #include "llvm/InitializePasses.h"
55 #include "llvm/Pass.h"
56 #include "llvm/Support/Casting.h"
57 #include "llvm/Support/Debug.h"
58 #include "llvm/Support/PrettyStackTrace.h"
59 #include "llvm/Support/raw_ostream.h"
60 #include "llvm/Transforms/Scalar.h"
61 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
62 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
63 #include "llvm/Transforms/Utils/Cloning.h"
64 #include "llvm/Transforms/Utils/Local.h"
65 #include "llvm/Transforms/Utils/ValueMapper.h"
66 #include <cassert>
67 #include <cstddef>
68 #include <cstdint>
69 #include <initializer_list>
70 #include <iterator>
71 
72 using namespace llvm;
73 
74 #define DEBUG_TYPE "coro-split"
75 
76 namespace {
77 
/// A little helper class for building the various clones of a coroutine
/// (resume, destroy, cleanup, continuation, async).
79 class CoroCloner {
80 public:
81   enum class Kind {
82     /// The shared resume function for a switch lowering.
83     SwitchResume,
84 
85     /// The shared unwind function for a switch lowering.
86     SwitchUnwind,
87 
88     /// The shared cleanup function for a switch lowering.
89     SwitchCleanup,
90 
91     /// An individual continuation function.
92     Continuation,
93 
94     /// An async resume function.
95     Async,
96   };
97 
98 private:
99   Function &OrigF;
100   Function *NewF;
101   const Twine &Suffix;
102   coro::Shape &Shape;
103   Kind FKind;
104   ValueToValueMapTy VMap;
105   IRBuilder<> Builder;
106   Value *NewFramePtr = nullptr;
107 
108   /// The active suspend instruction; meaningful only for continuation and async
109   /// ABIs.
110   AnyCoroSuspendInst *ActiveSuspend = nullptr;
111 
112 public:
113   /// Create a cloner for a switch lowering.
114   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
115              Kind FKind)
116     : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
117       FKind(FKind), Builder(OrigF.getContext()) {
118     assert(Shape.ABI == coro::ABI::Switch);
119   }
120 
121   /// Create a cloner for a continuation lowering.
122   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
123              Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
124       : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
125         FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
126         Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
127     assert(Shape.ABI == coro::ABI::Retcon ||
128            Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
129     assert(NewF && "need existing function for continuation");
130     assert(ActiveSuspend && "need active suspend point for continuation");
131   }
132 
133   Function *getFunction() const {
134     assert(NewF != nullptr && "declaration not yet set");
135     return NewF;
136   }
137 
138   void create();
139 
140 private:
141   bool isSwitchDestroyFunction() {
142     switch (FKind) {
143     case Kind::Async:
144     case Kind::Continuation:
145     case Kind::SwitchResume:
146       return false;
147     case Kind::SwitchUnwind:
148     case Kind::SwitchCleanup:
149       return true;
150     }
151     llvm_unreachable("Unknown CoroCloner::Kind enum");
152   }
153 
154   void replaceEntryBlock();
155   Value *deriveNewFramePointer();
156   void replaceRetconOrAsyncSuspendUses();
157   void replaceCoroSuspends();
158   void replaceCoroEnds();
159   void replaceSwiftErrorOps();
160   void handleFinalSuspend();
161 };
162 
163 } // end anonymous namespace
164 
165 static void maybeFreeRetconStorage(IRBuilder<> &Builder,
166                                    const coro::Shape &Shape, Value *FramePtr,
167                                    CallGraph *CG) {
168   assert(Shape.ABI == coro::ABI::Retcon ||
169          Shape.ABI == coro::ABI::RetconOnce);
170   if (Shape.RetconLowering.IsFrameInlineInStorage)
171     return;
172 
173   Shape.emitDealloc(Builder, FramePtr, CG);
174 }
175 
176 /// Replace an llvm.coro.end.async.
/// Inlines the call to the must-tail-call function if there is one.
178 /// \returns true if cleanup of the coro.end block is needed, false otherwise.
179 static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
180   IRBuilder<> Builder(End);
181 
182   auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
183   if (!EndAsync) {
184     Builder.CreateRetVoid();
185     return true /*needs cleanup of coro.end block*/;
186   }
187 
188   auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
189   if (!MustTailCallFunc) {
190     Builder.CreateRetVoid();
191     return true /*needs cleanup of coro.end block*/;
192   }
193 
194   // Move the must tail call from the predecessor block into the end block.
195   auto *CoroEndBlock = End->getParent();
196   auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
197   assert(MustTailCallFuncBlock && "Must have a single predecessor block");
198   auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
199   auto *MustTailCall = cast<CallInst>(&*std::prev(It));
200   CoroEndBlock->getInstList().splice(
201       End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
202 
203   // Insert the return instruction.
204   Builder.SetInsertPoint(End);
205   Builder.CreateRetVoid();
206   InlineFunctionInfo FnInfo;
207 
  // Remove the rest of the block by splitting it into an unreachable block.
209   auto *BB = End->getParent();
210   BB->splitBasicBlock(End);
211   BB->getTerminator()->eraseFromParent();
212 
213   auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
214   assert(InlineRes.isSuccess() && "Expected inlining to succeed");
215   (void)InlineRes;
216 
217   // We have cleaned up the coro.end block above.
218   return false;
219 }
220 
221 /// Replace a non-unwind call to llvm.coro.end.
222 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
223                                       const coro::Shape &Shape, Value *FramePtr,
224                                       bool InResume, CallGraph *CG) {
225   // Start inserting right before the coro.end.
226   IRBuilder<> Builder(End);
227 
228   // Create the return instruction.
229   switch (Shape.ABI) {
230   // The cloned functions in switch-lowering always return void.
231   case coro::ABI::Switch:
232     // coro.end doesn't immediately end the coroutine in the main function
233     // in this lowering, because we need to deallocate the coroutine.
234     if (!InResume)
235       return;
236     Builder.CreateRetVoid();
237     break;
238 
  // In async lowering, this inserts the return (possibly preceded by an
  // inlined musttail call).
240   case coro::ABI::Async: {
241     bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
242     if (!CoroEndBlockNeedsCleanup)
243       return;
244     break;
245   }
246 
247   // In unique continuation lowering, the continuations always return void.
248   // But we may have implicitly allocated storage.
249   case coro::ABI::RetconOnce:
250     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
251     Builder.CreateRetVoid();
252     break;
253 
254   // In non-unique continuation lowering, we signal completion by returning
255   // a null continuation.
256   case coro::ABI::Retcon: {
257     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
258     auto RetTy = Shape.getResumeFunctionType()->getReturnType();
259     auto RetStructTy = dyn_cast<StructType>(RetTy);
260     PointerType *ContinuationTy =
261       cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
262 
263     Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
264     if (RetStructTy) {
265       ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
266                                               ReturnValue, 0);
267     }
268     Builder.CreateRet(ReturnValue);
269     break;
270   }
271   }
272 
  // Remove the rest of the block by splitting it into an unreachable block.
274   auto *BB = End->getParent();
275   BB->splitBasicBlock(End);
276   BB->getTerminator()->eraseFromParent();
277 }
278 
279 /// Replace an unwind call to llvm.coro.end.
280 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
281                                  Value *FramePtr, bool InResume,
282                                  CallGraph *CG) {
283   IRBuilder<> Builder(End);
284 
285   switch (Shape.ABI) {
286   // In switch-lowering, this does nothing in the main function.
287   case coro::ABI::Switch:
288     if (!InResume)
289       return;
290     break;
291   // In async lowering this does nothing.
292   case coro::ABI::Async:
293     break;
294   // In continuation-lowering, this frees the continuation storage.
295   case coro::ABI::Retcon:
296   case coro::ABI::RetconOnce:
297     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
298     break;
299   }
300 
  // If coro.end has an associated bundle, add a cleanupret instruction.
302   if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
303     auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
304     auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
305     End->getParent()->splitBasicBlock(End);
306     CleanupRet->getParent()->getTerminator()->eraseFromParent();
307   }
308 }
309 
310 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
311                            Value *FramePtr, bool InResume, CallGraph *CG) {
312   if (End->isUnwind())
313     replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
314   else
315     replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
316 
317   auto &Context = End->getContext();
318   End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
319                                    : ConstantInt::getFalse(Context));
320   End->eraseFromParent();
321 }
322 
323 // Create an entry block for a resume function with a switch that will jump to
324 // suspend points.
325 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
326   assert(Shape.ABI == coro::ABI::Switch);
327   LLVMContext &C = F.getContext();
328 
329   // resume.entry:
330   //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
331   //  i32 2
  //  %index = load i32, i32* %index.addr
333   //  switch i32 %index, label %unreachable [
334   //    i32 0, label %resume.0
335   //    i32 1, label %resume.1
336   //    ...
337   //  ]
338 
339   auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
340   auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
341 
342   IRBuilder<> Builder(NewEntry);
343   auto *FramePtr = Shape.FramePtr;
344   auto *FrameTy = Shape.FrameTy;
345   auto *GepIndex = Builder.CreateStructGEP(
346       FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
347   auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
348   auto *Switch =
349       Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
350   Shape.SwitchLowering.ResumeSwitch = Switch;
351 
352   size_t SuspendIndex = 0;
353   for (auto *AnyS : Shape.CoroSuspends) {
354     auto *S = cast<CoroSuspendInst>(AnyS);
355     ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
356 
357     // Replace CoroSave with a store to Index:
358     //    %index.addr = getelementptr %f.frame... (index field number)
359     //    store i32 0, i32* %index.addr1
360     auto *Save = S->getCoroSave();
361     Builder.SetInsertPoint(Save);
362     if (S->isFinal()) {
363       // Final suspend point is represented by storing zero in ResumeFnAddr.
364       auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
365                                  coro::Shape::SwitchFieldIndex::Resume,
366                                   "ResumeFn.addr");
367       auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
368           cast<PointerType>(GepIndex->getType())->getElementType()));
369       Builder.CreateStore(NullPtr, GepIndex);
370     } else {
371       auto *GepIndex = Builder.CreateStructGEP(
372           FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
373       Builder.CreateStore(IndexVal, GepIndex);
374     }
375     Save->replaceAllUsesWith(ConstantTokenNone::get(C));
376     Save->eraseFromParent();
377 
378     // Split block before and after coro.suspend and add a jump from an entry
379     // switch:
380     //
381     //  whateverBB:
382     //    whatever
383     //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
    //    switch i8 %0, label %suspend [i8 0, label %resume
385     //                                 i8 1, label %cleanup]
386     // becomes:
387     //
388     //  whateverBB:
389     //     whatever
390     //     br label %resume.0.landing
391     //
392     //  resume.0: ; <--- jump from the switch in the resume.entry
393     //     %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
394     //     br label %resume.0.landing
395     //
396     //  resume.0.landing:
    //     %1 = phi i8 [-1, %whateverBB], [%0, %resume.0]
    //     switch i8 %1, label %suspend [i8 0, label %resume
399     //                                    i8 1, label %cleanup]
400 
401     auto *SuspendBB = S->getParent();
402     auto *ResumeBB =
403         SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
404     auto *LandingBB = ResumeBB->splitBasicBlock(
405         S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
406     Switch->addCase(IndexVal, ResumeBB);
407 
408     cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
409     auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
410     S->replaceAllUsesWith(PN);
411     PN->addIncoming(Builder.getInt8(-1), SuspendBB);
412     PN->addIncoming(S, ResumeBB);
413 
414     ++SuspendIndex;
415   }
416 
417   Builder.SetInsertPoint(UnreachBB);
418   Builder.CreateUnreachable();
419 
420   Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
421 }
422 
423 
424 // Rewrite final suspend point handling. We do not use suspend index to
425 // represent the final suspend point. Instead we zero-out ResumeFnAddr in the
426 // coroutine frame, since it is undefined behavior to resume a coroutine
427 // suspended at the final suspend point. Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point (if present) is always the last element of the CoroSuspends array).
// In the destroy function, we add a code sequence to check if ResumeFnAddress
// is null, and if so, jump to the appropriate label to handle cleanup from the
// final suspend point.
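//
// A minimal sketch of the check emitted in the destroy/cleanup clones (field
// indices and labels are illustrative):
//
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 0
//   %ResumeFn = load void (%f.Frame*)*, void (%f.Frame*)** %ResumeFn.addr
//   %is.final = icmp eq void (%f.Frame*)* %ResumeFn, null
//   br i1 %is.final, label %final.cleanup, label %Switch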
433 void CoroCloner::handleFinalSuspend() {
434   assert(Shape.ABI == coro::ABI::Switch &&
435          Shape.SwitchLowering.HasFinalSuspend);
436   auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
437   auto FinalCaseIt = std::prev(Switch->case_end());
438   BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
439   Switch->removeCase(FinalCaseIt);
440   if (isSwitchDestroyFunction()) {
441     BasicBlock *OldSwitchBB = Switch->getParent();
442     auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
443     Builder.SetInsertPoint(OldSwitchBB->getTerminator());
444     auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
445                                        coro::Shape::SwitchFieldIndex::Resume,
446                                              "ResumeFn.addr");
447     auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
448                                     GepIndex);
449     auto *Cond = Builder.CreateIsNull(Load);
450     Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
451     OldSwitchBB->getTerminator()->eraseFromParent();
452   }
453 }
454 
455 static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
456                                         const Twine &Suffix,
457                                         Module::iterator InsertBefore) {
458   Module *M = OrigF.getParent();
459   auto *FnTy = Shape.getResumeFunctionType();
460 
461   Function *NewF =
462       Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
463                        OrigF.getName() + Suffix);
464   NewF->addParamAttr(0, Attribute::NonNull);
465 
466   // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
468   if (Shape.ABI != coro::ABI::Async)
469     NewF->addParamAttr(0, Attribute::NoAlias);
470 
471   M->getFunctionList().insert(InsertBefore, NewF);
472 
473   return NewF;
474 }
475 
476 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the
477 /// arguments to the continuation function.
478 ///
479 /// This assumes that the builder has a meaningful insertion point.
480 void CoroCloner::replaceRetconOrAsyncSuspendUses() {
481   assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
482          Shape.ABI == coro::ABI::Async);
483 
484   auto NewS = VMap[ActiveSuspend];
485   if (NewS->use_empty()) return;
486 
487   // Copy out all the continuation arguments after the buffer pointer into
488   // an easily-indexed data structure for convenience.
489   SmallVector<Value*, 8> Args;
490   // The async ABI includes all arguments -- including the first argument.
491   bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
492   for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
493             E = NewF->arg_end();
494        I != E; ++I)
495     Args.push_back(&*I);
496 
497   // If the suspend returns a single scalar value, we can just do a simple
498   // replacement.
499   if (!isa<StructType>(NewS->getType())) {
500     assert(Args.size() == 1);
501     NewS->replaceAllUsesWith(Args.front());
502     return;
503   }
504 
505   // Try to peephole extracts of an aggregate return.
506   for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
507     auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
508     if (!EVI || EVI->getNumIndices() != 1)
509       continue;
510 
511     EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
512     EVI->eraseFromParent();
513   }
514 
515   // If we have no remaining uses, we're done.
516   if (NewS->use_empty()) return;
517 
518   // Otherwise, we need to create an aggregate.
519   Value *Agg = UndefValue::get(NewS->getType());
520   for (size_t I = 0, E = Args.size(); I != E; ++I)
521     Agg = Builder.CreateInsertValue(Agg, Args[I], I);
522 
523   NewS->replaceAllUsesWith(Agg);
524 }
525 
526 void CoroCloner::replaceCoroSuspends() {
527   Value *SuspendResult;
528 
529   switch (Shape.ABI) {
530   // In switch lowering, replace coro.suspend with the appropriate value
531   // for the type of function we're extracting.
532   // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point; replacing it with (1) will
534   // result in control flow proceeding to a cleanup label associated with this
535   // suspend point.
536   case coro::ABI::Switch:
537     SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
538     break;
539 
540   // In async lowering there are no uses of the result.
541   case coro::ABI::Async:
542     return;
543 
544   // In returned-continuation lowering, the arguments from earlier
545   // continuations are theoretically arbitrary, and they should have been
546   // spilled.
547   case coro::ABI::RetconOnce:
548   case coro::ABI::Retcon:
549     return;
550   }
551 
552   for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
553     // The active suspend was handled earlier.
554     if (CS == ActiveSuspend) continue;
555 
556     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
557     MappedCS->replaceAllUsesWith(SuspendResult);
558     MappedCS->eraseFromParent();
559   }
560 }
561 
562 void CoroCloner::replaceCoroEnds() {
563   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
564     // We use a null call graph because there's no call graph node for
565     // the cloned function yet.  We'll just be rebuilding that later.
566     auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
567     replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
568   }
569 }
570 
571 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
572                                  ValueToValueMapTy *VMap) {
573   Value *CachedSlot = nullptr;
574   auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
575     if (CachedSlot) {
576       assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
577              "multiple swifterror slots in function with different types");
578       return CachedSlot;
579     }
580 
581     // Check if the function has a swifterror argument.
582     for (auto &Arg : F.args()) {
583       if (Arg.isSwiftError()) {
584         CachedSlot = &Arg;
585         assert(Arg.getType()->getPointerElementType() == ValueTy &&
586                "swifterror argument does not have expected type");
587         return &Arg;
588       }
589     }
590 
591     // Create a swifterror alloca.
592     IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
593     auto Alloca = Builder.CreateAlloca(ValueTy);
594     Alloca->setSwiftError(true);
595 
596     CachedSlot = Alloca;
597     return Alloca;
598   };
599 
600   for (CallInst *Op : Shape.SwiftErrorOps) {
601     auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
602     IRBuilder<> Builder(MappedOp);
603 
604     // If there are no arguments, this is a 'get' operation.
605     Value *MappedResult;
606     if (Op->getNumArgOperands() == 0) {
607       auto ValueTy = Op->getType();
608       auto Slot = getSwiftErrorSlot(ValueTy);
609       MappedResult = Builder.CreateLoad(ValueTy, Slot);
610     } else {
611       assert(Op->getNumArgOperands() == 1);
612       auto Value = MappedOp->getArgOperand(0);
613       auto ValueTy = Value->getType();
614       auto Slot = getSwiftErrorSlot(ValueTy);
615       Builder.CreateStore(Value, Slot);
616       MappedResult = Slot;
617     }
618 
619     MappedOp->replaceAllUsesWith(MappedResult);
620     MappedOp->eraseFromParent();
621   }
622 
623   // If we're updating the original function, we've invalidated SwiftErrorOps.
624   if (VMap == nullptr) {
625     Shape.SwiftErrorOps.clear();
626   }
627 }
628 
629 void CoroCloner::replaceSwiftErrorOps() {
630   ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
631 }
632 
633 void CoroCloner::replaceEntryBlock() {
634   // In the original function, the AllocaSpillBlock is a block immediately
635   // following the allocation of the frame object which defines GEPs for
636   // all the allocas that have been moved into the frame, and it ends by
637   // branching to the original beginning of the coroutine.  Make this
638   // the entry block of the cloned function.
639   auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
640   auto *OldEntry = &NewF->getEntryBlock();
641   Entry->setName("entry" + Suffix);
642   Entry->moveBefore(OldEntry);
643   Entry->getTerminator()->eraseFromParent();
644 
645   // Clear all predecessors of the new entry block.  There should be
646   // exactly one predecessor, which we created when splitting out
647   // AllocaSpillBlock to begin with.
648   assert(Entry->hasOneUse());
649   auto BranchToEntry = cast<BranchInst>(Entry->user_back());
650   assert(BranchToEntry->isUnconditional());
651   Builder.SetInsertPoint(BranchToEntry);
652   Builder.CreateUnreachable();
653   BranchToEntry->eraseFromParent();
654 
655   // Branch from the entry to the appropriate place.
656   Builder.SetInsertPoint(Entry);
657   switch (Shape.ABI) {
658   case coro::ABI::Switch: {
659     // In switch-lowering, we built a resume-entry block in the original
660     // function.  Make the entry block branch to this.
661     auto *SwitchBB =
662       cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
663     Builder.CreateBr(SwitchBB);
664     break;
665   }
666   case coro::ABI::Async:
667   case coro::ABI::Retcon:
668   case coro::ABI::RetconOnce: {
669     // In continuation ABIs, we want to branch to immediately after the
670     // active suspend point.  Earlier phases will have put the suspend in its
671     // own basic block, so just thread our jump directly to its successor.
672     assert((Shape.ABI == coro::ABI::Async &&
673             isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
674            ((Shape.ABI == coro::ABI::Retcon ||
675              Shape.ABI == coro::ABI::RetconOnce) &&
676             isa<CoroSuspendRetconInst>(ActiveSuspend)));
677     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
678     auto Branch = cast<BranchInst>(MappedCS->getNextNode());
679     assert(Branch->isUnconditional());
680     Builder.CreateBr(Branch->getSuccessor(0));
681     break;
682   }
683   }
684 
685   // Any alloca that's still being used but not reachable from the new entry
686   // needs to be moved to the new entry.
687   Function *F = OldEntry->getParent();
688   DominatorTree DT{*F};
689   for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
690     Instruction &I = *IT++;
691     if (!isa<AllocaInst>(&I) || I.use_empty())
692       continue;
693     if (DT.isReachableFromEntry(I.getParent()))
694       continue;
695     I.moveBefore(*Entry, Entry->getFirstInsertionPt());
696   }
697 }
698 
699 /// Derive the value of the new frame pointer.
700 Value *CoroCloner::deriveNewFramePointer() {
701   // Builder should be inserting to the front of the new entry block.
702 
703   switch (Shape.ABI) {
704   // In switch-lowering, the argument is the frame pointer.
705   case coro::ABI::Switch:
706     return &*NewF->arg_begin();
707   // In async-lowering, one of the arguments is an async context as determined
708   // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
709   // the resume function from the async context projection function associated
710   // with the active suspend. The frame is located as a tail to the async
711   // context header.
712   case coro::ABI::Async: {
713     auto *CalleeContext = NewF->getArg(Shape.AsyncLowering.ContextArgNo);
714     auto *FramePtrTy = Shape.FrameTy->getPointerTo();
715     auto *ProjectionFunc = cast<CoroSuspendAsyncInst>(ActiveSuspend)
716                                ->getAsyncContextProjectionFunction();
717     auto DbgLoc =
718         cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
719     // Calling i8* (i8*)
720     auto *CallerContext = Builder.CreateCall(
721         cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
722         ProjectionFunc, CalleeContext);
723     CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
724     CallerContext->setDebugLoc(DbgLoc);
725     // The frame is located after the async_context header.
726     auto &Context = Builder.getContext();
727     auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
728         Type::getInt8Ty(Context), CallerContext,
729         Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
730     // Inline the projection function.
731     InlineFunctionInfo InlineInfo;
732     auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
733     assert(InlineRes.isSuccess());
734     (void)InlineRes;
735     return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
736   }
737   // In continuation-lowering, the argument is the opaque storage.
738   case coro::ABI::Retcon:
739   case coro::ABI::RetconOnce: {
740     Argument *NewStorage = &*NewF->arg_begin();
741     auto FramePtrTy = Shape.FrameTy->getPointerTo();
742 
    // If the storage is inline, just bitcast the storage to the frame pointer
    // type.
744     if (Shape.RetconLowering.IsFrameInlineInStorage)
745       return Builder.CreateBitCast(NewStorage, FramePtrTy);
746 
747     // Otherwise, load the real frame from the opaque storage.
748     auto FramePtrPtr =
749       Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
750     return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
751   }
752   }
753   llvm_unreachable("bad ABI");
754 }
755 
756 static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
757                                  unsigned ParamIndex,
758                                  uint64_t Size, Align Alignment) {
759   AttrBuilder ParamAttrs;
760   ParamAttrs.addAttribute(Attribute::NonNull);
761   ParamAttrs.addAttribute(Attribute::NoAlias);
762   ParamAttrs.addAlignmentAttr(Alignment);
763   ParamAttrs.addDereferenceableAttr(Size);
764   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
765 }
766 
767 /// Clone the body of the original function into a resume function of
768 /// some sort.
769 void CoroCloner::create() {
770   // Create the new function if we don't already have one.
771   if (!NewF) {
772     NewF = createCloneDeclaration(OrigF, Shape, Suffix,
773                                   OrigF.getParent()->end());
774   }
775 
  // Replace all args with undefs. The buildCoroutineFrame algorithm has
  // already rewritten accesses to the args that occur after suspend points
  // with loads and stores to/from the coroutine frame.
779   for (Argument &A : OrigF.args())
780     VMap[&A] = UndefValue::get(A.getType());
781 
782   SmallVector<ReturnInst *, 4> Returns;
783 
784   // Ignore attempts to change certain attributes of the function.
785   // TODO: maybe there should be a way to suppress this during cloning?
786   auto savedVisibility = NewF->getVisibility();
787   auto savedUnnamedAddr = NewF->getUnnamedAddr();
788   auto savedDLLStorageClass = NewF->getDLLStorageClass();
789 
790   // NewF's linkage (which CloneFunctionInto does *not* change) might not
791   // be compatible with the visibility of OrigF (which it *does* change),
792   // so protect against that.
793   auto savedLinkage = NewF->getLinkage();
794   NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
795 
796   CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns);
797 
798   NewF->setLinkage(savedLinkage);
799   NewF->setVisibility(savedVisibility);
800   NewF->setUnnamedAddr(savedUnnamedAddr);
801   NewF->setDLLStorageClass(savedDLLStorageClass);
802 
803   auto &Context = NewF->getContext();
804 
805   // Replace the attributes of the new function:
806   auto OrigAttrs = NewF->getAttributes();
807   auto NewAttrs = AttributeList();
808 
809   switch (Shape.ABI) {
810   case coro::ABI::Switch:
811     // Bootstrap attributes by copying function attributes from the
812     // original function.  This should include optimization settings and so on.
813     NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
814                                       OrigAttrs.getFnAttributes());
815 
816     addFramePointerAttrs(NewAttrs, Context, 0,
817                          Shape.FrameSize, Shape.FrameAlign);
818     break;
819   case coro::ABI::Async:
820     break;
821   case coro::ABI::Retcon:
822   case coro::ABI::RetconOnce:
823     // If we have a continuation prototype, just use its attributes,
824     // full-stop.
825     NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
826 
827     addFramePointerAttrs(NewAttrs, Context, 0,
828                          Shape.getRetconCoroId()->getStorageSize(),
829                          Shape.getRetconCoroId()->getStorageAlignment());
830     break;
831   }
832 
833   switch (Shape.ABI) {
834   // In these ABIs, the cloned functions always return 'void', and the
835   // existing return sites are meaningless.  Note that for unique
836   // continuations, this includes the returns associated with suspends;
837   // this is fine because we can't suspend twice.
838   case coro::ABI::Switch:
839   case coro::ABI::RetconOnce:
840     // Remove old returns.
841     for (ReturnInst *Return : Returns)
842       changeToUnreachable(Return, /*UseLLVMTrap=*/false);
843     break;
844 
845   // With multi-suspend continuations, we'll already have eliminated the
846   // original returns and inserted returns before all the suspend points,
847   // so we want to leave any returns in place.
848   case coro::ABI::Retcon:
849     break;
  // Async lowering will insert musttail calls at all suspend points, each
  // followed by a return.
852   // Don't change returns to unreachable because that will trip up the verifier.
853   // These returns should be unreachable from the clone.
854   case coro::ABI::Async:
855     break;
856   }
857 
858   NewF->setAttributes(NewAttrs);
859   NewF->setCallingConv(Shape.getResumeFunctionCC());
860 
861   // Set up the new entry block.
862   replaceEntryBlock();
863 
864   Builder.SetInsertPoint(&NewF->getEntryBlock().front());
865   NewFramePtr = deriveNewFramePointer();
866 
867   // Remap frame pointer.
868   Value *OldFramePtr = VMap[Shape.FramePtr];
869   NewFramePtr->takeName(OldFramePtr);
870   OldFramePtr->replaceAllUsesWith(NewFramePtr);
871 
872   // Remap vFrame pointer.
873   auto *NewVFrame = Builder.CreateBitCast(
874       NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
875   Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
876   OldVFrame->replaceAllUsesWith(NewVFrame);
877 
878   switch (Shape.ABI) {
879   case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via switch (this allows
    // us to remove the final case from the switch, since it is undefined
    // behavior to resume the coroutine suspended at the final suspend point).
883     if (Shape.SwitchLowering.HasFinalSuspend)
884       handleFinalSuspend();
885     break;
886   case coro::ABI::Async:
887   case coro::ABI::Retcon:
888   case coro::ABI::RetconOnce:
889     // Replace uses of the active suspend with the corresponding
890     // continuation-function arguments.
891     assert(ActiveSuspend != nullptr &&
892            "no active suspend when lowering a continuation-style coroutine");
893     replaceRetconOrAsyncSuspendUses();
894     break;
895   }
896 
897   // Handle suspends.
898   replaceCoroSuspends();
899 
900   // Handle swifterror.
901   replaceSwiftErrorOps();
902 
903   // Remove coro.end intrinsics.
904   replaceCoroEnds();
905 
906   // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
907   // to suppress deallocation code.
908   if (Shape.ABI == coro::ABI::Switch)
909     coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
910                           /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
911 }
912 
// Create a resume clone by cloning the body of the original function, setting
// a new entry block and replacing coro.suspend with an appropriate value to
// force the resume or cleanup path at every suspend point.
916 static Function *createClone(Function &F, const Twine &Suffix,
917                              coro::Shape &Shape, CoroCloner::Kind FKind) {
918   CoroCloner Cloner(F, Suffix, Shape, FKind);
919   Cloner.create();
920   return Cloner.getFunction();
921 }
922 
923 /// Remove calls to llvm.coro.end in the original function.
924 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
925   for (auto End : Shape.CoroEnds) {
926     replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
927   }
928 }
929 
930 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
931   assert(Shape.ABI == coro::ABI::Async);
932 
933   auto *FuncPtrStruct = cast<ConstantStruct>(
934       Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
935   auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
936   auto *OrigContextSize = FuncPtrStruct->getOperand(1);
937   auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
938                                           Shape.AsyncLowering.ContextSize);
939   auto *NewFuncPtrStruct = ConstantStruct::get(
940       FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
941 
942   Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
943 }
944 
945 static void replaceFrameSize(coro::Shape &Shape) {
946   if (Shape.ABI == coro::ABI::Async)
947     updateAsyncFuncPointerContextSize(Shape);
948 
949   if (Shape.CoroSizes.empty())
950     return;
951 
952   // In the same function all coro.sizes should have the same result type.
953   auto *SizeIntrin = Shape.CoroSizes.back();
954   Module *M = SizeIntrin->getModule();
955   const DataLayout &DL = M->getDataLayout();
956   auto Size = DL.getTypeAllocSize(Shape.FrameTy);
957   auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
958 
959   for (CoroSizeInst *CS : Shape.CoroSizes) {
960     CS->replaceAllUsesWith(SizeConstant);
961     CS->eraseFromParent();
962   }
963 }
964 
// Create a global constant array containing pointers to the functions provided
// and set the Info parameter of CoroBegin to point at this constant. Example:
967 //
968 //   @f.resumers = internal constant [2 x void(%f.frame*)*]
969 //                    [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
970 //   define void @f() {
971 //     ...
972 //     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
973 //                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
974 //
975 // Assumes that all the functions have the same signature.
976 static void setCoroInfo(Function &F, coro::Shape &Shape,
977                         ArrayRef<Function *> Fns) {
978   // This only works under the switch-lowering ABI because coro elision
979   // only works on the switch-lowering ABI.
980   assert(Shape.ABI == coro::ABI::Switch);
981 
982   SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
983   assert(!Args.empty());
984   Function *Part = *Fns.begin();
985   Module *M = Part->getParent();
986   auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
987 
988   auto *ConstVal = ConstantArray::get(ArrTy, Args);
989   auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
990                                 GlobalVariable::PrivateLinkage, ConstVal,
991                                 F.getName() + Twine(".resumers"));
992 
993   // Update coro.begin instruction to refer to this constant.
994   LLVMContext &C = F.getContext();
995   auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
996   Shape.getSwitchCoroId()->setInfo(BC);
997 }
998 
// Store the addresses of the Resume/Destroy/Cleanup functions in the coroutine
// frame.
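//
// A minimal sketch of the emitted stores (frame field indices depend on the
// actual frame layout; when the allocation may be elided, a select of
// DestroyFn/CleanupFn is stored instead of DestroyFn):
//
//   %resume.addr = getelementptr inbounds %f.Frame, %f.Frame* %hdl, i32 0, i32 0
//   store void (%f.Frame*)* @f.resume, void (%f.Frame*)** %resume.addr
//   %destroy.addr = getelementptr inbounds %f.Frame, %f.Frame* %hdl, i32 0, i32 1
//   store void (%f.Frame*)* @f.destroy, void (%f.Frame*)** %destroy.addr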
1000 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1001                             Function *DestroyFn, Function *CleanupFn) {
1002   assert(Shape.ABI == coro::ABI::Switch);
1003 
1004   IRBuilder<> Builder(Shape.FramePtr->getNextNode());
1005   auto *ResumeAddr = Builder.CreateStructGEP(
1006       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1007       "resume.addr");
1008   Builder.CreateStore(ResumeFn, ResumeAddr);
1009 
1010   Value *DestroyOrCleanupFn = DestroyFn;
1011 
1012   CoroIdInst *CoroId = Shape.getSwitchCoroId();
1013   if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elided the
    // allocation), use CleanupFn instead of DestroyFn.
1016     DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1017   }
1018 
1019   auto *DestroyAddr = Builder.CreateStructGEP(
1020       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1021       "destroy.addr");
1022   Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1023 }
1024 
1025 static void postSplitCleanup(Function &F) {
1026   removeUnreachableBlocks(F);
1027 
1028   // For now, we do a mandatory verification step because we don't
1029   // entirely trust this pass.  Note that we don't want to add a verifier
1030   // pass to FPM below because it will also verify all the global data.
1031   if (verifyFunction(F, &errs()))
1032     report_fatal_error("Broken function");
1033 
1034   legacy::FunctionPassManager FPM(F.getParent());
1035 
1036   FPM.add(createSCCPPass());
1037   FPM.add(createCFGSimplificationPass());
1038   FPM.add(createEarlyCSEPass());
1039   FPM.add(createCFGSimplificationPass());
1040 
1041   FPM.doInitialization();
1042   FPM.run(F);
1043   FPM.doFinalization();
1044 }
1045 
// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
1048 static void
1049 scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1050                           DenseMap<Value *, Value *> &ResolvedValues) {
1051   auto *PrevBB = Prev->getParent();
1052   for (PHINode &PN : NewBlock->phis()) {
1053     auto V = PN.getIncomingValueForBlock(PrevBB);
1054     // See if we already resolved it.
1055     auto VI = ResolvedValues.find(V);
1056     if (VI != ResolvedValues.end())
1057       V = VI->second;
1058     // Remember the value.
1059     ResolvedValues[&PN] = V;
1060   }
1061 }
1062 
// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. A suspend instruction is represented by a switch; track the PHI
// values and select the correct case successor when possible.
1066 static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1067   DenseMap<Value *, Value *> ResolvedValues;
1068   BasicBlock *UnconditionalSucc = nullptr;
1069 
1070   Instruction *I = InitialInst;
1071   while (I->isTerminator() ||
1072          (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
1073     if (isa<ReturnInst>(I)) {
1074       if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from the basic block of InitialInst.
1077         if (UnconditionalSucc)
1078           UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1079         ReplaceInstWithInst(InitialInst, I->clone());
1080       }
1081       return true;
1082     }
1083     if (auto *BR = dyn_cast<BranchInst>(I)) {
1084       if (BR->isUnconditional()) {
1085         BasicBlock *BB = BR->getSuccessor(0);
1086         if (I == InitialInst)
1087           UnconditionalSucc = BB;
1088         scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1089         I = BB->getFirstNonPHIOrDbgOrLifetime();
1090         continue;
1091       }
1092     } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1093       auto *BR = dyn_cast<BranchInst>(I->getNextNode());
1094       if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the number of cases of the suspend switch instruction is reduced
        // to one, it is simplified to a CmpInst by llvm::ConstantFoldTerminator,
        // and the comparison looks like: %cond = icmp eq i8 %V, constant.
1098         ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1099         if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
1100           Value *V = CondCmp->getOperand(0);
1101           auto it = ResolvedValues.find(V);
1102           if (it != ResolvedValues.end())
1103             V = it->second;
1104 
1105           if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
1106             BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
1107                                  ? BR->getSuccessor(0)
1108                                  : BR->getSuccessor(1);
1109             scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1110             I = BB->getFirstNonPHIOrDbgOrLifetime();
1111             continue;
1112           }
1113         }
1114       }
1115     } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1116       Value *V = SI->getCondition();
1117       auto it = ResolvedValues.find(V);
1118       if (it != ResolvedValues.end())
1119         V = it->second;
1120       if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1121         BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1122         scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1123         I = BB->getFirstNonPHIOrDbgOrLifetime();
1124         continue;
1125       }
1126     }
1127     return false;
1128   }
1129   return false;
1130 }
1131 
// Check whether CI obeys the rules of the musttail attribute.
1133 static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1134   if (CI.isInlineAsm())
1135     return false;
1136 
  // Match the prototype and calling convention of the resume function.
1138   FunctionType *CalleeTy = CI.getFunctionType();
1139   if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1140     return false;
1141 
1142   Type *CalleeParmTy = CalleeTy->getParamType(0);
1143   if (!CalleeParmTy->isPointerTy() ||
1144       (CalleeParmTy->getPointerAddressSpace() != 0))
1145     return false;
1146 
1147   if (CI.getCallingConv() != F.getCallingConv())
1148     return false;
1149 
  // CI should not have any ABI-impacting parameter attributes.
1151   static const Attribute::AttrKind ABIAttrs[] = {
1152       Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
1153       Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
1154       Attribute::SwiftSelf,    Attribute::SwiftError};
1155   AttributeList Attrs = CI.getAttributes();
1156   for (auto AK : ABIAttrs)
1157     if (Attrs.hasParamAttribute(0, AK))
1158       return false;
1159 
1160   return true;
1161 }
1162 
// Add musttail to any resume instructions that are immediately followed by a
// suspend (i.e. ret). We do this even at -O0 to support guaranteed tail calls
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is applied only in the resume part of the coroutine, and
// only to calls whose signature and calling convention match those of the
// resume function.
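//
// A minimal sketch of the rewrite (the calling convention shown is
// illustrative; the call is a lowered llvm.coro.resume):
//
//   call fastcc void %resume.fn(i8* %hdl)
//   ret void
//
// becomes
//
//   musttail call fastcc void %resume.fn(i8* %hdl)
//   ret void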
1168 static void addMustTailToCoroResumes(Function &F) {
1169   bool changed = false;
1170 
1171   // Collect potential resume instructions.
1172   SmallVector<CallInst *, 4> Resumes;
1173   for (auto &I : instructions(F))
1174     if (auto *Call = dyn_cast<CallInst>(&I))
1175       if (shouldBeMustTail(*Call, F))
1176         Resumes.push_back(Call);
1177 
1178   // Set musttail on those that are followed by a ret instruction.
1179   for (CallInst *Call : Resumes)
1180     if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1181       Call->setTailCallKind(CallInst::TCK_MustTail);
1182       changed = true;
1183     }
1184 
1185   if (changed)
1186     removeUnreachableBlocks(F);
1187 }
1188 
1189 // Coroutine has no suspend points. Remove heap allocation for the coroutine
1190 // frame if possible.
1191 static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1192   auto *CoroBegin = Shape.CoroBegin;
1193   auto *CoroId = CoroBegin->getId();
1194   auto *AllocInst = CoroId->getCoroAlloc();
1195   switch (Shape.ABI) {
1196   case coro::ABI::Switch: {
1197     auto SwitchId = cast<CoroIdInst>(CoroId);
1198     coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1199     if (AllocInst) {
1200       IRBuilder<> Builder(AllocInst);
1201       auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1202       Frame->setAlignment(Shape.FrameAlign);
1203       auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1204       AllocInst->replaceAllUsesWith(Builder.getFalse());
1205       AllocInst->eraseFromParent();
1206       CoroBegin->replaceAllUsesWith(VFrame);
1207     } else {
1208       CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1209     }
1210     break;
1211   }
1212   case coro::ABI::Async:
1213   case coro::ABI::Retcon:
1214   case coro::ABI::RetconOnce:
1215     CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1216     break;
1217   }
1218 
1219   CoroBegin->eraseFromParent();
1220 }
1221 
// SimplifySuspendPoint needs to check that there are no calls between
// coro.save and coro.suspend, since any of the calls may potentially resume
// the coroutine, in which case we cannot eliminate the suspend point.
1225 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1226   for (Instruction *I = From; I != To; I = I->getNextNode()) {
1227     // Assume that no intrinsic can resume the coroutine.
1228     if (isa<IntrinsicInst>(I))
1229       continue;
1230 
1231     if (isa<CallBase>(I))
1232       return true;
1233   }
1234   return false;
1235 }
1236 
1237 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1238   SmallPtrSet<BasicBlock *, 8> Set;
1239   SmallVector<BasicBlock *, 8> Worklist;
1240 
1241   Set.insert(SaveBB);
1242   Worklist.push_back(ResDesBB);
1243 
1244   // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by the suspend instruction, all blocks in between
  // will eventually have to hit SaveBB when going backwards from ResDesBB.
1247   while (!Worklist.empty()) {
1248     auto *BB = Worklist.pop_back_val();
1249     Set.insert(BB);
1250     for (auto *Pred : predecessors(BB))
1251       if (Set.count(Pred) == 0)
1252         Worklist.push_back(Pred);
1253   }
1254 
1255   // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1256   Set.erase(SaveBB);
1257   Set.erase(ResDesBB);
1258 
1259   for (auto *BB : Set)
1260     if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1261       return true;
1262 
1263   return false;
1264 }
1265 
1266 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1267   auto *SaveBB = Save->getParent();
1268   auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1269 
1270   if (SaveBB == ResumeOrDestroyBB)
1271     return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1272 
1273   // Any calls from Save to the end of the block?
1274   if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1275     return true;
1276 
  // Any calls from the beginning of the block up to ResumeOrDestroy?
1278   if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1279                              ResumeOrDestroy))
1280     return true;
1281 
1282   // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1283   if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1284     return true;
1285 
1286   return false;
1287 }
1288 
1289 // If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
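//
// A minimal, illustrative sketch of the pattern being simplified:
//
//   %save = call token @llvm.coro.save(i8* %hdl)
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 1) ; lowered coro.destroy
//   %fn   = bitcast i8* %addr to void (i8*)*
//   call fastcc void %fn(i8* %hdl)
//   %susp = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// Since the coroutine is resumed or destroyed right before it would suspend,
// the save, the call and the suspend can be removed, with %susp replaced by the
// subfn index so control flows directly down the matching resume or cleanup
// path.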
1291 static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1292                                  CoroBeginInst *CoroBegin) {
1293   Instruction *Prev = Suspend->getPrevNode();
1294   if (!Prev) {
1295     auto *Pred = Suspend->getParent()->getSinglePredecessor();
1296     if (!Pred)
1297       return false;
1298     Prev = Pred->getTerminator();
1299   }
1300 
1301   CallBase *CB = dyn_cast<CallBase>(Prev);
1302   if (!CB)
1303     return false;
1304 
1305   auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1306 
1307   // See if the callsite is for resumption or destruction of the coroutine.
1308   auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1309   if (!SubFn)
1310     return false;
1311 
  // If it does not refer to the current coroutine, we cannot do anything with
  // it.
1313   if (SubFn->getFrame() != CoroBegin)
1314     return false;
1315 
1316   // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and the call instruction. They could potentially
  // resume the coroutine, rendering this optimization unsafe.
1319   auto *Save = Suspend->getCoroSave();
1320   if (hasCallsBetween(Save, CB))
1321     return false;
1322 
1323   // Replace llvm.coro.suspend with the value that results in resumption over
1324   // the resume or cleanup path.
1325   Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1326   Suspend->eraseFromParent();
1327   Save->eraseFromParent();
1328 
1329   // No longer need a call to coro.resume or coro.destroy.
1330   if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1331     BranchInst::Create(Invoke->getNormalDest(), Invoke);
1332   }
1333 
  // Grab the CalledValue from CB before erasing the call instruction.
1335   auto *CalledValue = CB->getCalledOperand();
1336   CB->eraseFromParent();
1337 
  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1339   if (CalledValue != SubFn && CalledValue->user_empty())
1340     if (auto *I = dyn_cast<Instruction>(CalledValue))
1341       I->eraseFromParent();
1342 
1343   // Now we are good to remove SubFn.
1344   if (SubFn->user_empty())
1345     SubFn->eraseFromParent();
1346 
1347   return true;
1348 }
1349 
1350 // Remove suspend points that are simplified.
1351 static void simplifySuspendPoints(coro::Shape &Shape) {
1352   // Currently, the only simplification we do is switch-lowering-specific.
1353   if (Shape.ABI != coro::ABI::Switch)
1354     return;
1355 
1356   auto &S = Shape.CoroSuspends;
1357   size_t I = 0, N = S.size();
1358   if (N == 0)
1359     return;
1360   while (true) {
1361     auto SI = cast<CoroSuspendInst>(S[I]);
1362     // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1363     // to resume a coroutine suspended at the final suspend point.
1364     if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1365       if (--N == I)
1366         break;
1367       std::swap(S[I], S[N]);
1368       continue;
1369     }
1370     if (++I == N)
1371       break;
1372   }
1373   S.resize(N);
1374 }
1375 
1376 static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1377                                  SmallVectorImpl<Function *> &Clones) {
1378   assert(Shape.ABI == coro::ABI::Switch);
1379 
1380   createResumeEntryBlock(F, Shape);
1381   auto ResumeClone = createClone(F, ".resume", Shape,
1382                                  CoroCloner::Kind::SwitchResume);
1383   auto DestroyClone = createClone(F, ".destroy", Shape,
1384                                   CoroCloner::Kind::SwitchUnwind);
1385   auto CleanupClone = createClone(F, ".cleanup", Shape,
1386                                   CoroCloner::Kind::SwitchCleanup);
1387 
1388   postSplitCleanup(*ResumeClone);
1389   postSplitCleanup(*DestroyClone);
1390   postSplitCleanup(*CleanupClone);
1391 
1392   addMustTailToCoroResumes(*ResumeClone);
1393 
  // Store the addresses of the resume/destroy/cleanup functions in the
  // coroutine frame.
1395   updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1396 
1397   assert(Clones.empty());
1398   Clones.push_back(ResumeClone);
1399   Clones.push_back(DestroyClone);
1400   Clones.push_back(CleanupClone);
1401 
1402   // Create a constant array referring to the resume/destroy/cleanup functions,
1403   // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1404   // pass can determine the correct function to call.
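  // For a coroutine f, the emitted array looks roughly like this (illustrative
  // names and types; the exact global and element types come from setCoroInfo):
  //    @f.resumers = private constant [3 x void (%f.Frame*)*]
  //                    [void (%f.Frame*)* @f.resume, @f.destroy, @f.cleanup]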
1405   setCoroInfo(F, Shape, Clones);
1406 }
1407 
1408 static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1409                                        Value *Continuation) {
1410   auto *ResumeIntrinsic = Suspend->getResumeFunction();
1411   auto &Context = Suspend->getParent()->getParent()->getContext();
1412   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1413 
1414   IRBuilder<> Builder(ResumeIntrinsic);
1415   auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1416   ResumeIntrinsic->replaceAllUsesWith(Val);
1417   ResumeIntrinsic->eraseFromParent();
1418   Suspend->setOperand(0, UndefValue::get(Int8PtrTy));
1419 }
1420 
1421 /// Coerce the arguments in \p FnArgs to the parameter types of \p FnTy, appending them to \p CallArgs.
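/// For example (illustrative types only): if a parameter of \p FnTy is i8* but
/// the corresponding entry in \p FnArgs has some other pointer type, a bitcast
/// of the argument is appended to \p CallArgs; otherwise the argument is
/// appended unchanged.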
1422 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1423                             ArrayRef<Value *> FnArgs,
1424                             SmallVectorImpl<Value *> &CallArgs) {
1425   size_t ArgIdx = 0;
1426   for (auto paramTy : FnTy->params()) {
1427     assert(ArgIdx < FnArgs.size());
1428     if (paramTy != FnArgs[ArgIdx]->getType())
1429       CallArgs.push_back(
1430           Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1431     else
1432       CallArgs.push_back(FnArgs[ArgIdx]);
1433     ++ArgIdx;
1434   }
1435 }
1436 
1437 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1438                                    ArrayRef<Value *> Arguments,
1439                                    IRBuilder<> &Builder) {
1440   auto *FnTy =
1441       cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
1442   // Coerce the arguments. LLVM optimizations seem to ignore the types in
1443   // vararg functions and throw away casts in optimized mode.
1444   SmallVector<Value *, 8> CallArgs;
1445   coerceArguments(Builder, FnTy, Arguments, CallArgs);
1446 
1447   auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1448   TailCall->setTailCallKind(CallInst::TCK_MustTail);
1449   TailCall->setDebugLoc(Loc);
1450   TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1451   return TailCall;
1452 }
1453 
1454 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1455                                 SmallVectorImpl<Function *> &Clones) {
1456   assert(Shape.ABI == coro::ABI::Async);
1457   assert(Clones.empty());
1458   // Reset various things that the optimizer might have decided it
1459   // "knows" about the coroutine function due to not seeing a return.
1460   F.removeFnAttr(Attribute::NoReturn);
1461   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1462   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1463 
1464   auto &Context = F.getContext();
1465   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1466 
1467   auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1468   IRBuilder<> Builder(Id);
1469 
1470   auto *FramePtr = Id->getStorage();
1471   FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1472   FramePtr = Builder.CreateConstInBoundsGEP1_32(
1473       Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1474       "async.ctx.frameptr");
1475 
1476   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1477   {
1478     // Make sure we don't invalidate Shape.FramePtr.
1479     TrackingVH<Instruction> Handle(Shape.FramePtr);
1480     Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1481     Shape.FramePtr = Handle.getValPtr();
1482   }
1483 
1484   // Create all the functions in order after the main function.
1485   auto NextF = std::next(F.getIterator());
1486 
1487   // Create a continuation function for each of the suspend points.
1488   Clones.reserve(Shape.CoroSuspends.size());
1489   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1490     auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1491 
1492     // Create the clone declaration.
1493     auto *Continuation =
1494         createCloneDeclaration(F, Shape, ".resume." + Twine(Idx), NextF);
1495     Clones.push_back(Continuation);
1496 
1497     // Insert a branch to a new return block immediately before the suspend
1498     // point.
1499     auto *SuspendBB = Suspend->getParent();
1500     auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1501     auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1502 
1503     // Place the return block just before the split-off suspend block.
1504     auto *ReturnBB =
1505         BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1506     Branch->setSuccessor(0, ReturnBB);
1507 
1508     IRBuilder<> Builder(ReturnBB);
1509 
1510     // Insert the call to the tail call function and inline it.
1511     auto *Fn = Suspend->getMustTailCallFunction();
1512     SmallVector<Value *, 8> Args(Suspend->args());
1513     auto FnArgs = ArrayRef<Value *>(Args).drop_front(3);
1514     auto *TailCall =
1515         coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1516     Builder.CreateRetVoid();
1517     InlineFunctionInfo FnInfo;
1518     auto InlineRes = InlineFunction(*TailCall, FnInfo);
1519     assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1520     (void)InlineRes;
1521 
1522     // Replace the llvm.coro.async.resume intrinsic call.
1523     replaceAsyncResumeFunction(Suspend, Continuation);
1524   }
1525 
1526   assert(Clones.size() == Shape.CoroSuspends.size());
1527   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1528     auto *Suspend = Shape.CoroSuspends[Idx];
1529     auto *Clone = Clones[Idx];
1530 
1531     CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1532   }
1533 }
1534 
1535 static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1536                                  SmallVectorImpl<Function *> &Clones) {
1537   assert(Shape.ABI == coro::ABI::Retcon ||
1538          Shape.ABI == coro::ABI::RetconOnce);
1539   assert(Clones.empty());
1540 
1541   // Reset various things that the optimizer might have decided it
1542   // "knows" about the coroutine function due to not seeing a return.
1543   F.removeFnAttr(Attribute::NoReturn);
1544   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
1545   F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
1546 
1547   // Allocate the frame.
1548   auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1549   Value *RawFramePtr;
1550   if (Shape.RetconLowering.IsFrameInlineInStorage) {
1551     RawFramePtr = Id->getStorage();
1552   } else {
1553     IRBuilder<> Builder(Id);
1554 
1555     // Determine the size of the frame.
1556     const DataLayout &DL = F.getParent()->getDataLayout();
1557     auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1558 
1559     // Allocate.  We don't need to update the call graph node because we're
1560     // going to recompute it from scratch after splitting.
1561     // FIXME: pass the required alignment
1562     RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1563     RawFramePtr =
1564       Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1565 
1566     // Stash the allocated frame pointer in the continuation storage.
1567     auto Dest = Builder.CreateBitCast(Id->getStorage(),
1568                                       RawFramePtr->getType()->getPointerTo());
1569     Builder.CreateStore(RawFramePtr, Dest);
1570   }
1571 
1572   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1573   {
1574     // Make sure we don't invalidate Shape.FramePtr.
1575     TrackingVH<Instruction> Handle(Shape.FramePtr);
1576     Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1577     Shape.FramePtr = Handle.getValPtr();
1578   }
1579 
1580   // Create a unique return block.
1581   BasicBlock *ReturnBB = nullptr;
1582   SmallVector<PHINode *, 4> ReturnPHIs;
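  // As a sketch, for a retcon coroutine with two suspends that each yield an
  // i32, the unified return block built below looks roughly like this
  // (illustrative IR; block and value names are placeholders):
  //    coro.return:
  //      %cont = phi <continuation fn ptr ty> [ %c0, %susp0 ], [ %c1, %susp1 ]
  //      %val  = phi i32 [ %v0, %susp0 ], [ %v1, %susp1 ]
  //      %cast = bitcast <continuation fn ptr ty> %cont to <ret elt 0 ty>
  //      %r0   = insertvalue <ret ty> undef, <ret elt 0 ty> %cast, 0
  //      %r1   = insertvalue <ret ty> %r0, i32 %val, 1
  //      ret <ret ty> %r1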
1583 
1584   // Create all the functions in order after the main function.
1585   auto NextF = std::next(F.getIterator());
1586 
1587   // Create a continuation function for each of the suspend points.
1588   Clones.reserve(Shape.CoroSuspends.size());
1589   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1590     auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1591 
1592     // Create the clone declaration.
1593     auto Continuation =
1594       createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF);
1595     Clones.push_back(Continuation);
1596 
1597     // Insert a branch to the unified return block immediately before
1598     // the suspend point.
1599     auto SuspendBB = Suspend->getParent();
1600     auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1601     auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1602 
1603     // Create the unified return block.
1604     if (!ReturnBB) {
1605       // Place it before the first suspend.
1606       ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1607                                     NewSuspendBB);
1608       Shape.RetconLowering.ReturnBlock = ReturnBB;
1609 
1610       IRBuilder<> Builder(ReturnBB);
1611 
1612       // Create PHIs for all the return values.
1613       assert(ReturnPHIs.empty());
1614 
1615       // First, the continuation.
1616       ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1617                                              Shape.CoroSuspends.size()));
1618 
1619       // Next, all the directly-yielded values.
1620       for (auto ResultTy : Shape.getRetconResultTypes())
1621         ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1622                                                Shape.CoroSuspends.size()));
1623 
1624       // Build the return value.
1625       auto RetTy = F.getReturnType();
1626 
1627       // Cast the continuation value if necessary.
1628       // We can't rely on the types matching up because that type would
1629       // have to be infinite.
1630       auto CastedContinuationTy =
1631         (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1632       auto *CastedContinuation =
1633         Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1634 
1635       Value *RetV;
1636       if (ReturnPHIs.size() == 1) {
1637         RetV = CastedContinuation;
1638       } else {
1639         RetV = UndefValue::get(RetTy);
1640         RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1641         for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1642           RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1643       }
1644 
1645       Builder.CreateRet(RetV);
1646     }
1647 
1648     // Branch to the return block.
1649     Branch->setSuccessor(0, ReturnBB);
1650     ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1651     size_t NextPHIIndex = 1;
1652     for (auto &VUse : Suspend->value_operands())
1653       ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1654     assert(NextPHIIndex == ReturnPHIs.size());
1655   }
1656 
1657   assert(Clones.size() == Shape.CoroSuspends.size());
1658   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1659     auto Suspend = Shape.CoroSuspends[i];
1660     auto Clone = Clones[i];
1661 
1662     CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1663   }
1664 }
1665 
1666 namespace {
1667   class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1668     Function &F;
1669   public:
1670     PrettyStackTraceFunction(Function &F) : F(F) {}
1671     void print(raw_ostream &OS) const override {
1672       OS << "While splitting coroutine ";
1673       F.printAsOperand(OS, /*print type*/ false, F.getParent());
1674       OS << "\n";
1675     }
1676   };
1677 }
1678 
1679 static coro::Shape splitCoroutine(Function &F,
1680                                   SmallVectorImpl<Function *> &Clones,
1681                                   bool ReuseFrameSlot) {
1682   PrettyStackTraceFunction prettyStackTrace(F);
1683 
1684   // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1685   // up by uses in unreachable blocks, so remove them as a first pass.
1686   removeUnreachableBlocks(F);
1687 
1688   coro::Shape Shape(F, ReuseFrameSlot);
1689   if (!Shape.CoroBegin)
1690     return Shape;
1691 
1692   simplifySuspendPoints(Shape);
1693   buildCoroutineFrame(F, Shape);
1694   replaceFrameSize(Shape);
1695 
1696   // If there are no suspend points, no split is required; just remove
1697   // the allocation and deallocation blocks, as they are not needed.
1698   if (Shape.CoroSuspends.empty()) {
1699     handleNoSuspendCoroutine(Shape);
1700   } else {
1701     switch (Shape.ABI) {
1702     case coro::ABI::Switch:
1703       splitSwitchCoroutine(F, Shape, Clones);
1704       break;
1705     case coro::ABI::Async:
1706       splitAsyncCoroutine(F, Shape, Clones);
1707       break;
1708     case coro::ABI::Retcon:
1709     case coro::ABI::RetconOnce:
1710       splitRetconCoroutine(F, Shape, Clones);
1711       break;
1712     }
1713   }
1714 
1715   // Replace all the swifterror operations in the original function.
1716   // This invalidates SwiftErrorOps in the Shape.
1717   replaceSwiftErrorOps(F, Shape, nullptr);
1718 
1719   return Shape;
1720 }
1721 
1722 static void
1723 updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1724                                    const SmallVectorImpl<Function *> &Clones,
1725                                    CallGraph &CG, CallGraphSCC &SCC) {
1726   if (!Shape.CoroBegin)
1727     return;
1728 
1729   removeCoroEnds(Shape, &CG);
1730   postSplitCleanup(F);
1731 
1732   // Update call graph and add the functions we created to the SCC.
1733   coro::updateCallGraph(F, Clones, CG, SCC);
1734 }
1735 
1736 static void updateCallGraphAfterCoroutineSplit(
1737     LazyCallGraph::Node &N, const coro::Shape &Shape,
1738     const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1739     LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1740     FunctionAnalysisManager &FAM) {
1741   if (!Shape.CoroBegin)
1742     return;
1743 
1744   for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1745     auto &Context = End->getContext();
1746     End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1747     End->eraseFromParent();
1748   }
1749 
1750   postSplitCleanup(N.getFunction());
1751 
1752   // We've inserted instructions into coroutine 'f' that reference the three new
1753   // coroutine funclets. We must now update the call graph so that reference
1754   // edges between 'f' and its funclets are added to it. LazyCallGraph only
1755   // allows CGSCC passes to insert "trivial" reference edges. We've ensured
1756 // above, by inserting the funclets into the same SCC as the coroutine, that
1757   // the edges are trivial.
1758   //
1759   // N.B.: If we didn't update the call graph here, a CGSCCToFunctionPassAdaptor
1760   // later in this CGSCC pass pipeline may be run, triggering a call graph
1761   // update of its own. Function passes run by the adaptor are not permitted to
1762   // add new edges of any kind to the graph, and the new edges inserted by this
1763   // pass would be misattributed to that unrelated function pass.
1764   updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1765 }
1766 
1767 // When we see the coroutine for the first time, we insert an indirect call to
1768 // a devirt trigger function and mark the coroutine as now being ready for
1769 // split.
1770 // Async lowering uses this after it has split the function to restart the
1771 // pipeline.
1772 static void prepareForSplit(Function &F, CallGraph &CG,
1773                             bool MarkForAsyncRestart = false) {
1774   Module &M = *F.getParent();
1775   LLVMContext &Context = F.getContext();
1776 #ifndef NDEBUG
1777   Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
1778   assert(DevirtFn && "coro.devirt.trigger function not found");
1779 #endif
1780 
1781   F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
1782                                       ? ASYNC_RESTART_AFTER_SPLIT
1783                                       : PREPARED_FOR_SPLIT);
1784 
1785   // Insert an indirect call sequence that will be devirtualized by CoroElide
1786   // pass:
1787   //    %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
1788   //    %1 = bitcast i8* %0 to void(i8*)*
1789   //    call void %1(i8* null)
1790   coro::LowererBase Lowerer(M);
1791   Instruction *InsertPt =
1792       MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
1793                           : F.getEntryBlock().getTerminator();
1794   auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
1795   auto *DevirtFnAddr =
1796       Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
1797   FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
1798                                          {Type::getInt8PtrTy(Context)}, false);
1799   auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
1800 
1801   // Update the call graph with the indirect call we just added.
1802   CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
1803 }
1804 
1805 // Make sure that there is a devirtualization trigger function that the
1806 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
1807 // trigger function is not found, we will create one and add it to the current
1808 // SCC.
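// The trigger created below is essentially an empty always-inline function,
// roughly (illustrative; the actual symbol name is CORO_DEVIRT_TRIGGER_FN):
//    define private void @coro.devirt.trigger(i8*) alwaysinline {
//    entry:
//      ret void
//    }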
1809 static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
1810   Module &M = CG.getModule();
1811   if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
1812     return;
1813 
1814   LLVMContext &C = M.getContext();
1815   auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
1816                                  /*isVarArg=*/false);
1817   Function *DevirtFn =
1818       Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
1819                        CORO_DEVIRT_TRIGGER_FN, &M);
1820   DevirtFn->addFnAttr(Attribute::AlwaysInline);
1821   auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
1822   ReturnInst::Create(C, Entry);
1823 
1824   auto *Node = CG.getOrInsertFunction(DevirtFn);
1825 
1826   SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
1827   Nodes.push_back(Node);
1828   SCC.initialize(Nodes);
1829 }
1830 
1831 /// Replace a call to llvm.coro.prepare.retcon.
1832 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
1833                            LazyCallGraph::SCC &C) {
1834   auto CastFn = Prepare->getArgOperand(0); // as an i8*
1835   auto Fn = CastFn->stripPointerCasts();   // as its original type
1836 
1837   // Attempt to peephole this pattern:
1838   //    %0 = bitcast [[TYPE]] @some_function to i8*
1839   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
1840   //    %2 = bitcast %1 to [[TYPE]]
1841   // ==>
1842   //    %2 = @some_function
1843   for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
1844     // Look for bitcasts back to the original function type.
1845     auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1846     if (!Cast || Cast->getType() != Fn->getType())
1847       continue;
1848 
1849     // Replace and remove the cast.
1850     Cast->replaceAllUsesWith(Fn);
1851     Cast->eraseFromParent();
1852   }
1853 
1854   // Replace any remaining uses with the function as an i8*.
1855   // This can never directly be a callee, so we don't need to update CG.
1856   Prepare->replaceAllUsesWith(CastFn);
1857   Prepare->eraseFromParent();
1858 
1859   // Kill dead bitcasts.
1860   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
1861     if (!Cast->use_empty())
1862       break;
1863     CastFn = Cast->getOperand(0);
1864     Cast->eraseFromParent();
1865   }
1866 }
1867 /// Replace a call to llvm.coro.prepare.retcon.
1868 static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
1869   auto CastFn = Prepare->getArgOperand(0); // as an i8*
1870   auto Fn = CastFn->stripPointerCasts(); // as its original type
1871 
1872   // Find call graph nodes for the preparation.
1873   CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
1874   if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
1875     PrepareUserNode = CG[Prepare->getFunction()];
1876     FnNode = CG[ConcreteFn];
1877   }
1878 
1879   // Attempt to peephole this pattern:
1880   //    %0 = bitcast [[TYPE]] @some_function to i8*
1881   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
1882   //    %2 = bitcast %1 to [[TYPE]]
1883   // ==>
1884   //    %2 = @some_function
1885   for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
1886          UI != UE; ) {
1887     // Look for bitcasts back to the original function type.
1888     auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
1889     if (!Cast || Cast->getType() != Fn->getType()) continue;
1890 
1891     // Check whether the replacement will introduce new direct calls.
1892     // If so, we'll need to update the call graph.
1893     if (PrepareUserNode) {
1894       for (auto &Use : Cast->uses()) {
1895         if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
1896           if (!CB->isCallee(&Use))
1897             continue;
1898           PrepareUserNode->removeCallEdgeFor(*CB);
1899           PrepareUserNode->addCalledFunction(CB, FnNode);
1900         }
1901       }
1902     }
1903 
1904     // Replace and remove the cast.
1905     Cast->replaceAllUsesWith(Fn);
1906     Cast->eraseFromParent();
1907   }
1908 
1909   // Replace any remaining uses with the function as an i8*.
1910   // This can never directly be a callee, so we don't need to update CG.
1911   Prepare->replaceAllUsesWith(CastFn);
1912   Prepare->eraseFromParent();
1913 
1914   // Kill dead bitcasts.
1915   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
1916     if (!Cast->use_empty()) break;
1917     CastFn = Cast->getOperand(0);
1918     Cast->eraseFromParent();
1919   }
1920 }
1921 
1922 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
1923                                LazyCallGraph::SCC &C) {
1924   bool Changed = false;
1925   for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
1926     // Intrinsics can only be used in calls.
1927     auto *Prepare = cast<CallInst>((PI++)->getUser());
1928     replacePrepare(Prepare, CG, C);
1929     Changed = true;
1930   }
1931 
1932   return Changed;
1933 }
1934 
1935 /// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
1936 /// IPO from operating on calls to a retcon coroutine before it's been
1937 /// split.  This is only safe to do after we've split all retcon
1938 /// coroutines in the module.  We can do this in this pass because
1939 /// this pass does promise to split all retcon coroutines (as opposed to
1940 /// switch coroutines, which are lowered in multiple stages).
1941 static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
1942   bool Changed = false;
1943   for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
1944          PI != PE; ) {
1945     // Intrinsics can only be used in calls.
1946     auto *Prepare = cast<CallInst>((PI++)->getUser());
1947     replacePrepare(Prepare, CG);
1948     Changed = true;
1949   }
1950 
1951   return Changed;
1952 }
1953 
1954 static bool declaresCoroSplitIntrinsics(const Module &M) {
1955   return coro::declaresIntrinsics(M, {"llvm.coro.begin",
1956                                       "llvm.coro.prepare.retcon",
1957                                       "llvm.coro.prepare.async"});
1958 }
1959 
1960 static void addPrepareFunction(const Module &M,
1961                                SmallVectorImpl<Function *> &Fns,
1962                                StringRef Name) {
1963   auto *PrepareFn = M.getFunction(Name);
1964   if (PrepareFn && !PrepareFn->use_empty())
1965     Fns.push_back(PrepareFn);
1966 }
1967 
1968 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
1969                                      CGSCCAnalysisManager &AM,
1970                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
1971   // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
1972   //     non-zero number of nodes, so we assume that here and grab the first
1973   //     node's function's module.
1974   Module &M = *C.begin()->getFunction().getParent();
1975   auto &FAM =
1976       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
1977 
1978   if (!declaresCoroSplitIntrinsics(M))
1979     return PreservedAnalyses::all();
1980 
1981   // Check for uses of llvm.coro.prepare.retcon/async.
1982   SmallVector<Function *, 2> PrepareFns;
1983   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
1984   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
1985 
1986   // Find coroutines for processing.
1987   SmallVector<LazyCallGraph::Node *, 4> Coroutines;
1988   for (LazyCallGraph::Node &N : C)
1989     if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
1990       Coroutines.push_back(&N);
1991 
1992   if (Coroutines.empty() && PrepareFns.empty())
1993     return PreservedAnalyses::all();
1994 
1995   if (Coroutines.empty()) {
1996     for (auto *PrepareFn : PrepareFns) {
1997       replaceAllPrepares(PrepareFn, CG, C);
1998     }
1999   }
2000 
2001   // Split all the coroutines.
2002   for (LazyCallGraph::Node *N : Coroutines) {
2003     Function &F = N->getFunction();
2004     Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
2005     StringRef Value = Attr.getValueAsString();
2006     LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2007                       << "' state: " << Value << "\n");
2008     if (Value == UNPREPARED_FOR_SPLIT) {
2009       // Enqueue a second iteration of the CGSCC pipeline.
2010       // N.B.:
2011       // The CoroSplitLegacy pass "triggers" a restart of the CGSCC pass
2012       // pipeline by inserting an indirect function call that the
2013       // CoroElideLegacy pass then replaces with a direct function call. The
2014       // legacy CGSCC pipeline implicitly behaved as if it were wrapped in the
2015       // new pass manager's DevirtSCCRepeatedPass abstraction.
2016       //
2017       // This pass does not need to "trigger" another run of the pipeline.
2018       // Instead, it simply enqueues the same RefSCC onto the pipeline's
2019       // worklist.
2020       UR.CWorklist.insert(&C);
2021       F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
2022       continue;
2023     }
2024     F.removeFnAttr(CORO_PRESPLIT_ATTR);
2025 
2026     SmallVector<Function *, 4> Clones;
2027     const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
2028     updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2029 
2030     if (Shape.ABI == coro::ABI::Async && !Shape.CoroSuspends.empty()) {
2031       // We want the inliner to be run on the newly inserted functions.
2032       UR.CWorklist.insert(&C);
2033     }
2034   }
2035 
2036   if (!PrepareFns.empty()) {
2037     for (auto *PrepareFn : PrepareFns) {
2038       replaceAllPrepares(PrepareFn, CG, C);
2039     }
2040   }
2041 
2042   return PreservedAnalyses::none();
2043 }
2044 
2045 namespace {
2046 
2047 // We present a coroutine to LLVM as an ordinary function with suspension
2048 // points marked up with intrinsics. We let the optimizer party on the coroutine
2049 // as a single function for as long as possible. Shortly before the coroutine is
2050 // eligible to be inlined into its callers, we split up the coroutine into parts
2051 // corresponding to initial, resume and destroy invocations of the coroutine,
2052 // add them to the current SCC and restart the IPO pipeline to optimize the
2053 // coroutine subfunctions we extracted before proceeding to the caller of the
2054 // coroutine.
2055 struct CoroSplitLegacy : public CallGraphSCCPass {
2056   static char ID; // Pass identification, replacement for typeid
2057 
2058   CoroSplitLegacy(bool ReuseFrameSlot = false)
2059       : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
2060     initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2061   }
2062 
2063   bool Run = false;
2064   bool ReuseFrameSlot;
2065 
2066   // A coroutine is identified by the presence of a coro.begin intrinsic; if
2067   // we don't have any, this pass has nothing to do.
2068   bool doInitialization(CallGraph &CG) override {
2069     Run = declaresCoroSplitIntrinsics(CG.getModule());
2070     return CallGraphSCCPass::doInitialization(CG);
2071   }
2072 
2073   bool runOnSCC(CallGraphSCC &SCC) override {
2074     if (!Run)
2075       return false;
2076 
2077     // Check for uses of llvm.coro.prepare.retcon.
2078     SmallVector<Function *, 2> PrepareFns;
2079     auto &M = SCC.getCallGraph().getModule();
2080     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2081     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2082 
2083     // Find coroutines for processing.
2084     SmallVector<Function *, 4> Coroutines;
2085     for (CallGraphNode *CGN : SCC)
2086       if (auto *F = CGN->getFunction())
2087         if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2088           Coroutines.push_back(F);
2089 
2090     if (Coroutines.empty() && PrepareFns.empty())
2091       return false;
2092 
2093     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2094 
2095     if (Coroutines.empty()) {
2096       bool Changed = false;
2097       for (auto *PrepareFn : PrepareFns)
2098         Changed |= replaceAllPrepares(PrepareFn, CG);
2099       return Changed;
2100     }
2101 
2102     createDevirtTriggerFunc(CG, SCC);
2103 
2104     // Split all the coroutines.
2105     for (Function *F : Coroutines) {
2106       Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2107       StringRef Value = Attr.getValueAsString();
2108       LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
2109                         << "' state: " << Value << "\n");
2110       // Async lowering marks coroutines to trigger a restart of the pipeline
2111       // after it has split them.
2112       if (Value == ASYNC_RESTART_AFTER_SPLIT) {
2113         F->removeFnAttr(CORO_PRESPLIT_ATTR);
2114         continue;
2115       }
2116       if (Value == UNPREPARED_FOR_SPLIT) {
2117         prepareForSplit(*F, CG);
2118         continue;
2119       }
2120       F->removeFnAttr(CORO_PRESPLIT_ATTR);
2121 
2122       SmallVector<Function *, 4> Clones;
2123       const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
2124       updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2125       if (Shape.ABI == coro::ABI::Async) {
2126         // Restart SCC passes.
2127       // Mark the function for the CoroElide pass. It will devirtualize, causing
2128       // a restart of the SCC pipeline.
2129         prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2130       }
2131     }
2132 
2133     for (auto *PrepareFn : PrepareFns)
2134       replaceAllPrepares(PrepareFn, CG);
2135 
2136     return true;
2137   }
2138 
2139   void getAnalysisUsage(AnalysisUsage &AU) const override {
2140     CallGraphSCCPass::getAnalysisUsage(AU);
2141   }
2142 
2143   StringRef getPassName() const override { return "Coroutine Splitting"; }
2144 };
2145 
2146 } // end anonymous namespace
2147 
2148 char CoroSplitLegacy::ID = 0;
2149 
2150 INITIALIZE_PASS_BEGIN(
2151     CoroSplitLegacy, "coro-split",
2152     "Split coroutine into a set of functions driving its state machine", false,
2153     false)
2154 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2155 INITIALIZE_PASS_END(
2156     CoroSplitLegacy, "coro-split",
2157     "Split coroutine into a set of functions driving its state machine", false,
2158     false)
2159 
2160 Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
2161   return new CoroSplitLegacy(ReuseFrameSlot);
2162 }
2163