1 //===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This pass builds the coroutine frame and outlines resume and destroy parts
9 // of the coroutine into separate functions.
10 //
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume, and destroy invocations of the
// coroutine, add them to the current SCC, and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
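//
// For the switch lowering, for example, a coroutine `f` is conceptually split
// into a ramp function `f` plus clones along the lines of `f.resume`,
// `f.destroy`, and `f.cleanup` (the exact set of clones depends on the ABI
// described by coro::Shape).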
19 //===----------------------------------------------------------------------===//
20 
21 #include "llvm/Transforms/Coroutines/CoroSplit.h"
22 #include "CoroInstr.h"
23 #include "CoroInternal.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/StringRef.h"
28 #include "llvm/ADT/Twine.h"
29 #include "llvm/Analysis/CFG.h"
30 #include "llvm/Analysis/CallGraph.h"
31 #include "llvm/Analysis/CallGraphSCCPass.h"
32 #include "llvm/Analysis/ConstantFolding.h"
33 #include "llvm/Analysis/LazyCallGraph.h"
34 #include "llvm/IR/Argument.h"
35 #include "llvm/IR/Attributes.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/CFG.h"
38 #include "llvm/IR/CallingConv.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DataLayout.h"
41 #include "llvm/IR/DerivedTypes.h"
42 #include "llvm/IR/Dominators.h"
43 #include "llvm/IR/Function.h"
44 #include "llvm/IR/GlobalValue.h"
45 #include "llvm/IR/GlobalVariable.h"
46 #include "llvm/IR/IRBuilder.h"
47 #include "llvm/IR/InstIterator.h"
48 #include "llvm/IR/InstrTypes.h"
49 #include "llvm/IR/Instruction.h"
50 #include "llvm/IR/Instructions.h"
51 #include "llvm/IR/IntrinsicInst.h"
52 #include "llvm/IR/LLVMContext.h"
53 #include "llvm/IR/LegacyPassManager.h"
54 #include "llvm/IR/Module.h"
55 #include "llvm/IR/Type.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/IR/Verifier.h"
58 #include "llvm/InitializePasses.h"
59 #include "llvm/Pass.h"
60 #include "llvm/Support/Casting.h"
61 #include "llvm/Support/Debug.h"
62 #include "llvm/Support/PrettyStackTrace.h"
63 #include "llvm/Support/raw_ostream.h"
64 #include "llvm/Transforms/Scalar.h"
65 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
66 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
67 #include "llvm/Transforms/Utils/Cloning.h"
68 #include "llvm/Transforms/Utils/Local.h"
69 #include "llvm/Transforms/Utils/ValueMapper.h"
70 #include <cassert>
71 #include <cstddef>
72 #include <cstdint>
73 #include <initializer_list>
74 #include <iterator>
75 
76 using namespace llvm;
77 
78 #define DEBUG_TYPE "coro-split"
79 
80 namespace {
81 
/// A little helper class for building the clones of a coroutine: the
/// resume/destroy/cleanup functions of the switch lowering, or the
/// continuation and async resume functions of the other ABIs.
83 class CoroCloner {
84 public:
85   enum class Kind {
86     /// The shared resume function for a switch lowering.
87     SwitchResume,
88 
89     /// The shared unwind function for a switch lowering.
90     SwitchUnwind,
91 
92     /// The shared cleanup function for a switch lowering.
93     SwitchCleanup,
94 
95     /// An individual continuation function.
96     Continuation,
97 
98     /// An async resume function.
99     Async,
100   };
101 
102 private:
103   Function &OrigF;
104   Function *NewF;
105   const Twine &Suffix;
106   coro::Shape &Shape;
107   Kind FKind;
108   ValueToValueMapTy VMap;
109   IRBuilder<> Builder;
110   Value *NewFramePtr = nullptr;
111 
112   /// The active suspend instruction; meaningful only for continuation and async
113   /// ABIs.
114   AnyCoroSuspendInst *ActiveSuspend = nullptr;
115 
116 public:
117   /// Create a cloner for a switch lowering.
118   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
119              Kind FKind)
120     : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
121       FKind(FKind), Builder(OrigF.getContext()) {
122     assert(Shape.ABI == coro::ABI::Switch);
123   }
124 
125   /// Create a cloner for a continuation lowering.
126   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
127              Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
128       : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
129         FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
130         Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
131     assert(Shape.ABI == coro::ABI::Retcon ||
132            Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
133     assert(NewF && "need existing function for continuation");
134     assert(ActiveSuspend && "need active suspend point for continuation");
135   }
136 
137   Function *getFunction() const {
138     assert(NewF != nullptr && "declaration not yet set");
139     return NewF;
140   }
141 
142   void create();
143 
144 private:
145   bool isSwitchDestroyFunction() {
146     switch (FKind) {
147     case Kind::Async:
148     case Kind::Continuation:
149     case Kind::SwitchResume:
150       return false;
151     case Kind::SwitchUnwind:
152     case Kind::SwitchCleanup:
153       return true;
154     }
155     llvm_unreachable("Unknown CoroCloner::Kind enum");
156   }
157 
158   void replaceEntryBlock();
159   Value *deriveNewFramePointer();
160   void replaceRetconOrAsyncSuspendUses();
161   void replaceCoroSuspends();
162   void replaceCoroEnds();
163   void replaceSwiftErrorOps();
164   void salvageDebugInfo();
165   void handleFinalSuspend();
166 };
167 
168 } // end anonymous namespace
169 
170 static void maybeFreeRetconStorage(IRBuilder<> &Builder,
171                                    const coro::Shape &Shape, Value *FramePtr,
172                                    CallGraph *CG) {
173   assert(Shape.ABI == coro::ABI::Retcon ||
174          Shape.ABI == coro::ABI::RetconOnce);
175   if (Shape.RetconLowering.IsFrameInlineInStorage)
176     return;
177 
178   Shape.emitDealloc(Builder, FramePtr, CG);
179 }
180 
181 /// Replace an llvm.coro.end.async.
/// Inlines the musttail call if there is one.
183 /// \returns true if cleanup of the coro.end block is needed, false otherwise.
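///
/// A rough sketch of the IR this handles (names and types are illustrative):
///
///   pred:
///     musttail call void @some.fn(...)
///     br label %coro.end.block
///   coro.end.block:
///     %unused = call i1 @llvm.coro.end.async(i8* %hdl, i1 false, ...)
///
/// The musttail call is spliced in front of the coro.end, a `ret void` is
/// inserted between them, and the call is then inlined.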
184 static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
185   IRBuilder<> Builder(End);
186 
187   auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
188   if (!EndAsync) {
189     Builder.CreateRetVoid();
190     return true /*needs cleanup of coro.end block*/;
191   }
192 
193   auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
194   if (!MustTailCallFunc) {
195     Builder.CreateRetVoid();
196     return true /*needs cleanup of coro.end block*/;
197   }
198 
199   // Move the must tail call from the predecessor block into the end block.
200   auto *CoroEndBlock = End->getParent();
201   auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
202   assert(MustTailCallFuncBlock && "Must have a single predecessor block");
203   auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
204   auto *MustTailCall = cast<CallInst>(&*std::prev(It));
205   CoroEndBlock->getInstList().splice(
206       End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
207 
208   // Insert the return instruction.
209   Builder.SetInsertPoint(End);
210   Builder.CreateRetVoid();
211   InlineFunctionInfo FnInfo;
212 
213   // Remove the rest of the block, by splitting it into an unreachable block.
214   auto *BB = End->getParent();
215   BB->splitBasicBlock(End);
216   BB->getTerminator()->eraseFromParent();
217 
218   auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
219   assert(InlineRes.isSuccess() && "Expected inlining to succeed");
220   (void)InlineRes;
221 
222   // We have cleaned up the coro.end block above.
223   return false;
224 }
225 
226 /// Replace a non-unwind call to llvm.coro.end.
227 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
228                                       const coro::Shape &Shape, Value *FramePtr,
229                                       bool InResume, CallGraph *CG) {
230   // Start inserting right before the coro.end.
231   IRBuilder<> Builder(End);
232 
233   // Create the return instruction.
234   switch (Shape.ABI) {
235   // The cloned functions in switch-lowering always return void.
236   case coro::ABI::Switch:
237     // coro.end doesn't immediately end the coroutine in the main function
238     // in this lowering, because we need to deallocate the coroutine.
239     if (!InResume)
240       return;
241     Builder.CreateRetVoid();
242     break;
243 
244   // In async lowering this returns.
245   case coro::ABI::Async: {
246     bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
247     if (!CoroEndBlockNeedsCleanup)
248       return;
249     break;
250   }
251 
252   // In unique continuation lowering, the continuations always return void.
253   // But we may have implicitly allocated storage.
254   case coro::ABI::RetconOnce:
255     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
256     Builder.CreateRetVoid();
257     break;
258 
259   // In non-unique continuation lowering, we signal completion by returning
260   // a null continuation.
261   case coro::ABI::Retcon: {
262     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
263     auto RetTy = Shape.getResumeFunctionType()->getReturnType();
264     auto RetStructTy = dyn_cast<StructType>(RetTy);
265     PointerType *ContinuationTy =
266       cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
267 
268     Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
269     if (RetStructTy) {
270       ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
271                                               ReturnValue, 0);
272     }
273     Builder.CreateRet(ReturnValue);
274     break;
275   }
276   }
277 
278   // Remove the rest of the block, by splitting it into an unreachable block.
279   auto *BB = End->getParent();
280   BB->splitBasicBlock(End);
281   BB->getTerminator()->eraseFromParent();
282 }
283 
// Mark a coroutine as done, which implies that the coroutine is finished and
// will never be resumed.
//
// In the switch-resume ABI, the done state is represented by storing a null
// pointer in ResumeFnAddr.
//
// NOTE: We cannot omit the argument `FramePtr`. It is necessary because the
// pointer to the frame in the split functions is not stored in `Shape`.
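//
// Roughly, this emits (frame type and field index are illustrative):
//
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
//                                           i32 0, i32 0
//   store void (%f.Frame*)* null, void (%f.Frame*)** %ResumeFn.addr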
292 static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
293                                 Value *FramePtr) {
294   assert(
295       Shape.ABI == coro::ABI::Switch &&
296       "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
297   auto *GepIndex = Builder.CreateStructGEP(
298       Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
299       "ResumeFn.addr");
300   auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
301       Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
302   Builder.CreateStore(NullPtr, GepIndex);
303 }
304 
305 /// Replace an unwind call to llvm.coro.end.
306 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
307                                  Value *FramePtr, bool InResume,
308                                  CallGraph *CG) {
309   IRBuilder<> Builder(End);
310 
311   switch (Shape.ABI) {
312   // In switch-lowering, this does nothing in the main function.
313   case coro::ABI::Switch: {
    // Per the C++ specification, the coroutine should be marked as done
    // if promise.unhandled_exception() throws.  The frontend will
    // call coro.end(true) along this path.
    //
    // FIXME: We should refactor this once there is another language that
    // uses the switch-resume style besides C++.
320     markCoroutineAsDone(Builder, Shape, FramePtr);
321     if (!InResume)
322       return;
323     break;
324   }
325   // In async lowering this does nothing.
326   case coro::ABI::Async:
327     break;
328   // In continuation-lowering, this frees the continuation storage.
329   case coro::ABI::Retcon:
330   case coro::ABI::RetconOnce:
331     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
332     break;
333   }
334 
  // If coro.end has an associated funclet bundle, add a cleanupret instruction.
336   if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
337     auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
338     auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
339     End->getParent()->splitBasicBlock(End);
340     CleanupRet->getParent()->getTerminator()->eraseFromParent();
341   }
342 }
343 
344 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
345                            Value *FramePtr, bool InResume, CallGraph *CG) {
346   if (End->isUnwind())
347     replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
348   else
349     replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
350 
351   auto &Context = End->getContext();
352   End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
353                                    : ConstantInt::getFalse(Context));
354   End->eraseFromParent();
355 }
356 
357 // Create an entry block for a resume function with a switch that will jump to
358 // suspend points.
359 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
360   assert(Shape.ABI == coro::ABI::Switch);
361   LLVMContext &C = F.getContext();
362 
363   // resume.entry:
364   //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
365   //  i32 2
  //  %index = load i32, i32* %index.addr
367   //  switch i32 %index, label %unreachable [
368   //    i32 0, label %resume.0
369   //    i32 1, label %resume.1
370   //    ...
371   //  ]
372 
373   auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
374   auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
375 
376   IRBuilder<> Builder(NewEntry);
377   auto *FramePtr = Shape.FramePtr;
378   auto *FrameTy = Shape.FrameTy;
379   auto *GepIndex = Builder.CreateStructGEP(
380       FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
381   auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
382   auto *Switch =
383       Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
384   Shape.SwitchLowering.ResumeSwitch = Switch;
385 
386   size_t SuspendIndex = 0;
387   for (auto *AnyS : Shape.CoroSuspends) {
388     auto *S = cast<CoroSuspendInst>(AnyS);
389     ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
390 
391     // Replace CoroSave with a store to Index:
392     //    %index.addr = getelementptr %f.frame... (index field number)
393     //    store i32 0, i32* %index.addr1
394     auto *Save = S->getCoroSave();
395     Builder.SetInsertPoint(Save);
396     if (S->isFinal()) {
397       // The coroutine should be marked done if it reaches the final suspend
398       // point.
399       markCoroutineAsDone(Builder, Shape, FramePtr);
400     } else {
401       auto *GepIndex = Builder.CreateStructGEP(
402           FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
403       Builder.CreateStore(IndexVal, GepIndex);
404     }
405     Save->replaceAllUsesWith(ConstantTokenNone::get(C));
406     Save->eraseFromParent();
407 
408     // Split block before and after coro.suspend and add a jump from an entry
409     // switch:
410     //
411     //  whateverBB:
412     //    whatever
413     //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
414     //    switch i8 %0, label %suspend[i8 0, label %resume
415     //                                 i8 1, label %cleanup]
416     // becomes:
417     //
418     //  whateverBB:
419     //     whatever
420     //     br label %resume.0.landing
421     //
422     //  resume.0: ; <--- jump from the switch in the resume.entry
423     //     %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
424     //     br label %resume.0.landing
425     //
426     //  resume.0.landing:
427     //     %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
    //     switch i8 %1, label %suspend [i8 0, label %resume
    //                                   i8 1, label %cleanup]
430 
431     auto *SuspendBB = S->getParent();
432     auto *ResumeBB =
433         SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
434     auto *LandingBB = ResumeBB->splitBasicBlock(
435         S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
436     Switch->addCase(IndexVal, ResumeBB);
437 
438     cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
439     auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
440     S->replaceAllUsesWith(PN);
441     PN->addIncoming(Builder.getInt8(-1), SuspendBB);
442     PN->addIncoming(S, ResumeBB);
443 
444     ++SuspendIndex;
445   }
446 
447   Builder.SetInsertPoint(UnreachBB);
448   Builder.CreateUnreachable();
449 
450   Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
451 }
452 
453 
// Rewrite final suspend point handling. We do not use a suspend index to
// represent the final suspend point. Instead we zero out ResumeFnAddr in the
// coroutine frame, since it is undefined behavior to resume a coroutine
// suspended at the final suspend point. Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point, if present, is always the last element of the CoroSuspends array).
// In the destroy function, we add a code sequence to check whether ResumeFnAddr
// is null, and if so, jump to the appropriate label to handle cleanup from the
// final suspend point.
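//
// Roughly, the destroy/cleanup clone gets a guard of the following shape
// (names are illustrative):
//
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
//                                           i32 0, i32 0
//   %ResumeFn = load void (%f.Frame*)*, void (%f.Frame*)** %ResumeFn.addr
//   %is.done  = icmp eq void (%f.Frame*)* %ResumeFn, null
//   br i1 %is.done, label %final.cleanup, label %Switch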
463 void CoroCloner::handleFinalSuspend() {
464   assert(Shape.ABI == coro::ABI::Switch &&
465          Shape.SwitchLowering.HasFinalSuspend);
466   auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
467   auto FinalCaseIt = std::prev(Switch->case_end());
468   BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
469   Switch->removeCase(FinalCaseIt);
470   if (isSwitchDestroyFunction()) {
471     BasicBlock *OldSwitchBB = Switch->getParent();
472     auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
473     Builder.SetInsertPoint(OldSwitchBB->getTerminator());
474     auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
475                                        coro::Shape::SwitchFieldIndex::Resume,
476                                              "ResumeFn.addr");
477     auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
478                                     GepIndex);
479     auto *Cond = Builder.CreateIsNull(Load);
480     Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
481     OldSwitchBB->getTerminator()->eraseFromParent();
482   }
483 }
484 
485 static FunctionType *
486 getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
487   auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
488   auto *StructTy = cast<StructType>(AsyncSuspend->getType());
489   auto &Context = Suspend->getParent()->getParent()->getContext();
490   auto *VoidTy = Type::getVoidTy(Context);
491   return FunctionType::get(VoidTy, StructTy->elements(), false);
492 }
493 
494 static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
495                                         const Twine &Suffix,
496                                         Module::iterator InsertBefore,
497                                         AnyCoroSuspendInst *ActiveSuspend) {
498   Module *M = OrigF.getParent();
499   auto *FnTy = (Shape.ABI != coro::ABI::Async)
500                    ? Shape.getResumeFunctionType()
501                    : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
502 
503   Function *NewF =
504       Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
505                        OrigF.getName() + Suffix);
506   if (Shape.ABI != coro::ABI::Async)
507     NewF->addParamAttr(0, Attribute::NonNull);
508 
  // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
511   if (Shape.ABI != coro::ABI::Async)
512     NewF->addParamAttr(0, Attribute::NoAlias);
513 
514   M->getFunctionList().insert(InsertBefore, NewF);
515 
516   return NewF;
517 }
518 
519 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the
520 /// arguments to the continuation function.
521 ///
522 /// This assumes that the builder has a meaningful insertion point.
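///
/// For example (illustrative), if the active retcon suspend has type
/// `{ i8*, i32 }`, a use such as `extractvalue %s, 1` in the clone is
/// rewritten to use the corresponding continuation argument directly; any
/// remaining aggregate uses are rebuilt with insertvalue from the arguments.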
523 void CoroCloner::replaceRetconOrAsyncSuspendUses() {
524   assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
525          Shape.ABI == coro::ABI::Async);
526 
527   auto NewS = VMap[ActiveSuspend];
528   if (NewS->use_empty()) return;
529 
530   // Copy out all the continuation arguments after the buffer pointer into
531   // an easily-indexed data structure for convenience.
532   SmallVector<Value*, 8> Args;
533   // The async ABI includes all arguments -- including the first argument.
534   bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
535   for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
536             E = NewF->arg_end();
537        I != E; ++I)
538     Args.push_back(&*I);
539 
540   // If the suspend returns a single scalar value, we can just do a simple
541   // replacement.
542   if (!isa<StructType>(NewS->getType())) {
543     assert(Args.size() == 1);
544     NewS->replaceAllUsesWith(Args.front());
545     return;
546   }
547 
548   // Try to peephole extracts of an aggregate return.
549   for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
550     auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
551     if (!EVI || EVI->getNumIndices() != 1)
552       continue;
553 
554     EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
555     EVI->eraseFromParent();
556   }
557 
558   // If we have no remaining uses, we're done.
559   if (NewS->use_empty()) return;
560 
561   // Otherwise, we need to create an aggregate.
562   Value *Agg = UndefValue::get(NewS->getType());
563   for (size_t I = 0, E = Args.size(); I != E; ++I)
564     Agg = Builder.CreateInsertValue(Agg, Args[I], I);
565 
566   NewS->replaceAllUsesWith(Agg);
567 }
568 
569 void CoroCloner::replaceCoroSuspends() {
570   Value *SuspendResult;
571 
572   switch (Shape.ABI) {
573   // In switch lowering, replace coro.suspend with the appropriate value
574   // for the type of function we're extracting.
575   // Replacing coro.suspend with (0) will result in control flow proceeding to
576   // a resume label associated with a suspend point, replacing it with (1) will
577   // result in control flow proceeding to a cleanup label associated with this
578   // suspend point.
579   case coro::ABI::Switch:
580     SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
581     break;
582 
583   // In async lowering there are no uses of the result.
584   case coro::ABI::Async:
585     return;
586 
587   // In returned-continuation lowering, the arguments from earlier
588   // continuations are theoretically arbitrary, and they should have been
589   // spilled.
590   case coro::ABI::RetconOnce:
591   case coro::ABI::Retcon:
592     return;
593   }
594 
595   for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
596     // The active suspend was handled earlier.
597     if (CS == ActiveSuspend) continue;
598 
599     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
600     MappedCS->replaceAllUsesWith(SuspendResult);
601     MappedCS->eraseFromParent();
602   }
603 }
604 
605 void CoroCloner::replaceCoroEnds() {
606   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
607     // We use a null call graph because there's no call graph node for
608     // the cloned function yet.  We'll just be rebuilding that later.
609     auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
610     replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
611   }
612 }
613 
614 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
615                                  ValueToValueMapTy *VMap) {
616   if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
617     return;
618   Value *CachedSlot = nullptr;
619   auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
620     if (CachedSlot) {
621       assert(cast<PointerType>(CachedSlot->getType())
622                  ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
623              "multiple swifterror slots in function with different types");
624       return CachedSlot;
625     }
626 
627     // Check if the function has a swifterror argument.
628     for (auto &Arg : F.args()) {
629       if (Arg.isSwiftError()) {
630         CachedSlot = &Arg;
631         assert(cast<PointerType>(Arg.getType())
632                    ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
633                "swifterror argument does not have expected type");
634         return &Arg;
635       }
636     }
637 
638     // Create a swifterror alloca.
639     IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
640     auto Alloca = Builder.CreateAlloca(ValueTy);
641     Alloca->setSwiftError(true);
642 
643     CachedSlot = Alloca;
644     return Alloca;
645   };
646 
647   for (CallInst *Op : Shape.SwiftErrorOps) {
648     auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
649     IRBuilder<> Builder(MappedOp);
650 
651     // If there are no arguments, this is a 'get' operation.
652     Value *MappedResult;
653     if (Op->arg_empty()) {
654       auto ValueTy = Op->getType();
655       auto Slot = getSwiftErrorSlot(ValueTy);
656       MappedResult = Builder.CreateLoad(ValueTy, Slot);
657     } else {
658       assert(Op->arg_size() == 1);
659       auto Value = MappedOp->getArgOperand(0);
660       auto ValueTy = Value->getType();
661       auto Slot = getSwiftErrorSlot(ValueTy);
662       Builder.CreateStore(Value, Slot);
663       MappedResult = Slot;
664     }
665 
666     MappedOp->replaceAllUsesWith(MappedResult);
667     MappedOp->eraseFromParent();
668   }
669 
670   // If we're updating the original function, we've invalidated SwiftErrorOps.
671   if (VMap == nullptr) {
672     Shape.SwiftErrorOps.clear();
673   }
674 }
675 
676 void CoroCloner::replaceSwiftErrorOps() {
677   ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
678 }
679 
680 void CoroCloner::salvageDebugInfo() {
681   SmallVector<DbgVariableIntrinsic *, 8> Worklist;
682   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
683   for (auto &BB : *NewF)
684     for (auto &I : BB)
685       if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
686         Worklist.push_back(DVI);
687   for (DbgVariableIntrinsic *DVI : Worklist)
688     coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);
689 
690   // Remove all salvaged dbg.declare intrinsics that became
691   // either unreachable or stale due to the CoroSplit transformation.
692   DominatorTree DomTree(*NewF);
693   auto IsUnreachableBlock = [&](BasicBlock *BB) {
694     return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
695                                    &DomTree);
696   };
697   for (DbgVariableIntrinsic *DVI : Worklist) {
698     if (IsUnreachableBlock(DVI->getParent()))
699       DVI->eraseFromParent();
700     else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
701       // Count all non-debuginfo uses in reachable blocks.
702       unsigned Uses = 0;
703       for (auto *User : DVI->getVariableLocationOp(0)->users())
704         if (auto *I = dyn_cast<Instruction>(User))
705           if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
706             ++Uses;
707       if (!Uses)
708         DVI->eraseFromParent();
709     }
710   }
711 }
712 
713 void CoroCloner::replaceEntryBlock() {
714   // In the original function, the AllocaSpillBlock is a block immediately
715   // following the allocation of the frame object which defines GEPs for
716   // all the allocas that have been moved into the frame, and it ends by
717   // branching to the original beginning of the coroutine.  Make this
718   // the entry block of the cloned function.
719   auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
720   auto *OldEntry = &NewF->getEntryBlock();
721   Entry->setName("entry" + Suffix);
722   Entry->moveBefore(OldEntry);
723   Entry->getTerminator()->eraseFromParent();
724 
725   // Clear all predecessors of the new entry block.  There should be
726   // exactly one predecessor, which we created when splitting out
727   // AllocaSpillBlock to begin with.
728   assert(Entry->hasOneUse());
729   auto BranchToEntry = cast<BranchInst>(Entry->user_back());
730   assert(BranchToEntry->isUnconditional());
731   Builder.SetInsertPoint(BranchToEntry);
732   Builder.CreateUnreachable();
733   BranchToEntry->eraseFromParent();
734 
735   // Branch from the entry to the appropriate place.
736   Builder.SetInsertPoint(Entry);
737   switch (Shape.ABI) {
738   case coro::ABI::Switch: {
739     // In switch-lowering, we built a resume-entry block in the original
740     // function.  Make the entry block branch to this.
741     auto *SwitchBB =
742       cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
743     Builder.CreateBr(SwitchBB);
744     break;
745   }
746   case coro::ABI::Async:
747   case coro::ABI::Retcon:
748   case coro::ABI::RetconOnce: {
749     // In continuation ABIs, we want to branch to immediately after the
750     // active suspend point.  Earlier phases will have put the suspend in its
751     // own basic block, so just thread our jump directly to its successor.
752     assert((Shape.ABI == coro::ABI::Async &&
753             isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
754            ((Shape.ABI == coro::ABI::Retcon ||
755              Shape.ABI == coro::ABI::RetconOnce) &&
756             isa<CoroSuspendRetconInst>(ActiveSuspend)));
757     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
758     auto Branch = cast<BranchInst>(MappedCS->getNextNode());
759     assert(Branch->isUnconditional());
760     Builder.CreateBr(Branch->getSuccessor(0));
761     break;
762   }
763   }
764 
765   // Any static alloca that's still being used but not reachable from the new
766   // entry needs to be moved to the new entry.
767   Function *F = OldEntry->getParent();
768   DominatorTree DT{*F};
769   for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
770     auto *Alloca = dyn_cast<AllocaInst>(&I);
771     if (!Alloca || I.use_empty())
772       continue;
773     if (DT.isReachableFromEntry(I.getParent()) ||
774         !isa<ConstantInt>(Alloca->getArraySize()))
775       continue;
776     I.moveBefore(*Entry, Entry->getFirstInsertionPt());
777   }
778 }
779 
780 /// Derive the value of the new frame pointer.
781 Value *CoroCloner::deriveNewFramePointer() {
782   // Builder should be inserting to the front of the new entry block.
783 
784   switch (Shape.ABI) {
785   // In switch-lowering, the argument is the frame pointer.
786   case coro::ABI::Switch:
787     return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is tail-allocated immediately after the
  // async context header.
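  //
  // Roughly (types, names, and offsets illustrative):
  //
  //   %caller.ctx = call i8* %proj.fn(i8* %callee.ctx)   ; inlined afterwards
  //   %frame.addr = getelementptr inbounds i8, i8* %caller.ctx,
  //                                        i32 <FrameOffset>
  //   %frame      = bitcast i8* %frame.addr to %f.Frame*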
793   case coro::ABI::Async: {
794     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
795     auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
796     auto *CalleeContext = NewF->getArg(ContextIdx);
797     auto *FramePtrTy = Shape.FrameTy->getPointerTo();
798     auto *ProjectionFunc =
799         ActiveAsyncSuspend->getAsyncContextProjectionFunction();
800     auto DbgLoc =
801         cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
802     // Calling i8* (i8*)
803     auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
804                                              ProjectionFunc, CalleeContext);
805     CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
806     CallerContext->setDebugLoc(DbgLoc);
807     // The frame is located after the async_context header.
808     auto &Context = Builder.getContext();
809     auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
810         Type::getInt8Ty(Context), CallerContext,
811         Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
812     // Inline the projection function.
813     InlineFunctionInfo InlineInfo;
814     auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
815     assert(InlineRes.isSuccess());
816     (void)InlineRes;
817     return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
818   }
819   // In continuation-lowering, the argument is the opaque storage.
820   case coro::ABI::Retcon:
821   case coro::ABI::RetconOnce: {
822     Argument *NewStorage = &*NewF->arg_begin();
823     auto FramePtrTy = Shape.FrameTy->getPointerTo();
824 
    // If the frame is inline in the storage, just bitcast the storage to the
    // frame pointer type.
826     if (Shape.RetconLowering.IsFrameInlineInStorage)
827       return Builder.CreateBitCast(NewStorage, FramePtrTy);
828 
829     // Otherwise, load the real frame from the opaque storage.
830     auto FramePtrPtr =
831       Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
832     return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
833   }
834   }
835   llvm_unreachable("bad ABI");
836 }
837 
838 static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
839                                  unsigned ParamIndex,
840                                  uint64_t Size, Align Alignment) {
841   AttrBuilder ParamAttrs(Context);
842   ParamAttrs.addAttribute(Attribute::NonNull);
843   ParamAttrs.addAttribute(Attribute::NoAlias);
844   ParamAttrs.addAlignmentAttr(Alignment);
845   ParamAttrs.addDereferenceableAttr(Size);
846   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
847 }
848 
849 static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
850                                  unsigned ParamIndex) {
851   AttrBuilder ParamAttrs(Context);
852   ParamAttrs.addAttribute(Attribute::SwiftAsync);
853   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
854 }
855 
856 static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
857                               unsigned ParamIndex) {
858   AttrBuilder ParamAttrs(Context);
859   ParamAttrs.addAttribute(Attribute::SwiftSelf);
860   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
861 }
862 
863 /// Clone the body of the original function into a resume function of
864 /// some sort.
865 void CoroCloner::create() {
866   // Create the new function if we don't already have one.
867   if (!NewF) {
868     NewF = createCloneDeclaration(OrigF, Shape, Suffix,
869                                   OrigF.getParent()->end(), ActiveSuspend);
870   }
871 
  // Replace all args with undefs. The buildCoroutineFrame algorithm has already
  // rewritten accesses to the args that occur after suspend points as loads
  // and stores to/from the coroutine frame.
875   for (Argument &A : OrigF.args())
876     VMap[&A] = UndefValue::get(A.getType());
877 
878   SmallVector<ReturnInst *, 4> Returns;
879 
880   // Ignore attempts to change certain attributes of the function.
881   // TODO: maybe there should be a way to suppress this during cloning?
882   auto savedVisibility = NewF->getVisibility();
883   auto savedUnnamedAddr = NewF->getUnnamedAddr();
884   auto savedDLLStorageClass = NewF->getDLLStorageClass();
885 
886   // NewF's linkage (which CloneFunctionInto does *not* change) might not
887   // be compatible with the visibility of OrigF (which it *does* change),
888   // so protect against that.
889   auto savedLinkage = NewF->getLinkage();
890   NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
891 
892   CloneFunctionInto(NewF, &OrigF, VMap,
893                     CloneFunctionChangeType::LocalChangesOnly, Returns);
894 
895   auto &Context = NewF->getContext();
896 
897   // For async functions / continuations, adjust the scope line of the
898   // clone to the line number of the suspend point. However, only
899   // adjust the scope line when the files are the same. This ensures
900   // line number and file name belong together. The scope line is
901   // associated with all pre-prologue instructions. This avoids a jump
902   // in the linetable from the function declaration to the suspend point.
903   if (DISubprogram *SP = NewF->getSubprogram()) {
904     assert(SP != OrigF.getSubprogram() && SP->isDistinct());
905     if (ActiveSuspend)
906       if (auto DL = ActiveSuspend->getDebugLoc())
907         if (SP->getFile() == DL->getFile())
908           SP->setScopeLine(DL->getLine());
909     // Update the linkage name to reflect the modified symbol name. It
910     // is necessary to update the linkage name in Swift, since the
911     // mangling changes for resume functions. It might also be the
912     // right thing to do in C++, but due to a limitation in LLVM's
913     // AsmPrinter we can only do this if the function doesn't have an
914     // abstract specification, since the DWARF backend expects the
915     // abstract specification to contain the linkage name and asserts
916     // that they are identical.
917     if (!SP->getDeclaration() && SP->getUnit() &&
918         SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
919       SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
920   }
921 
922   NewF->setLinkage(savedLinkage);
923   NewF->setVisibility(savedVisibility);
924   NewF->setUnnamedAddr(savedUnnamedAddr);
925   NewF->setDLLStorageClass(savedDLLStorageClass);
926 
927   // Replace the attributes of the new function:
928   auto OrigAttrs = NewF->getAttributes();
929   auto NewAttrs = AttributeList();
930 
931   switch (Shape.ABI) {
932   case coro::ABI::Switch:
933     // Bootstrap attributes by copying function attributes from the
934     // original function.  This should include optimization settings and so on.
935     NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
936 
937     addFramePointerAttrs(NewAttrs, Context, 0,
938                          Shape.FrameSize, Shape.FrameAlign);
939     break;
940   case coro::ABI::Async: {
941     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
942     if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
943                                 Attribute::SwiftAsync)) {
944       uint32_t ArgAttributeIndices =
945           ActiveAsyncSuspend->getStorageArgumentIndex();
946       auto ContextArgIndex = ArgAttributeIndices & 0xff;
947       addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
948 
      // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
      // `swiftself`.
951       auto SwiftSelfIndex = ArgAttributeIndices >> 8;
952       if (SwiftSelfIndex)
953         addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
954     }
955 
956     // Transfer the original function's attributes.
957     auto FnAttrs = OrigF.getAttributes().getFnAttrs();
958     NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
959     break;
960   }
961   case coro::ABI::Retcon:
962   case coro::ABI::RetconOnce:
963     // If we have a continuation prototype, just use its attributes,
964     // full-stop.
965     NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
966 
967     addFramePointerAttrs(NewAttrs, Context, 0,
968                          Shape.getRetconCoroId()->getStorageSize(),
969                          Shape.getRetconCoroId()->getStorageAlignment());
970     break;
971   }
972 
973   switch (Shape.ABI) {
974   // In these ABIs, the cloned functions always return 'void', and the
975   // existing return sites are meaningless.  Note that for unique
976   // continuations, this includes the returns associated with suspends;
977   // this is fine because we can't suspend twice.
978   case coro::ABI::Switch:
979   case coro::ABI::RetconOnce:
980     // Remove old returns.
981     for (ReturnInst *Return : Returns)
982       changeToUnreachable(Return);
983     break;
984 
985   // With multi-suspend continuations, we'll already have eliminated the
986   // original returns and inserted returns before all the suspend points,
987   // so we want to leave any returns in place.
988   case coro::ABI::Retcon:
989     break;
  // Async lowering will insert musttail calls at all suspend points, each
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the
  // verifier. These returns should be unreachable from the clone.
994   case coro::ABI::Async:
995     break;
996   }
997 
998   NewF->setAttributes(NewAttrs);
999   NewF->setCallingConv(Shape.getResumeFunctionCC());
1000 
1001   // Set up the new entry block.
1002   replaceEntryBlock();
1003 
1004   Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1005   NewFramePtr = deriveNewFramePointer();
1006 
1007   // Remap frame pointer.
1008   Value *OldFramePtr = VMap[Shape.FramePtr];
1009   NewFramePtr->takeName(OldFramePtr);
1010   OldFramePtr->replaceAllUsesWith(NewFramePtr);
1011 
1012   // Remap vFrame pointer.
1013   auto *NewVFrame = Builder.CreateBitCast(
1014       NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
1015   Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
1016   OldVFrame->replaceAllUsesWith(NewVFrame);
1017 
1018   switch (Shape.ABI) {
1019   case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via the switch (this
    // allows us to remove the final case from the switch, since it is
    // undefined behavior to resume a coroutine suspended at the final suspend
    // point).
1023     if (Shape.SwitchLowering.HasFinalSuspend)
1024       handleFinalSuspend();
1025     break;
1026   case coro::ABI::Async:
1027   case coro::ABI::Retcon:
1028   case coro::ABI::RetconOnce:
1029     // Replace uses of the active suspend with the corresponding
1030     // continuation-function arguments.
1031     assert(ActiveSuspend != nullptr &&
1032            "no active suspend when lowering a continuation-style coroutine");
1033     replaceRetconOrAsyncSuspendUses();
1034     break;
1035   }
1036 
1037   // Handle suspends.
1038   replaceCoroSuspends();
1039 
1040   // Handle swifterror.
1041   replaceSwiftErrorOps();
1042 
1043   // Remove coro.end intrinsics.
1044   replaceCoroEnds();
1045 
1046   // Salvage debug info that points into the coroutine frame.
1047   salvageDebugInfo();
1048 
1049   // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1050   // to suppress deallocation code.
1051   if (Shape.ABI == coro::ABI::Switch)
1052     coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1053                           /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1054 }
1055 
// Create a resume clone by cloning the body of the original function, setting
// a new entry block, and replacing coro.suspend with an appropriate value to
// force the resume or cleanup path for every suspend point.
1059 static Function *createClone(Function &F, const Twine &Suffix,
1060                              coro::Shape &Shape, CoroCloner::Kind FKind) {
1061   CoroCloner Cloner(F, Suffix, Shape, FKind);
1062   Cloner.create();
1063   return Cloner.getFunction();
1064 }
1065 
1066 /// Remove calls to llvm.coro.end in the original function.
1067 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
1068   for (auto End : Shape.CoroEnds) {
1069     replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
1070   }
1071 }
1072 
1073 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1074   assert(Shape.ABI == coro::ABI::Async);
1075 
1076   auto *FuncPtrStruct = cast<ConstantStruct>(
1077       Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1078   auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1079   auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1080   auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1081                                           Shape.AsyncLowering.ContextSize);
1082   auto *NewFuncPtrStruct = ConstantStruct::get(
1083       FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1084 
1085   Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1086 }
1087 
1088 static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
1089   if (Shape.ABI == coro::ABI::Async)
1090     updateAsyncFuncPointerContextSize(Shape);
1091 
1092   for (CoroAlignInst *CA : Shape.CoroAligns) {
1093     CA->replaceAllUsesWith(
1094         ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
1095     CA->eraseFromParent();
1096   }
1097 
1098   if (Shape.CoroSizes.empty())
1099     return;
1100 
1101   // In the same function all coro.sizes should have the same result type.
1102   auto *SizeIntrin = Shape.CoroSizes.back();
1103   Module *M = SizeIntrin->getModule();
1104   const DataLayout &DL = M->getDataLayout();
1105   auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1106   auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1107 
1108   for (CoroSizeInst *CS : Shape.CoroSizes) {
1109     CS->replaceAllUsesWith(SizeConstant);
1110     CS->eraseFromParent();
1111   }
1112 }
1113 
// Create a global constant array containing pointers to the functions provided,
// and set the Info parameter of CoroBegin to point at this constant. Example:
1116 //
1117 //   @f.resumers = internal constant [2 x void(%f.frame*)*]
1118 //                    [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1119 //   define void @f() {
1120 //     ...
1121 //     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1122 //                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1123 //
1124 // Assumes that all the functions have the same signature.
1125 static void setCoroInfo(Function &F, coro::Shape &Shape,
1126                         ArrayRef<Function *> Fns) {
1127   // This only works under the switch-lowering ABI because coro elision
1128   // only works on the switch-lowering ABI.
1129   assert(Shape.ABI == coro::ABI::Switch);
1130 
1131   SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1132   assert(!Args.empty());
1133   Function *Part = *Fns.begin();
1134   Module *M = Part->getParent();
1135   auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1136 
1137   auto *ConstVal = ConstantArray::get(ArrTy, Args);
1138   auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1139                                 GlobalVariable::PrivateLinkage, ConstVal,
1140                                 F.getName() + Twine(".resumers"));
1141 
1142   // Update coro.begin instruction to refer to this constant.
1143   LLVMContext &C = F.getContext();
1144   auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1145   Shape.getSwitchCoroId()->setInfo(BC);
1146 }
1147 
1148 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
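//
// A rough sketch of the stores this emits (frame layout illustrative):
//
//   store void (%f.Frame*)* @f.resume, void (%f.Frame*)** %resume.addr
//   %sel = select i1 %coro.alloc, void (%f.Frame*)* @f.destroy,
//                                 void (%f.Frame*)* @f.cleanup
//   store void (%f.Frame*)* %sel, void (%f.Frame*)** %destroy.addr
//
// The select is only emitted when the coro.id has an associated coro.alloc.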
1149 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1150                             Function *DestroyFn, Function *CleanupFn) {
1151   assert(Shape.ABI == coro::ABI::Switch);
1152 
1153   IRBuilder<> Builder(Shape.FramePtr->getNextNode());
1154   auto *ResumeAddr = Builder.CreateStructGEP(
1155       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1156       "resume.addr");
1157   Builder.CreateStore(ResumeFn, ResumeAddr);
1158 
1159   Value *DestroyOrCleanupFn = DestroyFn;
1160 
1161   CoroIdInst *CoroId = Shape.getSwitchCoroId();
1162   if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elide the
    // allocation), use CleanupFn instead of DestroyFn.
1165     DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1166   }
1167 
1168   auto *DestroyAddr = Builder.CreateStructGEP(
1169       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1170       "destroy.addr");
1171   Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1172 }
1173 
1174 static void postSplitCleanup(Function &F) {
1175   removeUnreachableBlocks(F);
1176 
1177 #ifndef NDEBUG
1178   // For now, we do a mandatory verification step because we don't
1179   // entirely trust this pass.  Note that we don't want to add a verifier
1180   // pass to FPM below because it will also verify all the global data.
1181   if (verifyFunction(F, &errs()))
1182     report_fatal_error("Broken function");
1183 #endif
1184 }
1185 
// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
1188 static void
1189 scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1190                           DenseMap<Value *, Value *> &ResolvedValues) {
1191   auto *PrevBB = Prev->getParent();
1192   for (PHINode &PN : NewBlock->phis()) {
1193     auto V = PN.getIncomingValueForBlock(PrevBB);
1194     // See if we already resolved it.
1195     auto VI = ResolvedValues.find(V);
1196     if (VI != ResolvedValues.end())
1197       V = VI->second;
1198     // Remember the value.
1199     ResolvedValues[&PN] = V;
1200   }
1201 }
1202 
// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. A suspend point is represented by a switch; track the PHI
// values and select the correct case successor when possible.
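//
// Illustrative sketch: starting from the unconditional branch that follows a
// resume call, a chain like
//
//   entry:
//     ...
//     br label %check
//   check:
//     %cmp = icmp eq i8 %index, 1     ; %index resolved via PHI tracking
//     br i1 %cmp, label %exit, label %more.work
//   exit:
//     ret void
//
// can be folded down to the `ret void`, which is then cloned over the initial
// branch.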
1206 static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1207   DenseMap<Value *, Value *> ResolvedValues;
1208   BasicBlock *UnconditionalSucc = nullptr;
1209   assert(InitialInst->getModule());
1210   const DataLayout &DL = InitialInst->getModule()->getDataLayout();
1211 
1212   auto GetFirstValidInstruction = [](Instruction *I) {
1213     while (I) {
      // A BitCastInst does not generate actual code, so we can skip it.
1215       if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
1216           I->isLifetimeStartOrEnd())
1217         I = I->getNextNode();
1218       else if (isInstructionTriviallyDead(I))
        // Since we are in the middle of the transformation, we need to erase
        // the dead instruction manually.
1221         I = &*I->eraseFromParent();
1222       else
1223         break;
1224     }
1225     return I;
1226   };
1227 
1228   auto TryResolveConstant = [&ResolvedValues](Value *V) {
1229     auto It = ResolvedValues.find(V);
1230     if (It != ResolvedValues.end())
1231       V = It->second;
1232     return dyn_cast<ConstantInt>(V);
1233   };
1234 
1235   Instruction *I = InitialInst;
1236   while (I->isTerminator() || isa<CmpInst>(I)) {
1237     if (isa<ReturnInst>(I)) {
1238       if (I != InitialInst) {
        // If InitialInst is an unconditional branch, remove PHI values that
        // come from the basic block of InitialInst.
1241         if (UnconditionalSucc)
1242           UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1243         ReplaceInstWithInst(InitialInst, I->clone());
1244       }
1245       return true;
1246     }
1247     if (auto *BR = dyn_cast<BranchInst>(I)) {
1248       if (BR->isUnconditional()) {
1249         BasicBlock *Succ = BR->getSuccessor(0);
1250         if (I == InitialInst)
1251           UnconditionalSucc = Succ;
1252         scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
1253         I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
1254         continue;
1255       }
1256 
1257       BasicBlock *BB = BR->getParent();
      // Handle the case where the condition of the conditional branch is
      // constant, e.g.,
      //
      //     br i1 false, label %cleanup, label %CoroEnd
      //
      // This can happen during the transformation, and we can keep
      // simplifying in that case.
1265       if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
1266         // Handle this branch in next iteration.
1267         I = BB->getTerminator();
1268         continue;
1269       }
1270     } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      // If the number of cases of the suspend switch instruction has been
      // reduced to one, it is simplified to a CmpInst by
      // llvm::ConstantFoldTerminator.
1273       auto *BR = dyn_cast<BranchInst>(
1274           GetFirstValidInstruction(CondCmp->getNextNode()));
1275       if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
1276         return false;
1277 
      // The comparison looks like: %cond = icmp eq i8 %V, constant.
      // We try to resolve a constant for the first operand only, since the
      // second operand should be a literal constant by design.
1281       ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
1282       auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1283       if (!Cond0 || !Cond1)
1284         return false;
1285 
      // Both operands of the CmpInst are constant, so we can evaluate it
      // immediately to get the destination.
1288       auto *ConstResult =
1289           dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
1290               CondCmp->getPredicate(), Cond0, Cond1, DL));
1291       if (!ConstResult)
1292         return false;
1293 
1294       CondCmp->replaceAllUsesWith(ConstResult);
1295       CondCmp->eraseFromParent();
1296 
1297       // Handle this branch in next iteration.
1298       I = BR;
1299       continue;
1300     } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1301       ConstantInt *Cond = TryResolveConstant(SI->getCondition());
1302       if (!Cond)
1303         return false;
1304 
1305       BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1306       scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1307       I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
1308       continue;
1309     }
1310 
1311     return false;
1312   }
1313   return false;
1314 }
1315 
// Check whether CI obeys the rules of the musttail attribute.
1317 static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1318   if (CI.isInlineAsm())
1319     return false;
1320 
1321   // Match prototypes and calling conventions of resume function.
1322   FunctionType *CalleeTy = CI.getFunctionType();
1323   if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1324     return false;
1325 
1326   Type *CalleeParmTy = CalleeTy->getParamType(0);
1327   if (!CalleeParmTy->isPointerTy() ||
1328       (CalleeParmTy->getPointerAddressSpace() != 0))
1329     return false;
1330 
1331   if (CI.getCallingConv() != F.getCallingConv())
1332     return false;
1333 
  // CI should not have any ABI-impacting attributes on its argument.
1335   static const Attribute::AttrKind ABIAttrs[] = {
1336       Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
1337       Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
1338       Attribute::SwiftSelf,    Attribute::SwiftError};
1339   AttributeList Attrs = CI.getAttributes();
1340   for (auto AK : ABIAttrs)
1341     if (Attrs.hasParamAttr(0, AK))
1342       return false;
1343 
1344   return true;
1345 }
1346 
// Add musttail to any resume calls that are immediately followed by a
// suspend (i.e. ret). We do this even at -O0 to support guaranteed tail calls
// for symmetric coroutine control transfer (C++ Coroutines TS extension).
// This transformation is applied only in the resume part of the coroutine and
// only to calls with a signature and calling convention identical to the
// coro.resume call.
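//
// Illustrative result (names are made up):
//
//   musttail call void %ResumeFn(i8* %frame)
//   ret void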
1352 static void addMustTailToCoroResumes(Function &F) {
  bool Changed = false;
1354 
1355   // Collect potential resume instructions.
1356   SmallVector<CallInst *, 4> Resumes;
1357   for (auto &I : instructions(F))
1358     if (auto *Call = dyn_cast<CallInst>(&I))
1359       if (shouldBeMustTail(*Call, F))
1360         Resumes.push_back(Call);
1361 
1362   // Set musttail on those that are followed by a ret instruction.
1363   for (CallInst *Call : Resumes)
1364     if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1365       Call->setTailCallKind(CallInst::TCK_MustTail);
      Changed = true;
1367     }
1368 
  if (Changed)
1370     removeUnreachableBlocks(F);
1371 }
1372 
// The coroutine has no suspend points. Remove the heap allocation for the
// coroutine frame if possible.
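//
// For the switch ABI, when a coro.alloc is present, this roughly rewrites
// (illustrative, simplified IR):
//   %alloc = call i1 @llvm.coro.alloc(token %id)
//   ...
//   %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
// so that %alloc becomes false, %hdl is replaced by a stack-allocated frame
// (an alloca bitcast to i8*), and the matching llvm.coro.free is elided,
// leaving the heap allocation and deallocation paths dead.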
1375 static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1376   auto *CoroBegin = Shape.CoroBegin;
1377   auto *CoroId = CoroBegin->getId();
1378   auto *AllocInst = CoroId->getCoroAlloc();
1379   switch (Shape.ABI) {
1380   case coro::ABI::Switch: {
1381     auto SwitchId = cast<CoroIdInst>(CoroId);
1382     coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1383     if (AllocInst) {
1384       IRBuilder<> Builder(AllocInst);
1385       auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1386       Frame->setAlignment(Shape.FrameAlign);
1387       auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1388       AllocInst->replaceAllUsesWith(Builder.getFalse());
1389       AllocInst->eraseFromParent();
1390       CoroBegin->replaceAllUsesWith(VFrame);
1391     } else {
1392       CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1393     }
1394 
1395     break;
1396   }
1397   case coro::ABI::Async:
1398   case coro::ABI::Retcon:
1399   case coro::ABI::RetconOnce:
1400     CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1401     break;
1402   }
1403 
1404   CoroBegin->eraseFromParent();
1405 }
1406 
// simplifySuspendPoint needs to check that there are no calls between
// coro.save and coro.suspend, since any of those calls may potentially resume
// the coroutine, in which case we cannot eliminate the suspend point.
1410 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1411   for (Instruction *I = From; I != To; I = I->getNextNode()) {
1412     // Assume that no intrinsic can resume the coroutine.
1413     if (isa<IntrinsicInst>(I))
1414       continue;
1415 
1416     if (isa<CallBase>(I))
1417       return true;
1418   }
1419   return false;
1420 }
1421 
1422 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1423   SmallPtrSet<BasicBlock *, 8> Set;
1424   SmallVector<BasicBlock *, 8> Worklist;
1425 
1426   Set.insert(SaveBB);
1427   Worklist.push_back(ResDesBB);
1428 
  // Accumulate all blocks between SaveBB and ResDesBB. Because the coro.save
  // intrinsic returns a token consumed by the suspend instruction, all blocks
  // in between will eventually hit SaveBB when going backwards from ResDesBB.
1432   while (!Worklist.empty()) {
1433     auto *BB = Worklist.pop_back_val();
1434     Set.insert(BB);
1435     for (auto *Pred : predecessors(BB))
1436       if (!Set.contains(Pred))
1437         Worklist.push_back(Pred);
1438   }
1439 
1440   // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1441   Set.erase(SaveBB);
1442   Set.erase(ResDesBB);
1443 
1444   for (auto *BB : Set)
1445     if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1446       return true;
1447 
1448   return false;
1449 }
1450 
1451 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1452   auto *SaveBB = Save->getParent();
1453   auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1454 
1455   if (SaveBB == ResumeOrDestroyBB)
1456     return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1457 
1458   // Any calls from Save to the end of the block?
1459   if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1460     return true;
1461 
  // Any calls from the beginning of the block up to ResumeOrDestroy?
1463   if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1464                              ResumeOrDestroy))
1465     return true;
1466 
1467   // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1468   if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1469     return true;
1470 
1471   return false;
1472 }
1473 
// If a suspend intrinsic is preceded by a resume or destroy of the same
// coroutine, we can eliminate the suspend point and replace it with normal
// control flow.
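//
// For example (illustrative, simplified IR; index 0 selects the resume path):
//   %save = call token @llvm.coro.save(i8* %hdl)
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)
//   %fn   = bitcast i8* %addr to void (i8*)*
//   call void %fn(i8* %hdl)
//   %sp   = call i8 @llvm.coro.suspend(token %save, i1 false)
// Here the coroutine schedules its own resumption immediately before
// suspending, so %sp can be replaced with the constant index 0 and the save,
// resume call and suspend can all be removed.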
1476 static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1477                                  CoroBeginInst *CoroBegin) {
1478   Instruction *Prev = Suspend->getPrevNode();
1479   if (!Prev) {
1480     auto *Pred = Suspend->getParent()->getSinglePredecessor();
1481     if (!Pred)
1482       return false;
1483     Prev = Pred->getTerminator();
1484   }
1485 
1486   CallBase *CB = dyn_cast<CallBase>(Prev);
1487   if (!CB)
1488     return false;
1489 
1490   auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1491 
1492   // See if the callsite is for resumption or destruction of the coroutine.
1493   auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1494   if (!SubFn)
1495     return false;
1496 
  // If it does not refer to the current coroutine, we cannot do anything with it.
1498   if (SubFn->getFrame() != CoroBegin)
1499     return false;
1500 
  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
1504   auto *Save = Suspend->getCoroSave();
1505   if (hasCallsBetween(Save, CB))
1506     return false;
1507 
  // Replace llvm.coro.suspend with the value that directs control to the
  // resume or cleanup path.
1510   Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1511   Suspend->eraseFromParent();
1512   Save->eraseFromParent();
1513 
1514   // No longer need a call to coro.resume or coro.destroy.
1515   if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1516     BranchInst::Create(Invoke->getNormalDest(), Invoke);
1517   }
1518 
1519   // Grab the CalledValue from CB before erasing the CallInstr.
1520   auto *CalledValue = CB->getCalledOperand();
1521   CB->eraseFromParent();
1522 
  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1524   if (CalledValue != SubFn && CalledValue->user_empty())
1525     if (auto *I = dyn_cast<Instruction>(CalledValue))
1526       I->eraseFromParent();
1527 
1528   // Now we are good to remove SubFn.
1529   if (SubFn->user_empty())
1530     SubFn->eraseFromParent();
1531 
1532   return true;
1533 }
1534 
1535 // Remove suspend points that are simplified.
1536 static void simplifySuspendPoints(coro::Shape &Shape) {
1537   // Currently, the only simplification we do is switch-lowering-specific.
1538   if (Shape.ABI != coro::ABI::Switch)
1539     return;
1540 
1541   auto &S = Shape.CoroSuspends;
1542   size_t I = 0, N = S.size();
1543   if (N == 0)
1544     return;
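  // Simplified suspend points are swapped to the end of the vector and then
  // trimmed off by the resize() below.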
1545   while (true) {
1546     auto SI = cast<CoroSuspendInst>(S[I]);
1547     // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1548     // to resume a coroutine suspended at the final suspend point.
1549     if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1550       if (--N == I)
1551         break;
1552       std::swap(S[I], S[N]);
1553       continue;
1554     }
1555     if (++I == N)
1556       break;
1557   }
1558   S.resize(N);
1559 }
1560 
1561 static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1562                                  SmallVectorImpl<Function *> &Clones) {
1563   assert(Shape.ABI == coro::ABI::Switch);
1564 
1565   createResumeEntryBlock(F, Shape);
1566   auto ResumeClone = createClone(F, ".resume", Shape,
1567                                  CoroCloner::Kind::SwitchResume);
1568   auto DestroyClone = createClone(F, ".destroy", Shape,
1569                                   CoroCloner::Kind::SwitchUnwind);
1570   auto CleanupClone = createClone(F, ".cleanup", Shape,
1571                                   CoroCloner::Kind::SwitchCleanup);
1572 
1573   postSplitCleanup(*ResumeClone);
1574   postSplitCleanup(*DestroyClone);
1575   postSplitCleanup(*CleanupClone);
1576 
1577   addMustTailToCoroResumes(*ResumeClone);
1578 
  // Store addresses of the resume/destroy/cleanup functions in the coroutine
  // frame.
1580   updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1581 
1582   assert(Clones.empty());
1583   Clones.push_back(ResumeClone);
1584   Clones.push_back(DestroyClone);
1585   Clones.push_back(CleanupClone);
1586 
  // Create a constant array referring to the resume/destroy/cleanup functions,
  // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
  // pass can determine the correct function to call.
1590   setCoroInfo(F, Shape, Clones);
1591 }
1592 
1593 static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1594                                        Value *Continuation) {
1595   auto *ResumeIntrinsic = Suspend->getResumeFunction();
1596   auto &Context = Suspend->getParent()->getParent()->getContext();
1597   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1598 
1599   IRBuilder<> Builder(ResumeIntrinsic);
1600   auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1601   ResumeIntrinsic->replaceAllUsesWith(Val);
1602   ResumeIntrinsic->eraseFromParent();
1603   Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1604                       UndefValue::get(Int8PtrTy));
1605 }
1606 
/// Coerce the arguments in \p FnArgs to the parameter types of \p FnTy,
/// appending the results to \p CallArgs.
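///
/// For example (illustrative): if the callee expects an i8* parameter but the
/// supplied argument is a pointer of some other type, a bitcast is emitted so
/// that the resulting musttail call matches the callee's prototype exactly.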
1608 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1609                             ArrayRef<Value *> FnArgs,
1610                             SmallVectorImpl<Value *> &CallArgs) {
1611   size_t ArgIdx = 0;
1612   for (auto paramTy : FnTy->params()) {
1613     assert(ArgIdx < FnArgs.size());
1614     if (paramTy != FnArgs[ArgIdx]->getType())
1615       CallArgs.push_back(
1616           Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1617     else
1618       CallArgs.push_back(FnArgs[ArgIdx]);
1619     ++ArgIdx;
1620   }
1621 }
1622 
1623 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1624                                    ArrayRef<Value *> Arguments,
1625                                    IRBuilder<> &Builder) {
1626   auto *FnTy = MustTailCallFn->getFunctionType();
  // Coerce the arguments; LLVM optimizations seem to ignore the types in
  // vararg functions and throw away casts in optimized mode.
1629   SmallVector<Value *, 8> CallArgs;
1630   coerceArguments(Builder, FnTy, Arguments, CallArgs);
1631 
1632   auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1633   TailCall->setTailCallKind(CallInst::TCK_MustTail);
1634   TailCall->setDebugLoc(Loc);
1635   TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1636   return TailCall;
1637 }
1638 
1639 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1640                                 SmallVectorImpl<Function *> &Clones) {
1641   assert(Shape.ABI == coro::ABI::Async);
1642   assert(Clones.empty());
1643   // Reset various things that the optimizer might have decided it
1644   // "knows" about the coroutine function due to not seeing a return.
1645   F.removeFnAttr(Attribute::NoReturn);
1646   F.removeRetAttr(Attribute::NoAlias);
1647   F.removeRetAttr(Attribute::NonNull);
1648 
1649   auto &Context = F.getContext();
1650   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1651 
1652   auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1653   IRBuilder<> Builder(Id);
1654 
1655   auto *FramePtr = Id->getStorage();
1656   FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1657   FramePtr = Builder.CreateConstInBoundsGEP1_32(
1658       Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1659       "async.ctx.frameptr");
1660 
1661   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1662   {
1663     // Make sure we don't invalidate Shape.FramePtr.
1664     TrackingVH<Instruction> Handle(Shape.FramePtr);
1665     Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1666     Shape.FramePtr = Handle.getValPtr();
1667   }
1668 
1669   // Create all the functions in order after the main function.
1670   auto NextF = std::next(F.getIterator());
1671 
1672   // Create a continuation function for each of the suspend points.
1673   Clones.reserve(Shape.CoroSuspends.size());
1674   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1675     auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1676 
1677     // Create the clone declaration.
1678     auto ResumeNameSuffix = ".resume.";
1679     auto ProjectionFunctionName =
1680         Suspend->getAsyncContextProjectionFunction()->getName();
1681     bool UseSwiftMangling = false;
1682     if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1683       ResumeNameSuffix = "TQ";
1684       UseSwiftMangling = true;
1685     } else if (ProjectionFunctionName.equals(
1686                    "__swift_async_resume_get_context")) {
1687       ResumeNameSuffix = "TY";
1688       UseSwiftMangling = true;
1689     }
1690     auto *Continuation = createCloneDeclaration(
1691         F, Shape,
1692         UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1693                          : ResumeNameSuffix + Twine(Idx),
1694         NextF, Suspend);
1695     Clones.push_back(Continuation);
1696 
1697     // Insert a branch to a new return block immediately before the suspend
1698     // point.
1699     auto *SuspendBB = Suspend->getParent();
1700     auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1701     auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1702 
    // Place the return block before the new suspend block.
1704     auto *ReturnBB =
1705         BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1706     Branch->setSuccessor(0, ReturnBB);
1707 
1708     IRBuilder<> Builder(ReturnBB);
1709 
1710     // Insert the call to the tail call function and inline it.
1711     auto *Fn = Suspend->getMustTailCallFunction();
1712     SmallVector<Value *, 8> Args(Suspend->args());
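    // Drop the leading operands up to and including the must-tail callee;
    // only the arguments for the tail call itself are forwarded.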
1713     auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1714         CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1715     auto *TailCall =
1716         coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1717     Builder.CreateRetVoid();
1718     InlineFunctionInfo FnInfo;
1719     auto InlineRes = InlineFunction(*TailCall, FnInfo);
1720     assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1721     (void)InlineRes;
1722 
    // Replace the llvm.coro.async.resume intrinsic call.
1724     replaceAsyncResumeFunction(Suspend, Continuation);
1725   }
1726 
1727   assert(Clones.size() == Shape.CoroSuspends.size());
1728   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1729     auto *Suspend = Shape.CoroSuspends[Idx];
1730     auto *Clone = Clones[Idx];
1731 
1732     CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1733   }
1734 }
1735 
1736 static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1737                                  SmallVectorImpl<Function *> &Clones) {
1738   assert(Shape.ABI == coro::ABI::Retcon ||
1739          Shape.ABI == coro::ABI::RetconOnce);
1740   assert(Clones.empty());
1741 
1742   // Reset various things that the optimizer might have decided it
1743   // "knows" about the coroutine function due to not seeing a return.
1744   F.removeFnAttr(Attribute::NoReturn);
1745   F.removeRetAttr(Attribute::NoAlias);
1746   F.removeRetAttr(Attribute::NonNull);
1747 
1748   // Allocate the frame.
1749   auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1750   Value *RawFramePtr;
1751   if (Shape.RetconLowering.IsFrameInlineInStorage) {
1752     RawFramePtr = Id->getStorage();
1753   } else {
1754     IRBuilder<> Builder(Id);
1755 
1756     // Determine the size of the frame.
1757     const DataLayout &DL = F.getParent()->getDataLayout();
1758     auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1759 
1760     // Allocate.  We don't need to update the call graph node because we're
1761     // going to recompute it from scratch after splitting.
1762     // FIXME: pass the required alignment
1763     RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1764     RawFramePtr =
1765       Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1766 
1767     // Stash the allocated frame pointer in the continuation storage.
1768     auto Dest = Builder.CreateBitCast(Id->getStorage(),
1769                                       RawFramePtr->getType()->getPointerTo());
1770     Builder.CreateStore(RawFramePtr, Dest);
1771   }
1772 
1773   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1774   {
1775     // Make sure we don't invalidate Shape.FramePtr.
1776     TrackingVH<Instruction> Handle(Shape.FramePtr);
1777     Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1778     Shape.FramePtr = Handle.getValPtr();
1779   }
1780 
1781   // Create a unique return block.
1782   BasicBlock *ReturnBB = nullptr;
1783   SmallVector<PHINode *, 4> ReturnPHIs;
1784 
1785   // Create all the functions in order after the main function.
1786   auto NextF = std::next(F.getIterator());
1787 
1788   // Create a continuation function for each of the suspend points.
1789   Clones.reserve(Shape.CoroSuspends.size());
1790   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1791     auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1792 
1793     // Create the clone declaration.
1794     auto Continuation =
1795         createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1796     Clones.push_back(Continuation);
1797 
1798     // Insert a branch to the unified return block immediately before
1799     // the suspend point.
1800     auto SuspendBB = Suspend->getParent();
1801     auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1802     auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1803 
1804     // Create the unified return block.
1805     if (!ReturnBB) {
1806       // Place it before the first suspend.
1807       ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1808                                     NewSuspendBB);
1809       Shape.RetconLowering.ReturnBlock = ReturnBB;
1810 
1811       IRBuilder<> Builder(ReturnBB);
1812 
1813       // Create PHIs for all the return values.
1814       assert(ReturnPHIs.empty());
1815 
1816       // First, the continuation.
1817       ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1818                                              Shape.CoroSuspends.size()));
1819 
1820       // Next, all the directly-yielded values.
1821       for (auto ResultTy : Shape.getRetconResultTypes())
1822         ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1823                                                Shape.CoroSuspends.size()));
1824 
1825       // Build the return value.
1826       auto RetTy = F.getReturnType();
1827 
1828       // Cast the continuation value if necessary.
1829       // We can't rely on the types matching up because that type would
1830       // have to be infinite.
1831       auto CastedContinuationTy =
1832         (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1833       auto *CastedContinuation =
1834         Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1835 
1836       Value *RetV;
1837       if (ReturnPHIs.size() == 1) {
1838         RetV = CastedContinuation;
1839       } else {
1840         RetV = UndefValue::get(RetTy);
1841         RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1842         for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1843           RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1844       }
1845 
1846       Builder.CreateRet(RetV);
1847     }
1848 
1849     // Branch to the return block.
1850     Branch->setSuccessor(0, ReturnBB);
1851     ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1852     size_t NextPHIIndex = 1;
1853     for (auto &VUse : Suspend->value_operands())
1854       ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1855     assert(NextPHIIndex == ReturnPHIs.size());
1856   }
1857 
1858   assert(Clones.size() == Shape.CoroSuspends.size());
1859   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1860     auto Suspend = Shape.CoroSuspends[i];
1861     auto Clone = Clones[i];
1862 
1863     CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1864   }
1865 }
1866 
1867 namespace {
1868   class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1869     Function &F;
1870   public:
1871     PrettyStackTraceFunction(Function &F) : F(F) {}
1872     void print(raw_ostream &OS) const override {
1873       OS << "While splitting coroutine ";
1874       F.printAsOperand(OS, /*print type*/ false, F.getParent());
1875       OS << "\n";
1876     }
1877   };
1878 }
1879 
1880 static coro::Shape splitCoroutine(Function &F,
1881                                   SmallVectorImpl<Function *> &Clones,
1882                                   bool OptimizeFrame) {
1883   PrettyStackTraceFunction prettyStackTrace(F);
1884 
  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
1887   removeUnreachableBlocks(F);
1888 
1889   coro::Shape Shape(F, OptimizeFrame);
1890   if (!Shape.CoroBegin)
1891     return Shape;
1892 
1893   simplifySuspendPoints(Shape);
1894   buildCoroutineFrame(F, Shape);
1895   replaceFrameSizeAndAlignment(Shape);
1896 
  // If there are no suspend points, no split is required; just remove
  // the allocation and deallocation blocks, as they are not needed.
1899   if (Shape.CoroSuspends.empty()) {
1900     handleNoSuspendCoroutine(Shape);
1901   } else {
1902     switch (Shape.ABI) {
1903     case coro::ABI::Switch:
1904       splitSwitchCoroutine(F, Shape, Clones);
1905       break;
1906     case coro::ABI::Async:
1907       splitAsyncCoroutine(F, Shape, Clones);
1908       break;
1909     case coro::ABI::Retcon:
1910     case coro::ABI::RetconOnce:
1911       splitRetconCoroutine(F, Shape, Clones);
1912       break;
1913     }
1914   }
1915 
1916   // Replace all the swifterror operations in the original function.
1917   // This invalidates SwiftErrorOps in the Shape.
1918   replaceSwiftErrorOps(F, Shape, nullptr);
1919 
1920   return Shape;
1921 }
1922 
1923 static void
1924 updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1925                                    const SmallVectorImpl<Function *> &Clones,
1926                                    CallGraph &CG, CallGraphSCC &SCC) {
1927   if (!Shape.CoroBegin)
1928     return;
1929 
1930   removeCoroEnds(Shape, &CG);
1931   postSplitCleanup(F);
1932 
1933   // Update call graph and add the functions we created to the SCC.
1934   coro::updateCallGraph(F, Clones, CG, SCC);
1935 }
1936 
1937 static void updateCallGraphAfterCoroutineSplit(
1938     LazyCallGraph::Node &N, const coro::Shape &Shape,
1939     const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1940     LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1941     FunctionAnalysisManager &FAM) {
1942   if (!Shape.CoroBegin)
1943     return;
1944 
1945   for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1946     auto &Context = End->getContext();
1947     End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1948     End->eraseFromParent();
1949   }
1950 
1951   if (!Clones.empty()) {
1952     switch (Shape.ABI) {
1953     case coro::ABI::Switch:
1954       // Each clone in the Switch lowering is independent of the other clones.
1955       // Let the LazyCallGraph know about each one separately.
1956       for (Function *Clone : Clones)
1957         CG.addSplitFunction(N.getFunction(), *Clone);
1958       break;
1959     case coro::ABI::Async:
1960     case coro::ABI::Retcon:
1961     case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references the other clones.
1963       // Let the LazyCallGraph know about all of them at once.
1964       if (!Clones.empty())
1965         CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1966       break;
1967     }
1968 
1969     // Let the CGSCC infra handle the changes to the original function.
1970     updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1971   }
1972 
1973   // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1974   // to the split functions.
1975   postSplitCleanup(N.getFunction());
1976   updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
1977 }
1978 
// When we see the coroutine for the first time, we insert an indirect call to
// a devirt trigger function and mark the coroutine as ready for split.
1982 // Async lowering uses this after it has split the function to restart the
1983 // pipeline.
1984 static void prepareForSplit(Function &F, CallGraph &CG,
1985                             bool MarkForAsyncRestart = false) {
1986   Module &M = *F.getParent();
1987   LLVMContext &Context = F.getContext();
1988 #ifndef NDEBUG
1989   Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
1990   assert(DevirtFn && "coro.devirt.trigger function not found");
1991 #endif
1992 
1993   F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
1994                                       ? ASYNC_RESTART_AFTER_SPLIT
1995                                       : PREPARED_FOR_SPLIT);
1996 
1997   // Insert an indirect call sequence that will be devirtualized by CoroElide
1998   // pass:
1999   //    %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
2000   //    %1 = bitcast i8* %0 to void(i8*)*
2001   //    call void %1(i8* null)
2002   coro::LowererBase Lowerer(M);
2003   Instruction *InsertPt =
2004       MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
2005                           : F.getEntryBlock().getTerminator();
2006   auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
2007   auto *DevirtFnAddr =
2008       Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
2009   FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
2010                                          {Type::getInt8PtrTy(Context)}, false);
2011   auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
2012 
  // Update the call graph with the indirect call we just added.
2014   CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
2015 }
2016 
2017 // Make sure that there is a devirtualization trigger function that the
2018 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
2019 // trigger function is not found, we will create one and add it to the current
2020 // SCC.
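//
// The trigger is a trivial always-inline function; roughly (illustrative IR,
// with the actual symbol taken from CORO_DEVIRT_TRIGGER_FN):
//   define private void @coro.devirt.trigger(i8*) alwaysinline {
//   entry:
//     ret void
//   }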
2021 static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
2022   Module &M = CG.getModule();
2023   if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
2024     return;
2025 
2026   LLVMContext &C = M.getContext();
2027   auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
2028                                  /*isVarArg=*/false);
2029   Function *DevirtFn =
2030       Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
2031                        CORO_DEVIRT_TRIGGER_FN, &M);
2032   DevirtFn->addFnAttr(Attribute::AlwaysInline);
2033   auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
2034   ReturnInst::Create(C, Entry);
2035 
2036   auto *Node = CG.getOrInsertFunction(DevirtFn);
2037 
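  // Extend the current SCC so it also contains the new trigger function's
  // node.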
2038   SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
2039   Nodes.push_back(Node);
2040   SCC.initialize(Nodes);
2041 }
2042 
/// Replace a call to llvm.coro.prepare.retcon or llvm.coro.prepare.async.
2044 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
2045                            LazyCallGraph::SCC &C) {
2046   auto CastFn = Prepare->getArgOperand(0); // as an i8*
2047   auto Fn = CastFn->stripPointerCasts();   // as its original type
2048 
2049   // Attempt to peephole this pattern:
2050   //    %0 = bitcast [[TYPE]] @some_function to i8*
2051   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
2052   //    %2 = bitcast %1 to [[TYPE]]
2053   // ==>
2054   //    %2 = @some_function
2055   for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2056     // Look for bitcasts back to the original function type.
2057     auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2058     if (!Cast || Cast->getType() != Fn->getType())
2059       continue;
2060 
2061     // Replace and remove the cast.
2062     Cast->replaceAllUsesWith(Fn);
2063     Cast->eraseFromParent();
2064   }
2065 
2066   // Replace any remaining uses with the function as an i8*.
2067   // This can never directly be a callee, so we don't need to update CG.
2068   Prepare->replaceAllUsesWith(CastFn);
2069   Prepare->eraseFromParent();
2070 
2071   // Kill dead bitcasts.
2072   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2073     if (!Cast->use_empty())
2074       break;
2075     CastFn = Cast->getOperand(0);
2076     Cast->eraseFromParent();
2077   }
2078 }
/// Replace a call to llvm.coro.prepare.retcon or llvm.coro.prepare.async.
2080 static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
2081   auto CastFn = Prepare->getArgOperand(0); // as an i8*
2082   auto Fn = CastFn->stripPointerCasts(); // as its original type
2083 
2084   // Find call graph nodes for the preparation.
2085   CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
2086   if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
2087     PrepareUserNode = CG[Prepare->getFunction()];
2088     FnNode = CG[ConcreteFn];
2089   }
2090 
2091   // Attempt to peephole this pattern:
2092   //    %0 = bitcast [[TYPE]] @some_function to i8*
2093   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
2094   //    %2 = bitcast %1 to [[TYPE]]
2095   // ==>
2096   //    %2 = @some_function
2097   for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2098     // Look for bitcasts back to the original function type.
2099     auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2100     if (!Cast || Cast->getType() != Fn->getType()) continue;
2101 
2102     // Check whether the replacement will introduce new direct calls.
2103     // If so, we'll need to update the call graph.
2104     if (PrepareUserNode) {
2105       for (auto &Use : Cast->uses()) {
2106         if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
2107           if (!CB->isCallee(&Use))
2108             continue;
2109           PrepareUserNode->removeCallEdgeFor(*CB);
2110           PrepareUserNode->addCalledFunction(CB, FnNode);
2111         }
2112       }
2113     }
2114 
2115     // Replace and remove the cast.
2116     Cast->replaceAllUsesWith(Fn);
2117     Cast->eraseFromParent();
2118   }
2119 
2120   // Replace any remaining uses with the function as an i8*.
2121   // This can never directly be a callee, so we don't need to update CG.
2122   Prepare->replaceAllUsesWith(CastFn);
2123   Prepare->eraseFromParent();
2124 
2125   // Kill dead bitcasts.
2126   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2127     if (!Cast->use_empty()) break;
2128     CastFn = Cast->getOperand(0);
2129     Cast->eraseFromParent();
2130   }
2131 }
2132 
2133 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2134                                LazyCallGraph::SCC &C) {
2135   bool Changed = false;
2136   for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2137     // Intrinsics can only be used in calls.
2138     auto *Prepare = cast<CallInst>(P.getUser());
2139     replacePrepare(Prepare, CG, C);
2140     Changed = true;
2141   }
2142 
2143   return Changed;
2144 }
2145 
2146 /// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2147 /// IPO from operating on calls to a retcon coroutine before it's been
2148 /// split.  This is only safe to do after we've split all retcon
/// coroutines in the module.  We can do this in this pass because this
/// pass does promise to split all retcon coroutines (as opposed to
2151 /// switch coroutines, which are lowered in multiple stages).
2152 static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2153   bool Changed = false;
2154   for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2155     // Intrinsics can only be used in calls.
2156     auto *Prepare = cast<CallInst>(P.getUser());
2157     replacePrepare(Prepare, CG);
2158     Changed = true;
2159   }
2160 
2161   return Changed;
2162 }
2163 
2164 static bool declaresCoroSplitIntrinsics(const Module &M) {
2165   return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2166                                       "llvm.coro.prepare.retcon",
2167                                       "llvm.coro.prepare.async"});
2168 }
2169 
2170 static void addPrepareFunction(const Module &M,
2171                                SmallVectorImpl<Function *> &Fns,
2172                                StringRef Name) {
2173   auto *PrepareFn = M.getFunction(Name);
2174   if (PrepareFn && !PrepareFn->use_empty())
2175     Fns.push_back(PrepareFn);
2176 }
2177 
2178 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2179                                      CGSCCAnalysisManager &AM,
2180                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2181   // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2182   //     non-zero number of nodes, so we assume that here and grab the first
2183   //     node's function's module.
2184   Module &M = *C.begin()->getFunction().getParent();
2185   auto &FAM =
2186       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2187 
2188   if (!declaresCoroSplitIntrinsics(M))
2189     return PreservedAnalyses::all();
2190 
2191   // Check for uses of llvm.coro.prepare.retcon/async.
2192   SmallVector<Function *, 2> PrepareFns;
2193   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2194   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2195 
2196   // Find coroutines for processing.
2197   SmallVector<LazyCallGraph::Node *, 4> Coroutines;
2198   for (LazyCallGraph::Node &N : C)
2199     if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2200       Coroutines.push_back(&N);
2201 
2202   if (Coroutines.empty() && PrepareFns.empty())
2203     return PreservedAnalyses::all();
2204 
2205   if (Coroutines.empty()) {
2206     for (auto *PrepareFn : PrepareFns) {
2207       replaceAllPrepares(PrepareFn, CG, C);
2208     }
2209   }
2210 
2211   // Split all the coroutines.
2212   for (LazyCallGraph::Node *N : Coroutines) {
2213     Function &F = N->getFunction();
2214     LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2215                       << "' state: "
2216                       << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
2217                       << "\n");
2218     F.removeFnAttr(CORO_PRESPLIT_ATTR);
2219 
2220     SmallVector<Function *, 4> Clones;
2221     const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame);
2222     updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2223 
2224     if (!Shape.CoroSuspends.empty()) {
2225       // Run the CGSCC pipeline on the original and newly split functions.
2226       UR.CWorklist.insert(&C);
2227       for (Function *Clone : Clones)
2228         UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2229     }
2230   }
2231 
2232   if (!PrepareFns.empty()) {
2233     for (auto *PrepareFn : PrepareFns) {
2234       replaceAllPrepares(PrepareFn, CG, C);
2235     }
2236   }
2237 
2238   return PreservedAnalyses::none();
2239 }
2240 
2241 namespace {
2242 
2243 // We present a coroutine to LLVM as an ordinary function with suspension
2244 // points marked up with intrinsics. We let the optimizer party on the coroutine
2245 // as a single function for as long as possible. Shortly before the coroutine is
2246 // eligible to be inlined into its callers, we split up the coroutine into parts
2247 // corresponding to initial, resume and destroy invocations of the coroutine,
2248 // add them to the current SCC and restart the IPO pipeline to optimize the
2249 // coroutine subfunctions we extracted before proceeding to the caller of the
2250 // coroutine.
2251 struct CoroSplitLegacy : public CallGraphSCCPass {
2252   static char ID; // Pass identification, replacement for typeid
2253 
2254   CoroSplitLegacy(bool OptimizeFrame = false)
2255       : CallGraphSCCPass(ID), OptimizeFrame(OptimizeFrame) {
2256     initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2257   }
2258 
2259   bool Run = false;
2260   bool OptimizeFrame;
2261 
  // A coroutine is identified by the presence of the coro.begin intrinsic;
  // if we don't have any, this pass has nothing to do.
2264   bool doInitialization(CallGraph &CG) override {
2265     Run = declaresCoroSplitIntrinsics(CG.getModule());
2266     return CallGraphSCCPass::doInitialization(CG);
2267   }
2268 
2269   bool runOnSCC(CallGraphSCC &SCC) override {
2270     if (!Run)
2271       return false;
2272 
    // Check for uses of llvm.coro.prepare.retcon and llvm.coro.prepare.async.
2274     SmallVector<Function *, 2> PrepareFns;
2275     auto &M = SCC.getCallGraph().getModule();
2276     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2277     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2278 
2279     // Find coroutines for processing.
2280     SmallVector<Function *, 4> Coroutines;
2281     for (CallGraphNode *CGN : SCC)
2282       if (auto *F = CGN->getFunction())
2283         if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2284           Coroutines.push_back(F);
2285 
2286     if (Coroutines.empty() && PrepareFns.empty())
2287       return false;
2288 
2289     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2290 
2291     if (Coroutines.empty()) {
2292       bool Changed = false;
2293       for (auto *PrepareFn : PrepareFns)
2294         Changed |= replaceAllPrepares(PrepareFn, CG);
2295       return Changed;
2296     }
2297 
2298     createDevirtTriggerFunc(CG, SCC);
2299 
2300     // Split all the coroutines.
2301     for (Function *F : Coroutines) {
2302       Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2303       StringRef Value = Attr.getValueAsString();
2304       LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
2305                         << "' state: " << Value << "\n");
2306       // Async lowering marks coroutines to trigger a restart of the pipeline
2307       // after it has split them.
2308       if (Value == ASYNC_RESTART_AFTER_SPLIT) {
2309         F->removeFnAttr(CORO_PRESPLIT_ATTR);
2310         continue;
2311       }
2312       if (Value == UNPREPARED_FOR_SPLIT) {
2313         prepareForSplit(*F, CG);
2314         continue;
2315       }
2316       F->removeFnAttr(CORO_PRESPLIT_ATTR);
2317 
2318       SmallVector<Function *, 4> Clones;
2319       const coro::Shape Shape = splitCoroutine(*F, Clones, OptimizeFrame);
2320       updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2321       if (Shape.ABI == coro::ABI::Async) {
        // Restart SCC passes.
        // Mark the function for the CoroElide pass. It will devirtualize,
        // causing a restart of the SCC pipeline.
2325         prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2326       }
2327     }
2328 
2329     for (auto *PrepareFn : PrepareFns)
2330       replaceAllPrepares(PrepareFn, CG);
2331 
2332     return true;
2333   }
2334 
2335   void getAnalysisUsage(AnalysisUsage &AU) const override {
2336     CallGraphSCCPass::getAnalysisUsage(AU);
2337   }
2338 
2339   StringRef getPassName() const override { return "Coroutine Splitting"; }
2340 };
2341 
2342 } // end anonymous namespace
2343 
2344 char CoroSplitLegacy::ID = 0;
2345 
2346 INITIALIZE_PASS_BEGIN(
2347     CoroSplitLegacy, "coro-split",
2348     "Split coroutine into a set of functions driving its state machine", false,
2349     false)
2350 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2351 INITIALIZE_PASS_END(
2352     CoroSplitLegacy, "coro-split",
2353     "Split coroutine into a set of functions driving its state machine", false,
2354     false)
2355 
2356 Pass *llvm::createCoroSplitLegacyPass(bool OptimizeFrame) {
2357   return new CoroSplitLegacy(OptimizeFrame);
2358 }
2359