1 //===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This pass builds the coroutine frame and outlines resume and destroy parts
9 // of the coroutine into separate functions.
10 //
11 // We present a coroutine to LLVM as an ordinary function with suspension
12 // points marked up with intrinsics. We let the optimizer party on the coroutine
13 // as a single function for as long as possible. Shortly before the coroutine is
14 // eligible to be inlined into its callers, we split up the coroutine into parts
15 // corresponding to the initial, resume and destroy invocations of the coroutine,
16 // add them to the current SCC and restart the IPO pipeline to optimize the
17 // coroutine subfunctions we extracted before proceeding to the caller of the
18 // coroutine.
19 //===----------------------------------------------------------------------===//
20 
21 #include "llvm/Transforms/Coroutines/CoroSplit.h"
22 #include "CoroInstr.h"
23 #include "CoroInternal.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/PriorityWorklist.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/StringRef.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/Analysis/CFG.h"
31 #include "llvm/Analysis/CallGraph.h"
32 #include "llvm/Analysis/CallGraphSCCPass.h"
33 #include "llvm/Analysis/ConstantFolding.h"
34 #include "llvm/Analysis/LazyCallGraph.h"
35 #include "llvm/BinaryFormat/Dwarf.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/CFG.h"
40 #include "llvm/IR/CallingConv.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GlobalValue.h"
47 #include "llvm/IR/GlobalVariable.h"
48 #include "llvm/IR/IRBuilder.h"
49 #include "llvm/IR/InstIterator.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/LLVMContext.h"
55 #include "llvm/IR/LegacyPassManager.h"
56 #include "llvm/IR/Module.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/IR/Verifier.h"
60 #include "llvm/InitializePasses.h"
61 #include "llvm/Pass.h"
62 #include "llvm/Support/Casting.h"
63 #include "llvm/Support/Debug.h"
64 #include "llvm/Support/PrettyStackTrace.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Transforms/Scalar.h"
67 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
68 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
69 #include "llvm/Transforms/Utils/Cloning.h"
70 #include "llvm/Transforms/Utils/Local.h"
71 #include "llvm/Transforms/Utils/ValueMapper.h"
72 #include <cassert>
73 #include <cstddef>
74 #include <cstdint>
75 #include <initializer_list>
76 #include <iterator>
77 
78 using namespace llvm;
79 
80 #define DEBUG_TYPE "coro-split"
81 
82 namespace {
83 
84 /// A little helper class for building the clones of a coroutine (the resume,
/// destroy and cleanup functions for switch lowering, and the continuation and
/// async resume functions).
85 class CoroCloner {
86 public:
87   enum class Kind {
88     /// The shared resume function for a switch lowering.
89     SwitchResume,
90 
91     /// The shared unwind function for a switch lowering.
92     SwitchUnwind,
93 
94     /// The shared cleanup function for a switch lowering.
95     SwitchCleanup,
96 
97     /// An individual continuation function.
98     Continuation,
99 
100     /// An async resume function.
101     Async,
102   };
103 
104 private:
105   Function &OrigF;
106   Function *NewF;
107   const Twine &Suffix;
108   coro::Shape &Shape;
109   Kind FKind;
110   ValueToValueMapTy VMap;
111   IRBuilder<> Builder;
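  /// The frame pointer value inside the cloned function; set by create() via
  /// deriveNewFramePointer().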
112   Value *NewFramePtr = nullptr;
113 
114   /// The active suspend instruction; meaningful only for continuation and async
115   /// ABIs.
116   AnyCoroSuspendInst *ActiveSuspend = nullptr;
117 
118 public:
119   /// Create a cloner for a switch lowering.
120   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
121              Kind FKind)
122     : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
123       FKind(FKind), Builder(OrigF.getContext()) {
124     assert(Shape.ABI == coro::ABI::Switch);
125   }
126 
127   /// Create a cloner for a continuation lowering.
128   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
129              Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
130       : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
131         FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
132         Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
133     assert(Shape.ABI == coro::ABI::Retcon ||
134            Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
135     assert(NewF && "need existing function for continuation");
136     assert(ActiveSuspend && "need active suspend point for continuation");
137   }
138 
139   Function *getFunction() const {
140     assert(NewF != nullptr && "declaration not yet set");
141     return NewF;
142   }
143 
144   void create();
145 
146 private:
147   bool isSwitchDestroyFunction() {
148     switch (FKind) {
149     case Kind::Async:
150     case Kind::Continuation:
151     case Kind::SwitchResume:
152       return false;
153     case Kind::SwitchUnwind:
154     case Kind::SwitchCleanup:
155       return true;
156     }
157     llvm_unreachable("Unknown CoroCloner::Kind enum");
158   }
159 
160   void replaceEntryBlock();
161   Value *deriveNewFramePointer();
162   void replaceRetconOrAsyncSuspendUses();
163   void replaceCoroSuspends();
164   void replaceCoroEnds();
165   void replaceSwiftErrorOps();
166   void salvageDebugInfo();
167   void handleFinalSuspend();
168 };
169 
170 } // end anonymous namespace
171 
172 static void maybeFreeRetconStorage(IRBuilder<> &Builder,
173                                    const coro::Shape &Shape, Value *FramePtr,
174                                    CallGraph *CG) {
175   assert(Shape.ABI == coro::ABI::Retcon ||
176          Shape.ABI == coro::ABI::RetconOnce);
177   if (Shape.RetconLowering.IsFrameInlineInStorage)
178     return;
179 
180   Shape.emitDealloc(Builder, FramePtr, CG);
181 }
182 
183 /// Replace an llvm.coro.end.async.
184 /// Inlines the must-tail call, if there is one.
185 /// \returns true if cleanup of the coro.end block is needed, false otherwise.
186 static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
187   IRBuilder<> Builder(End);
188 
189   auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
190   if (!EndAsync) {
191     Builder.CreateRetVoid();
192     return true /*needs cleanup of coro.end block*/;
193   }
194 
195   auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
196   if (!MustTailCallFunc) {
197     Builder.CreateRetVoid();
198     return true /*needs cleanup of coro.end block*/;
199   }
200 
201   // Move the must tail call from the predecessor block into the end block.
202   auto *CoroEndBlock = End->getParent();
203   auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
204   assert(MustTailCallFuncBlock && "Must have a single predecessor block");
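  // The must-tail call is the instruction immediately preceding the
  // predecessor block's terminator.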
205   auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
206   auto *MustTailCall = cast<CallInst>(&*std::prev(It));
207   CoroEndBlock->getInstList().splice(
208       End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
209 
210   // Insert the return instruction.
211   Builder.SetInsertPoint(End);
212   Builder.CreateRetVoid();
213   InlineFunctionInfo FnInfo;
214 
215   // Remove the rest of the block, by splitting it into an unreachable block.
216   auto *BB = End->getParent();
217   BB->splitBasicBlock(End);
218   BB->getTerminator()->eraseFromParent();
219 
220   auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
221   assert(InlineRes.isSuccess() && "Expected inlining to succeed");
222   (void)InlineRes;
223 
224   // We have cleaned up the coro.end block above.
225   return false;
226 }
227 
228 /// Replace a non-unwind call to llvm.coro.end.
229 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
230                                       const coro::Shape &Shape, Value *FramePtr,
231                                       bool InResume, CallGraph *CG) {
232   // Start inserting right before the coro.end.
233   IRBuilder<> Builder(End);
234 
235   // Create the return instruction.
236   switch (Shape.ABI) {
237   // The cloned functions in switch-lowering always return void.
238   case coro::ABI::Switch:
239     // coro.end doesn't immediately end the coroutine in the main function
240     // in this lowering, because we need to deallocate the coroutine.
241     if (!InResume)
242       return;
243     Builder.CreateRetVoid();
244     break;
245 
246   // In async lowering this returns.
247   case coro::ABI::Async: {
248     bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
249     if (!CoroEndBlockNeedsCleanup)
250       return;
251     break;
252   }
253 
254   // In unique continuation lowering, the continuations always return void.
255   // But we may have implicitly allocated storage.
256   case coro::ABI::RetconOnce:
257     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
258     Builder.CreateRetVoid();
259     break;
260 
261   // In non-unique continuation lowering, we signal completion by returning
262   // a null continuation.
263   case coro::ABI::Retcon: {
264     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
265     auto RetTy = Shape.getResumeFunctionType()->getReturnType();
266     auto RetStructTy = dyn_cast<StructType>(RetTy);
267     PointerType *ContinuationTy =
268       cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
269 
270     Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
271     if (RetStructTy) {
272       ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
273                                               ReturnValue, 0);
274     }
275     Builder.CreateRet(ReturnValue);
276     break;
277   }
278   }
279 
280   // Remove the rest of the block, by splitting it into an unreachable block.
281   auto *BB = End->getParent();
282   BB->splitBasicBlock(End);
283   BB->getTerminator()->eraseFromParent();
284 }
285 
286 // Mark a coroutine as done, which implies that the coroutine is finished and
287 // never gets resumed.
288 //
289 // In the switch-resumed ABI, the done state is represented by storing zero in
290 // ResumeFnAddr.
291 //
292 // NOTE: We cannot omit the argument `FramePtr`. It is necessary because the
293 // pointer to the frame in the split functions is not stored in `Shape`.
294 static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
295                                 Value *FramePtr) {
296   assert(
297       Shape.ABI == coro::ABI::Switch &&
298       "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
299   auto *GepIndex = Builder.CreateStructGEP(
300       Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
301       "ResumeFn.addr");
302   auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
303       Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
304   Builder.CreateStore(NullPtr, GepIndex);
305 }
306 
307 /// Replace an unwind call to llvm.coro.end.
308 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
309                                  Value *FramePtr, bool InResume,
310                                  CallGraph *CG) {
311   IRBuilder<> Builder(End);
312 
313   switch (Shape.ABI) {
314   // In switch-lowering, mark the coroutine as done; nothing else is needed in the main function.
315   case coro::ABI::Switch: {
316     // In C++'s specification, the coroutine should be marked as done
317     // if promise.unhandled_exception() throws.  The frontend will
318     // call coro.end(true) along this path.
319     //
320     // FIXME: We should refactor this once there is another language that
321     // uses the switch-resumed style besides C++.
322     markCoroutineAsDone(Builder, Shape, FramePtr);
323     if (!InResume)
324       return;
325     break;
326   }
327   // In async lowering this does nothing.
328   case coro::ABI::Async:
329     break;
330   // In continuation-lowering, this frees the continuation storage.
331   case coro::ABI::Retcon:
332   case coro::ABI::RetconOnce:
333     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
334     break;
335   }
336 
337   // If coro.end has an associated bundle, add a cleanupret instruction.
338   if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
339     auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
340     auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
341     End->getParent()->splitBasicBlock(End);
342     CleanupRet->getParent()->getTerminator()->eraseFromParent();
343   }
344 }
345 
346 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
347                            Value *FramePtr, bool InResume, CallGraph *CG) {
348   if (End->isUnwind())
349     replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
350   else
351     replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
352 
353   auto &Context = End->getContext();
354   End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
355                                    : ConstantInt::getFalse(Context));
356   End->eraseFromParent();
357 }
358 
359 // Create an entry block for a resume function with a switch that will jump to
360 // suspend points.
361 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
362   assert(Shape.ABI == coro::ABI::Switch);
363   LLVMContext &C = F.getContext();
364 
365   // resume.entry:
366   //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
367   //  i32 2
368   //  %index = load i32, i32* %index.addr
369   //  switch i32 %index, label %unreachable [
370   //    i32 0, label %resume.0
371   //    i32 1, label %resume.1
372   //    ...
373   //  ]
374 
375   auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
376   auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
377 
378   IRBuilder<> Builder(NewEntry);
379   auto *FramePtr = Shape.FramePtr;
380   auto *FrameTy = Shape.FrameTy;
381   auto *GepIndex = Builder.CreateStructGEP(
382       FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
383   auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
384   auto *Switch =
385       Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
386   Shape.SwitchLowering.ResumeSwitch = Switch;
387 
388   size_t SuspendIndex = 0;
389   for (auto *AnyS : Shape.CoroSuspends) {
390     auto *S = cast<CoroSuspendInst>(AnyS);
391     ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
392 
393     // Replace CoroSave with a store to Index:
394     //    %index.addr = getelementptr %f.frame... (index field number)
395     //    store i32 0, i32* %index.addr1
396     auto *Save = S->getCoroSave();
397     Builder.SetInsertPoint(Save);
398     if (S->isFinal()) {
399       // The coroutine should be marked done if it reaches the final suspend
400       // point.
401       markCoroutineAsDone(Builder, Shape, FramePtr);
402     } else {
403       auto *GepIndex = Builder.CreateStructGEP(
404           FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
405       Builder.CreateStore(IndexVal, GepIndex);
406     }
407     Save->replaceAllUsesWith(ConstantTokenNone::get(C));
408     Save->eraseFromParent();
409 
410     // Split block before and after coro.suspend and add a jump from an entry
411     // switch:
412     //
413     //  whateverBB:
414     //    whatever
415     //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
416     //    switch i8 %0, label %suspend[i8 0, label %resume
417     //                                 i8 1, label %cleanup]
418     // becomes:
419     //
420     //  whateverBB:
421     //     whatever
422     //     br label %resume.0.landing
423     //
424     //  resume.0: ; <--- jump from the switch in the resume.entry
425     //     %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
426     //     br label %resume.0.landing
427     //
428     //  resume.0.landing:
429     //     %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
430     //     switch i8 %1, label %suspend [i8 0, label %resume
431     //                                    i8 1, label %cleanup]
432 
433     auto *SuspendBB = S->getParent();
434     auto *ResumeBB =
435         SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
436     auto *LandingBB = ResumeBB->splitBasicBlock(
437         S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
438     Switch->addCase(IndexVal, ResumeBB);
439 
440     cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
441     auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
442     S->replaceAllUsesWith(PN);
443     PN->addIncoming(Builder.getInt8(-1), SuspendBB);
444     PN->addIncoming(S, ResumeBB);
445 
446     ++SuspendIndex;
447   }
448 
449   Builder.SetInsertPoint(UnreachBB);
450   Builder.CreateUnreachable();
451 
452   Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
453 }
454 
455 
456 // Rewrite final suspend point handling. We do not use suspend index to
457 // represent the final suspend point. Instead we zero-out ResumeFnAddr in the
458 // coroutine frame, since it is undefined behavior to resume a coroutine
459 // suspended at the final suspend point. Thus, in the resume function, we can
460 // simply remove the last case (when coro::Shape is built, the final suspend
461 // point (if present) is always the last element of CoroSuspends array).
462 // In the destroy function, we add a code sequence to check if ResumeFnAddr
463 // is null, and if so, jump to the appropriate label to handle cleanup from the
464 // final suspend point.
465 void CoroCloner::handleFinalSuspend() {
466   assert(Shape.ABI == coro::ABI::Switch &&
467          Shape.SwitchLowering.HasFinalSuspend);
468   auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
469   auto FinalCaseIt = std::prev(Switch->case_end());
470   BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
471   Switch->removeCase(FinalCaseIt);
472   if (isSwitchDestroyFunction()) {
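    // In the destroy/cleanup clone, branch to the final-suspend handling when
    // ResumeFnAddr in the frame is null.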
473     BasicBlock *OldSwitchBB = Switch->getParent();
474     auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
475     Builder.SetInsertPoint(OldSwitchBB->getTerminator());
476     auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
477                                        coro::Shape::SwitchFieldIndex::Resume,
478                                              "ResumeFn.addr");
479     auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
480                                     GepIndex);
481     auto *Cond = Builder.CreateIsNull(Load);
482     Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
483     OldSwitchBB->getTerminator()->eraseFromParent();
484   }
485 }
486 
487 static FunctionType *
488 getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
489   auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
490   auto *StructTy = cast<StructType>(AsyncSuspend->getType());
491   auto &Context = Suspend->getParent()->getParent()->getContext();
492   auto *VoidTy = Type::getVoidTy(Context);
493   return FunctionType::get(VoidTy, StructTy->elements(), false);
494 }
495 
496 static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
497                                         const Twine &Suffix,
498                                         Module::iterator InsertBefore,
499                                         AnyCoroSuspendInst *ActiveSuspend) {
500   Module *M = OrigF.getParent();
501   auto *FnTy = (Shape.ABI != coro::ABI::Async)
502                    ? Shape.getResumeFunctionType()
503                    : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
504 
505   Function *NewF =
506       Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
507                        OrigF.getName() + Suffix);
508   if (Shape.ABI != coro::ABI::Async)
509     NewF->addParamAttr(0, Attribute::NonNull);
510 
511   // For the async lowering ABI we can't guarantee that the context argument is
512   // not accessed via a different pointer not based on the argument.
513   if (Shape.ABI != coro::ABI::Async)
514     NewF->addParamAttr(0, Attribute::NoAlias);
515 
516   M->getFunctionList().insert(InsertBefore, NewF);
517 
518   return NewF;
519 }
520 
521 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the
522 /// arguments to the continuation function.
523 ///
524 /// This assumes that the builder has a meaningful insertion point.
525 void CoroCloner::replaceRetconOrAsyncSuspendUses() {
526   assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
527          Shape.ABI == coro::ABI::Async);
528 
529   auto NewS = VMap[ActiveSuspend];
530   if (NewS->use_empty()) return;
531 
532   // Copy out all the continuation arguments after the buffer pointer into
533   // an easily-indexed data structure for convenience.
534   SmallVector<Value*, 8> Args;
535   // The async ABI includes all arguments -- including the first argument.
536   bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
537   for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
538             E = NewF->arg_end();
539        I != E; ++I)
540     Args.push_back(&*I);
541 
542   // If the suspend returns a single scalar value, we can just do a simple
543   // replacement.
544   if (!isa<StructType>(NewS->getType())) {
545     assert(Args.size() == 1);
546     NewS->replaceAllUsesWith(Args.front());
547     return;
548   }
549 
550   // Try to peephole extracts of an aggregate return.
551   for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
552     auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
553     if (!EVI || EVI->getNumIndices() != 1)
554       continue;
555 
556     EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
557     EVI->eraseFromParent();
558   }
559 
560   // If we have no remaining uses, we're done.
561   if (NewS->use_empty()) return;
562 
563   // Otherwise, we need to create an aggregate.
564   Value *Agg = UndefValue::get(NewS->getType());
565   for (size_t I = 0, E = Args.size(); I != E; ++I)
566     Agg = Builder.CreateInsertValue(Agg, Args[I], I);
567 
568   NewS->replaceAllUsesWith(Agg);
569 }
570 
571 void CoroCloner::replaceCoroSuspends() {
572   Value *SuspendResult;
573 
574   switch (Shape.ABI) {
575   // In switch lowering, replace coro.suspend with the appropriate value
576   // for the type of function we're extracting.
577   // Replacing coro.suspend with (0) will result in control flow proceeding to
578   // the resume label associated with a suspend point; replacing it with (1)
579   // will result in control flow proceeding to the cleanup label associated
580   // with this suspend point.
581   case coro::ABI::Switch:
582     SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
583     break;
584 
585   // In async lowering there are no uses of the result.
586   case coro::ABI::Async:
587     return;
588 
589   // In returned-continuation lowering, the arguments from earlier
590   // continuations are theoretically arbitrary, and they should have been
591   // spilled.
592   case coro::ABI::RetconOnce:
593   case coro::ABI::Retcon:
594     return;
595   }
596 
597   for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
598     // The active suspend was handled earlier.
599     if (CS == ActiveSuspend) continue;
600 
601     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
602     MappedCS->replaceAllUsesWith(SuspendResult);
603     MappedCS->eraseFromParent();
604   }
605 }
606 
607 void CoroCloner::replaceCoroEnds() {
608   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
609     // We use a null call graph because there's no call graph node for
610     // the cloned function yet.  We'll just be rebuilding that later.
611     auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
612     replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
613   }
614 }
615 
616 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
617                                  ValueToValueMapTy *VMap) {
618   if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
619     return;
620   Value *CachedSlot = nullptr;
621   auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
622     if (CachedSlot) {
623       assert(cast<PointerType>(CachedSlot->getType())
624                  ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
625              "multiple swifterror slots in function with different types");
626       return CachedSlot;
627     }
628 
629     // Check if the function has a swifterror argument.
630     for (auto &Arg : F.args()) {
631       if (Arg.isSwiftError()) {
632         CachedSlot = &Arg;
633         assert(cast<PointerType>(Arg.getType())
634                    ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
635                "swifterror argument does not have expected type");
636         return &Arg;
637       }
638     }
639 
640     // Create a swifterror alloca.
641     IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
642     auto Alloca = Builder.CreateAlloca(ValueTy);
643     Alloca->setSwiftError(true);
644 
645     CachedSlot = Alloca;
646     return Alloca;
647   };
648 
649   for (CallInst *Op : Shape.SwiftErrorOps) {
650     auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
651     IRBuilder<> Builder(MappedOp);
652 
653     // If there are no arguments, this is a 'get' operation.
654     Value *MappedResult;
655     if (Op->arg_empty()) {
656       auto ValueTy = Op->getType();
657       auto Slot = getSwiftErrorSlot(ValueTy);
658       MappedResult = Builder.CreateLoad(ValueTy, Slot);
659     } else {
660       assert(Op->arg_size() == 1);
661       auto Value = MappedOp->getArgOperand(0);
662       auto ValueTy = Value->getType();
663       auto Slot = getSwiftErrorSlot(ValueTy);
664       Builder.CreateStore(Value, Slot);
665       MappedResult = Slot;
666     }
667 
668     MappedOp->replaceAllUsesWith(MappedResult);
669     MappedOp->eraseFromParent();
670   }
671 
672   // If we're updating the original function, we've invalidated SwiftErrorOps.
673   if (VMap == nullptr) {
674     Shape.SwiftErrorOps.clear();
675   }
676 }
677 
678 void CoroCloner::replaceSwiftErrorOps() {
679   ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
680 }
681 
682 void CoroCloner::salvageDebugInfo() {
683   SmallVector<DbgVariableIntrinsic *, 8> Worklist;
684   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
685   for (auto &BB : *NewF)
686     for (auto &I : BB)
687       if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
688         Worklist.push_back(DVI);
689   for (DbgVariableIntrinsic *DVI : Worklist)
690     coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);
691 
692   // Remove all salvaged dbg.declare intrinsics that became
693   // either unreachable or stale due to the CoroSplit transformation.
694   DominatorTree DomTree(*NewF);
695   auto IsUnreachableBlock = [&](BasicBlock *BB) {
696     return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
697                                    &DomTree);
698   };
699   for (DbgVariableIntrinsic *DVI : Worklist) {
700     if (IsUnreachableBlock(DVI->getParent()))
701       DVI->eraseFromParent();
702     else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
703       // Count all non-debuginfo uses in reachable blocks.
704       unsigned Uses = 0;
705       for (auto *User : DVI->getVariableLocationOp(0)->users())
706         if (auto *I = dyn_cast<Instruction>(User))
707           if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
708             ++Uses;
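      // If the alloca has no remaining non-debug uses in reachable blocks,
      // the dbg.declare is stale and can be removed.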
709       if (!Uses)
710         DVI->eraseFromParent();
711     }
712   }
713 }
714 
715 void CoroCloner::replaceEntryBlock() {
716   // In the original function, the AllocaSpillBlock is a block immediately
717   // following the allocation of the frame object which defines GEPs for
718   // all the allocas that have been moved into the frame, and it ends by
719   // branching to the original beginning of the coroutine.  Make this
720   // the entry block of the cloned function.
721   auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
722   auto *OldEntry = &NewF->getEntryBlock();
723   Entry->setName("entry" + Suffix);
724   Entry->moveBefore(OldEntry);
725   Entry->getTerminator()->eraseFromParent();
726 
727   // Clear all predecessors of the new entry block.  There should be
728   // exactly one predecessor, which we created when splitting out
729   // AllocaSpillBlock to begin with.
730   assert(Entry->hasOneUse());
731   auto BranchToEntry = cast<BranchInst>(Entry->user_back());
732   assert(BranchToEntry->isUnconditional());
733   Builder.SetInsertPoint(BranchToEntry);
734   Builder.CreateUnreachable();
735   BranchToEntry->eraseFromParent();
736 
737   // Branch from the entry to the appropriate place.
738   Builder.SetInsertPoint(Entry);
739   switch (Shape.ABI) {
740   case coro::ABI::Switch: {
741     // In switch-lowering, we built a resume-entry block in the original
742     // function.  Make the entry block branch to this.
743     auto *SwitchBB =
744       cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
745     Builder.CreateBr(SwitchBB);
746     break;
747   }
748   case coro::ABI::Async:
749   case coro::ABI::Retcon:
750   case coro::ABI::RetconOnce: {
751     // In continuation ABIs, we want to branch to immediately after the
752     // active suspend point.  Earlier phases will have put the suspend in its
753     // own basic block, so just thread our jump directly to its successor.
754     assert((Shape.ABI == coro::ABI::Async &&
755             isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
756            ((Shape.ABI == coro::ABI::Retcon ||
757              Shape.ABI == coro::ABI::RetconOnce) &&
758             isa<CoroSuspendRetconInst>(ActiveSuspend)));
759     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
760     auto Branch = cast<BranchInst>(MappedCS->getNextNode());
761     assert(Branch->isUnconditional());
762     Builder.CreateBr(Branch->getSuccessor(0));
763     break;
764   }
765   }
766 
767   // Any static alloca that's still being used but not reachable from the new
768   // entry needs to be moved to the new entry.
769   Function *F = OldEntry->getParent();
770   DominatorTree DT{*F};
771   for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
772     auto *Alloca = dyn_cast<AllocaInst>(&I);
773     if (!Alloca || I.use_empty())
774       continue;
775     if (DT.isReachableFromEntry(I.getParent()) ||
776         !isa<ConstantInt>(Alloca->getArraySize()))
777       continue;
778     I.moveBefore(*Entry, Entry->getFirstInsertionPt());
779   }
780 }
781 
782 /// Derive the value of the new frame pointer.
783 Value *CoroCloner::deriveNewFramePointer() {
784   // Builder should be inserting to the front of the new entry block.
785 
786   switch (Shape.ABI) {
787   // In switch-lowering, the argument is the frame pointer.
788   case coro::ABI::Switch:
789     return &*NewF->arg_begin();
790   // In async-lowering, one of the arguments is an async context as determined
791   // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
792   // the resume function from the async context projection function associated
793   // with the active suspend. The frame is located as a tail to the async
794   // context header.
795   case coro::ABI::Async: {
796     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
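    // The low byte of the storage argument index encodes the position of the
    // async context argument; the high byte holds the swiftself index, if any.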
797     auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
798     auto *CalleeContext = NewF->getArg(ContextIdx);
799     auto *FramePtrTy = Shape.FrameTy->getPointerTo();
800     auto *ProjectionFunc =
801         ActiveAsyncSuspend->getAsyncContextProjectionFunction();
802     auto DbgLoc =
803         cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
804     // Calling i8* (i8*)
805     auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
806                                              ProjectionFunc, CalleeContext);
807     CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
808     CallerContext->setDebugLoc(DbgLoc);
809     // The frame is located after the async_context header.
810     auto &Context = Builder.getContext();
811     auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
812         Type::getInt8Ty(Context), CallerContext,
813         Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
814     // Inline the projection function.
815     InlineFunctionInfo InlineInfo;
816     auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
817     assert(InlineRes.isSuccess());
818     (void)InlineRes;
819     return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
820   }
821   // In continuation-lowering, the argument is the opaque storage.
822   case coro::ABI::Retcon:
823   case coro::ABI::RetconOnce: {
824     Argument *NewStorage = &*NewF->arg_begin();
825     auto FramePtrTy = Shape.FrameTy->getPointerTo();
826 
827     // If the storage is inline, just bitcast the storage to the frame type.
828     if (Shape.RetconLowering.IsFrameInlineInStorage)
829       return Builder.CreateBitCast(NewStorage, FramePtrTy);
830 
831     // Otherwise, load the real frame from the opaque storage.
832     auto FramePtrPtr =
833       Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
834     return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
835   }
836   }
837   llvm_unreachable("bad ABI");
838 }
839 
840 static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
841                                  unsigned ParamIndex,
842                                  uint64_t Size, Align Alignment) {
843   AttrBuilder ParamAttrs(Context);
844   ParamAttrs.addAttribute(Attribute::NonNull);
845   ParamAttrs.addAttribute(Attribute::NoAlias);
846   ParamAttrs.addAlignmentAttr(Alignment);
847   ParamAttrs.addDereferenceableAttr(Size);
848   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
849 }
850 
851 static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
852                                  unsigned ParamIndex) {
853   AttrBuilder ParamAttrs(Context);
854   ParamAttrs.addAttribute(Attribute::SwiftAsync);
855   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
856 }
857 
858 static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
859                               unsigned ParamIndex) {
860   AttrBuilder ParamAttrs(Context);
861   ParamAttrs.addAttribute(Attribute::SwiftSelf);
862   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
863 }
864 
865 /// Clone the body of the original function into a resume function of
866 /// some sort.
867 void CoroCloner::create() {
868   // Create the new function if we don't already have one.
869   if (!NewF) {
870     NewF = createCloneDeclaration(OrigF, Shape, Suffix,
871                                   OrigF.getParent()->end(), ActiveSuspend);
872   }
873 
874   // Replace all args with undefs. The buildCoroutineFrame algorithm has already
875   // rewritten accesses to the args that occur after suspend points into loads
876   // and stores to/from the coroutine frame.
877   for (Argument &A : OrigF.args())
878     VMap[&A] = UndefValue::get(A.getType());
879 
880   SmallVector<ReturnInst *, 4> Returns;
881 
882   // Ignore attempts to change certain attributes of the function.
883   // TODO: maybe there should be a way to suppress this during cloning?
884   auto savedVisibility = NewF->getVisibility();
885   auto savedUnnamedAddr = NewF->getUnnamedAddr();
886   auto savedDLLStorageClass = NewF->getDLLStorageClass();
887 
888   // NewF's linkage (which CloneFunctionInto does *not* change) might not
889   // be compatible with the visibility of OrigF (which it *does* change),
890   // so protect against that.
891   auto savedLinkage = NewF->getLinkage();
892   NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
893 
894   CloneFunctionInto(NewF, &OrigF, VMap,
895                     CloneFunctionChangeType::LocalChangesOnly, Returns);
896 
897   auto &Context = NewF->getContext();
898 
899   // For async functions / continuations, adjust the scope line of the
900   // clone to the line number of the suspend point. However, only
901   // adjust the scope line when the files are the same. This ensures
902   // line number and file name belong together. The scope line is
903   // associated with all pre-prologue instructions. This avoids a jump
904   // in the linetable from the function declaration to the suspend point.
905   if (DISubprogram *SP = NewF->getSubprogram()) {
906     assert(SP != OrigF.getSubprogram() && SP->isDistinct());
907     if (ActiveSuspend)
908       if (auto DL = ActiveSuspend->getDebugLoc())
909         if (SP->getFile() == DL->getFile())
910           SP->setScopeLine(DL->getLine());
911     // Update the linkage name to reflect the modified symbol name. It
912     // is necessary to update the linkage name in Swift, since the
913     // mangling changes for resume functions. It might also be the
914     // right thing to do in C++, but due to a limitation in LLVM's
915     // AsmPrinter we can only do this if the function doesn't have an
916     // abstract specification, since the DWARF backend expects the
917     // abstract specification to contain the linkage name and asserts
918     // that they are identical.
919     if (!SP->getDeclaration() && SP->getUnit() &&
920         SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
921       SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
922   }
923 
924   NewF->setLinkage(savedLinkage);
925   NewF->setVisibility(savedVisibility);
926   NewF->setUnnamedAddr(savedUnnamedAddr);
927   NewF->setDLLStorageClass(savedDLLStorageClass);
928 
929   // Replace the attributes of the new function:
930   auto OrigAttrs = NewF->getAttributes();
931   auto NewAttrs = AttributeList();
932 
933   switch (Shape.ABI) {
934   case coro::ABI::Switch:
935     // Bootstrap attributes by copying function attributes from the
936     // original function.  This should include optimization settings and so on.
937     NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
938 
939     addFramePointerAttrs(NewAttrs, Context, 0,
940                          Shape.FrameSize, Shape.FrameAlign);
941     break;
942   case coro::ABI::Async: {
943     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
944     if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
945                                 Attribute::SwiftAsync)) {
946       uint32_t ArgAttributeIndices =
947           ActiveAsyncSuspend->getStorageArgumentIndex();
948       auto ContextArgIndex = ArgAttributeIndices & 0xff;
949       addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
950 
951       // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
952       // `swiftself`.
953       auto SwiftSelfIndex = ArgAttributeIndices >> 8;
954       if (SwiftSelfIndex)
955         addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
956     }
957 
958     // Transfer the original function's attributes.
959     auto FnAttrs = OrigF.getAttributes().getFnAttrs();
960     NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
961     break;
962   }
963   case coro::ABI::Retcon:
964   case coro::ABI::RetconOnce:
965     // If we have a continuation prototype, just use its attributes,
966     // full-stop.
967     NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
968 
969     addFramePointerAttrs(NewAttrs, Context, 0,
970                          Shape.getRetconCoroId()->getStorageSize(),
971                          Shape.getRetconCoroId()->getStorageAlignment());
972     break;
973   }
974 
975   switch (Shape.ABI) {
976   // In these ABIs, the cloned functions always return 'void', and the
977   // existing return sites are meaningless.  Note that for unique
978   // continuations, this includes the returns associated with suspends;
979   // this is fine because we can't suspend twice.
980   case coro::ABI::Switch:
981   case coro::ABI::RetconOnce:
982     // Remove old returns.
983     for (ReturnInst *Return : Returns)
984       changeToUnreachable(Return);
985     break;
986 
987   // With multi-suspend continuations, we'll already have eliminated the
988   // original returns and inserted returns before all the suspend points,
989   // so we want to leave any returns in place.
990   case coro::ABI::Retcon:
991     break;
992   // Async lowering will insert musttail call functions at all suspend points
993   // followed by a return.
994   // Don't change returns to unreachable because that will trip up the verifier.
995   // These returns should be unreachable from the clone.
996   case coro::ABI::Async:
997     break;
998   }
999 
1000   NewF->setAttributes(NewAttrs);
1001   NewF->setCallingConv(Shape.getResumeFunctionCC());
1002 
1003   // Set up the new entry block.
1004   replaceEntryBlock();
1005 
1006   Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1007   NewFramePtr = deriveNewFramePointer();
1008 
1009   // Remap frame pointer.
1010   Value *OldFramePtr = VMap[Shape.FramePtr];
1011   NewFramePtr->takeName(OldFramePtr);
1012   OldFramePtr->replaceAllUsesWith(NewFramePtr);
1013 
1014   // Remap vFrame pointer.
1015   auto *NewVFrame = Builder.CreateBitCast(
1016       NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
1017   Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
1018   if (OldVFrame != NewVFrame)
1019     OldVFrame->replaceAllUsesWith(NewVFrame);
1020 
1021   switch (Shape.ABI) {
1022   case coro::ABI::Switch:
1023     // Rewrite final suspend handling, as it is not done via the switch (this
1024     // allows us to remove the final case from the switch, since it is undefined
1025     // behavior to resume a coroutine suspended at the final suspend point).
1026     if (Shape.SwitchLowering.HasFinalSuspend)
1027       handleFinalSuspend();
1028     break;
1029   case coro::ABI::Async:
1030   case coro::ABI::Retcon:
1031   case coro::ABI::RetconOnce:
1032     // Replace uses of the active suspend with the corresponding
1033     // continuation-function arguments.
1034     assert(ActiveSuspend != nullptr &&
1035            "no active suspend when lowering a continuation-style coroutine");
1036     replaceRetconOrAsyncSuspendUses();
1037     break;
1038   }
1039 
1040   // Handle suspends.
1041   replaceCoroSuspends();
1042 
1043   // Handle swifterror.
1044   replaceSwiftErrorOps();
1045 
1046   // Remove coro.end intrinsics.
1047   replaceCoroEnds();
1048 
1049   // Salvage debug info that points into the coroutine frame.
1050   salvageDebugInfo();
1051 
1052   // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1053   // to suppress deallocation code.
1054   if (Shape.ABI == coro::ABI::Switch)
1055     coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1056                           /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1057 }
1058 
1059 // Create a resume clone by cloning the body of the original function, setting
1060 // a new entry block and replacing coro.suspend with an appropriate value to
1061 // force the resume or cleanup path at every suspend point.
1062 static Function *createClone(Function &F, const Twine &Suffix,
1063                              coro::Shape &Shape, CoroCloner::Kind FKind) {
1064   CoroCloner Cloner(F, Suffix, Shape, FKind);
1065   Cloner.create();
1066   return Cloner.getFunction();
1067 }
1068 
1069 /// Remove calls to llvm.coro.end in the original function.
1070 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
1071   for (auto End : Shape.CoroEnds) {
1072     replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
1073   }
1074 }
1075 
1076 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1077   assert(Shape.ABI == coro::ABI::Async);
1078 
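  // The async function pointer global is a struct of { relative function
  // offset, context size }; rebuild it with the newly computed context size.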
1079   auto *FuncPtrStruct = cast<ConstantStruct>(
1080       Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1081   auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1082   auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1083   auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1084                                           Shape.AsyncLowering.ContextSize);
1085   auto *NewFuncPtrStruct = ConstantStruct::get(
1086       FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1087 
1088   Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1089 }
1090 
1091 static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
1092   if (Shape.ABI == coro::ABI::Async)
1093     updateAsyncFuncPointerContextSize(Shape);
1094 
1095   for (CoroAlignInst *CA : Shape.CoroAligns) {
1096     CA->replaceAllUsesWith(
1097         ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
1098     CA->eraseFromParent();
1099   }
1100 
1101   if (Shape.CoroSizes.empty())
1102     return;
1103 
1104   // In the same function all coro.sizes should have the same result type.
1105   auto *SizeIntrin = Shape.CoroSizes.back();
1106   Module *M = SizeIntrin->getModule();
1107   const DataLayout &DL = M->getDataLayout();
1108   auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1109   auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1110 
1111   for (CoroSizeInst *CS : Shape.CoroSizes) {
1112     CS->replaceAllUsesWith(SizeConstant);
1113     CS->eraseFromParent();
1114   }
1115 }
1116 
1117 // Create a global constant array containing pointers to functions provided and
1118 // set Info parameter of CoroBegin to point at this constant. Example:
1119 //
1120 //   @f.resumers = internal constant [2 x void(%f.frame*)*]
1121 //                    [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1122 //   define void @f() {
1123 //     ...
1124 //     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1125 //                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1126 //
1127 // Assumes that all the functions have the same signature.
1128 static void setCoroInfo(Function &F, coro::Shape &Shape,
1129                         ArrayRef<Function *> Fns) {
1130   // This only works under the switch-lowering ABI because coro elision
1131   // only works on the switch-lowering ABI.
1132   assert(Shape.ABI == coro::ABI::Switch);
1133 
1134   SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1135   assert(!Args.empty());
1136   Function *Part = *Fns.begin();
1137   Module *M = Part->getParent();
1138   auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1139 
1140   auto *ConstVal = ConstantArray::get(ArrTy, Args);
1141   auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1142                                 GlobalVariable::PrivateLinkage, ConstVal,
1143                                 F.getName() + Twine(".resumers"));
1144 
1145   // Update coro.begin instruction to refer to this constant.
1146   LLVMContext &C = F.getContext();
1147   auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1148   Shape.getSwitchCoroId()->setInfo(BC);
1149 }
1150 
1151 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
1152 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1153                             Function *DestroyFn, Function *CleanupFn) {
1154   assert(Shape.ABI == coro::ABI::Switch);
1155 
1156   IRBuilder<> Builder(Shape.getInsertPtAfterFramePtr());
1157 
1158   auto *ResumeAddr = Builder.CreateStructGEP(
1159       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1160       "resume.addr");
1161   Builder.CreateStore(ResumeFn, ResumeAddr);
1162 
1163   Value *DestroyOrCleanupFn = DestroyFn;
1164 
1165   CoroIdInst *CoroId = Shape.getSwitchCoroId();
1166   if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1167     // If there is a CoroAlloc and it returns false (meaning we elided the
1168     // allocation), use CleanupFn instead of DestroyFn.
1169     DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1170   }
1171 
1172   auto *DestroyAddr = Builder.CreateStructGEP(
1173       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1174       "destroy.addr");
1175   Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1176 }
1177 
1178 static void postSplitCleanup(Function &F) {
1179   removeUnreachableBlocks(F);
1180 
1181 #ifndef NDEBUG
1182   // For now, we do a mandatory verification step because we don't
1183   // entirely trust this pass.  Note that we don't want to add a verifier
1184   // pass to FPM below because it will also verify all the global data.
1185   if (verifyFunction(F, &errs()))
1186     report_fatal_error("Broken function");
1187 #endif
1188 }
1189 
1190 // Assuming we arrived at the block NewBlock from the Prev instruction, store
1191 // the PHIs' incoming values in the ResolvedValues map.
1192 static void
1193 scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1194                           DenseMap<Value *, Value *> &ResolvedValues) {
1195   auto *PrevBB = Prev->getParent();
1196   for (PHINode &PN : NewBlock->phis()) {
1197     auto V = PN.getIncomingValueForBlock(PrevBB);
1198     // See if we already resolved it.
1199     auto VI = ResolvedValues.find(V);
1200     if (VI != ResolvedValues.end())
1201       V = VI->second;
1202     // Remember the value.
1203     ResolvedValues[&PN] = V;
1204   }
1205 }
1206 
1207 // Replace a sequence of branches leading to a ret with a clone of that ret
1208 // instruction. A suspend instruction is represented by a switch; track the PHI
1209 // values and select the correct case successor when possible.
1210 static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1211   DenseMap<Value *, Value *> ResolvedValues;
1212   BasicBlock *UnconditionalSucc = nullptr;
1213   assert(InitialInst->getModule());
1214   const DataLayout &DL = InitialInst->getModule()->getDataLayout();
1215 
1216   auto GetFirstValidInstruction = [](Instruction *I) {
1217     while (I) {
1218       // BitCastInst won't generate actual code, so we can skip it.
1219       if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
1220           I->isLifetimeStartOrEnd())
1221         I = I->getNextNode();
1222       else if (isInstructionTriviallyDead(I))
1223         // Since we are in the middle of the transformation, we need to erase
1224         // the dead instruction manually.
1225         I = &*I->eraseFromParent();
1226       else
1227         break;
1228     }
1229     return I;
1230   };
1231 
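  // Look a value up through the resolved PHI values and return it if it is (or
  // resolves to) a constant integer.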
1232   auto TryResolveConstant = [&ResolvedValues](Value *V) {
1233     auto It = ResolvedValues.find(V);
1234     if (It != ResolvedValues.end())
1235       V = It->second;
1236     return dyn_cast<ConstantInt>(V);
1237   };
1238 
1239   Instruction *I = InitialInst;
1240   while (I->isTerminator() || isa<CmpInst>(I)) {
1241     if (isa<ReturnInst>(I)) {
1242       if (I != InitialInst) {
1243         // If InitialInst is an unconditional branch,
1244         // remove PHI values that come from the basic block of InitialInst.
1245         if (UnconditionalSucc)
1246           UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1247         ReplaceInstWithInst(InitialInst, I->clone());
1248       }
1249       return true;
1250     }
1251     if (auto *BR = dyn_cast<BranchInst>(I)) {
1252       if (BR->isUnconditional()) {
1253         BasicBlock *Succ = BR->getSuccessor(0);
1254         if (I == InitialInst)
1255           UnconditionalSucc = Succ;
1256         scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
1257         I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
1258         continue;
1259       }
1260 
1261       BasicBlock *BB = BR->getParent();
1262       // Handle the case where the condition of the conditional branch is
1263       // constant, e.g.,
1264       //
1265       //     br i1 false, label %cleanup, label %CoroEnd
1266       //
1267       // This is possible during the transformation, and we can continue
1268       // simplifying in that case.
1269       if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
1270         // Handle this branch in next iteration.
1271         I = BB->getTerminator();
1272         continue;
1273       }
1274     } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1275       // If the number of cases in the suspend switch is reduced to 1, it is
1276       // simplified to a CmpInst by llvm::ConstantFoldTerminator.
1277       auto *BR = dyn_cast<BranchInst>(
1278           GetFirstValidInstruction(CondCmp->getNextNode()));
1279       if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
1280         return false;
1281 
1282       // The comparison looks like: %cond = icmp eq i8 %V, constant.
1283       // So we try to resolve the constant for the first operand only, since the
1284       // second operand should be a literal constant by design.
1285       ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
1286       auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1287       if (!Cond0 || !Cond1)
1288         return false;
1289 
1290       // Both operands of the CmpInst are constant, so we can evaluate it
1291       // immediately to get the destination.
1292       auto *ConstResult =
1293           dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
1294               CondCmp->getPredicate(), Cond0, Cond1, DL));
1295       if (!ConstResult)
1296         return false;
1297 
1298       CondCmp->replaceAllUsesWith(ConstResult);
1299       CondCmp->eraseFromParent();
1300 
1301       // Handle this branch in next iteration.
1302       I = BR;
1303       continue;
1304     } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1305       ConstantInt *Cond = TryResolveConstant(SI->getCondition());
1306       if (!Cond)
1307         return false;
1308 
1309       BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1310       scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1311       I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
1312       continue;
1313     }
1314 
1315     return false;
1316   }
1317   return false;
1318 }
1319 
1320 // Check whether CI obeys the rules of musttail attribute.
1321 static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1322   if (CI.isInlineAsm())
1323     return false;
1324 
1325   // Match prototypes and calling conventions of resume function.
1326   FunctionType *CalleeTy = CI.getFunctionType();
1327   if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1328     return false;
1329 
1330   Type *CalleeParmTy = CalleeTy->getParamType(0);
1331   if (!CalleeParmTy->isPointerTy() ||
1332       (CalleeParmTy->getPointerAddressSpace() != 0))
1333     return false;
1334 
1335   if (CI.getCallingConv() != F.getCallingConv())
1336     return false;
1337 
1338   // CI should not have any ABI-impacting function attributes.
1339   static const Attribute::AttrKind ABIAttrs[] = {
1340       Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
1341       Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
1342       Attribute::SwiftSelf,    Attribute::SwiftError};
1343   AttributeList Attrs = CI.getAttributes();
1344   for (auto AK : ABIAttrs)
1345     if (Attrs.hasParamAttr(0, AK))
1346       return false;
1347 
1348   return true;
1349 }
1350 
1351 // Add musttail to any resume instructions that are immediately followed by a
1352 // suspend (i.e. ret). We do this even in -O0 to support guaranteed tail call
1353 // for symmetric coroutine control transfer (C++ Coroutines TS extension).
1354 // This transformation is done only in the resume part of the coroutine, which
1355 // has the same signature and calling convention as the coro.resume call.
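// For illustration only (the exact IR shape may vary), a candidate call
// typically looks roughly like:
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)
//   %fn   = bitcast i8* %addr to void (i8*)*
//   call void %fn(i8* %hdl)
//   ret void
// where the ret may be separated from the call by branches that constant-fold
// away.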
1356 static void addMustTailToCoroResumes(Function &F) {
1357   bool changed = false;
1358 
1359   // Collect potential resume instructions.
1360   SmallVector<CallInst *, 4> Resumes;
1361   for (auto &I : instructions(F))
1362     if (auto *Call = dyn_cast<CallInst>(&I))
1363       if (shouldBeMustTail(*Call, F))
1364         Resumes.push_back(Call);
1365 
1366   // Set musttail on those that are followed by a ret instruction.
1367   for (CallInst *Call : Resumes)
1368     if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1369       Call->setTailCallKind(CallInst::TCK_MustTail);
1370       changed = true;
1371     }
1372 
1373   if (changed)
1374     removeUnreachableBlocks(F);
1375 }
1376 
1377 // The coroutine has no suspend points. Remove the heap allocation for the
1378 // coroutine frame if possible.
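// For the switch ABI, llvm.coro.alloc (if present) is folded to 'false' and
// llvm.coro.begin is rewritten to a stack alloca of the frame type (or to its
// memory argument when there is no llvm.coro.alloc); the other ABIs replace
// llvm.coro.begin with undef before erasing it.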
1379 static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1380   auto *CoroBegin = Shape.CoroBegin;
1381   auto *CoroId = CoroBegin->getId();
1382   auto *AllocInst = CoroId->getCoroAlloc();
1383   switch (Shape.ABI) {
1384   case coro::ABI::Switch: {
1385     auto SwitchId = cast<CoroIdInst>(CoroId);
1386     coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1387     if (AllocInst) {
1388       IRBuilder<> Builder(AllocInst);
1389       auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1390       Frame->setAlignment(Shape.FrameAlign);
1391       auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1392       AllocInst->replaceAllUsesWith(Builder.getFalse());
1393       AllocInst->eraseFromParent();
1394       CoroBegin->replaceAllUsesWith(VFrame);
1395     } else {
1396       CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1397     }
1398 
1399     break;
1400   }
1401   case coro::ABI::Async:
1402   case coro::ABI::Retcon:
1403   case coro::ABI::RetconOnce:
1404     CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1405     break;
1406   }
1407 
1408   CoroBegin->eraseFromParent();
1409 }
1410 
1411 // SimplifySuspendPoint needs to check that there are no calls between
1412 // coro_save and coro_suspend, since any of those calls may potentially resume
1413 // the coroutine, in which case we cannot eliminate the suspend point.
1414 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1415   for (Instruction *I = From; I != To; I = I->getNextNode()) {
1416     // Assume that no intrinsic can resume the coroutine.
1417     if (isa<IntrinsicInst>(I))
1418       continue;
1419 
1420     if (isa<CallBase>(I))
1421       return true;
1422   }
1423   return false;
1424 }
1425 
1426 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1427   SmallPtrSet<BasicBlock *, 8> Set;
1428   SmallVector<BasicBlock *, 8> Worklist;
1429 
1430   Set.insert(SaveBB);
1431   Worklist.push_back(ResDesBB);
1432 
1433   // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
1434   // returns a token consumed by the suspend instruction, all blocks in between
1435   // must eventually reach SaveBB when walking backwards from ResDesBB.
1436   while (!Worklist.empty()) {
1437     auto *BB = Worklist.pop_back_val();
1438     Set.insert(BB);
1439     for (auto *Pred : predecessors(BB))
1440       if (!Set.contains(Pred))
1441         Worklist.push_back(Pred);
1442   }
1443 
1444   // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1445   Set.erase(SaveBB);
1446   Set.erase(ResDesBB);
1447 
1448   for (auto *BB : Set)
1449     if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1450       return true;
1451 
1452   return false;
1453 }
1454 
1455 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1456   auto *SaveBB = Save->getParent();
1457   auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1458 
1459   if (SaveBB == ResumeOrDestroyBB)
1460     return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1461 
1462   // Any calls from Save to the end of the block?
1463   if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1464     return true;
1465 
1466   // Any calls from the beginning of the block up to ResumeOrDestroy?
1467   if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1468                              ResumeOrDestroy))
1469     return true;
1470 
1471   // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1472   if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1473     return true;
1474 
1475   return false;
1476 }
1477 
1478 // If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
1479 // suspend point and replace it with normal control flow.
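// For illustration only, the pattern being matched is roughly:
//   %save = call token @llvm.coro.save(i8* %hdl)
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0) ; 0: resume, 1: destroy
//   %fn   = bitcast i8* %addr to void (i8*)*
//   call void %fn(i8* %hdl)
//   %sp   = call i8 @llvm.coro.suspend(token %save, i1 false)
// where %hdl is the current coroutine's own handle (the result of
// llvm.coro.begin). Suspending immediately after resuming/destroying ourselves
// is equivalent to continuing on the corresponding path, so the save/suspend
// pair is removed and %sp is replaced with the subfunction index.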
1480 static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1481                                  CoroBeginInst *CoroBegin) {
1482   Instruction *Prev = Suspend->getPrevNode();
1483   if (!Prev) {
1484     auto *Pred = Suspend->getParent()->getSinglePredecessor();
1485     if (!Pred)
1486       return false;
1487     Prev = Pred->getTerminator();
1488   }
1489 
1490   CallBase *CB = dyn_cast<CallBase>(Prev);
1491   if (!CB)
1492     return false;
1493 
1494   auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1495 
1496   // See if the callsite is for resumption or destruction of the coroutine.
1497   auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1498   if (!SubFn)
1499     return false;
1500 
1501   // If it does not refer to the current coroutine, we cannot do anything.
1502   if (SubFn->getFrame() != CoroBegin)
1503     return false;
1504 
1505   // See if the transformation is safe. Specifically, see if there are any
1506   // calls in between Save and CallInstr. They could potentially resume the
1507   // coroutine, rendering this optimization unsafe.
1508   auto *Save = Suspend->getCoroSave();
1509   if (hasCallsBetween(Save, CB))
1510     return false;
1511 
1512   // Replace llvm.coro.suspend with the value that makes control continue
1513   // along the resume or cleanup path.
1514   Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1515   Suspend->eraseFromParent();
1516   Save->eraseFromParent();
1517 
1518   // No longer need a call to coro.resume or coro.destroy.
1519   if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1520     BranchInst::Create(Invoke->getNormalDest(), Invoke);
1521   }
1522 
1523   // Grab the CalledValue from CB before erasing the CallInstr.
1524   auto *CalledValue = CB->getCalledOperand();
1525   CB->eraseFromParent();
1526 
1527   // If there are no more users, remove it. Usually it is a bitcast of SubFn.
1528   if (CalledValue != SubFn && CalledValue->user_empty())
1529     if (auto *I = dyn_cast<Instruction>(CalledValue))
1530       I->eraseFromParent();
1531 
1532   // Now we are good to remove SubFn.
1533   if (SubFn->user_empty())
1534     SubFn->eraseFromParent();
1535 
1536   return true;
1537 }
1538 
1539 // Remove suspend points that are simplified.
1540 static void simplifySuspendPoints(coro::Shape &Shape) {
1541   // Currently, the only simplification we do is switch-lowering-specific.
1542   if (Shape.ABI != coro::ABI::Switch)
1543     return;
1544 
1545   auto &S = Shape.CoroSuspends;
1546   size_t I = 0, N = S.size();
1547   if (N == 0)
1548     return;
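  // Compact the suspend list in place: simplified suspend points are swapped
  // to the tail of S and trimmed off by the final resize(N).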
1549   while (true) {
1550     auto SI = cast<CoroSuspendInst>(S[I]);
1551     // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1552     // to resume a coroutine suspended at the final suspend point.
1553     if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1554       if (--N == I)
1555         break;
1556       std::swap(S[I], S[N]);
1557       continue;
1558     }
1559     if (++I == N)
1560       break;
1561   }
1562   S.resize(N);
1563 }
1564 
1565 static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1566                                  SmallVectorImpl<Function *> &Clones) {
1567   assert(Shape.ABI == coro::ABI::Switch);
1568 
1569   createResumeEntryBlock(F, Shape);
1570   auto ResumeClone = createClone(F, ".resume", Shape,
1571                                  CoroCloner::Kind::SwitchResume);
1572   auto DestroyClone = createClone(F, ".destroy", Shape,
1573                                   CoroCloner::Kind::SwitchUnwind);
1574   auto CleanupClone = createClone(F, ".cleanup", Shape,
1575                                   CoroCloner::Kind::SwitchCleanup);
1576 
1577   postSplitCleanup(*ResumeClone);
1578   postSplitCleanup(*DestroyClone);
1579   postSplitCleanup(*CleanupClone);
1580 
1581   addMustTailToCoroResumes(*ResumeClone);
1582 
1583   // Store the resume/destroy/cleanup function addresses in the coroutine frame.
1584   updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1585 
1586   assert(Clones.empty());
1587   Clones.push_back(ResumeClone);
1588   Clones.push_back(DestroyClone);
1589   Clones.push_back(CleanupClone);
1590 
1591   // Create a constant array referring to the resume/destroy/cleanup functions,
1592   // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
1593   // pass can determine the correct function to call.
1594   setCoroInfo(F, Shape, Clones);
1595 }
1596 
1597 static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1598                                        Value *Continuation) {
1599   auto *ResumeIntrinsic = Suspend->getResumeFunction();
1600   auto &Context = Suspend->getParent()->getParent()->getContext();
1601   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1602 
1603   IRBuilder<> Builder(ResumeIntrinsic);
1604   auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1605   ResumeIntrinsic->replaceAllUsesWith(Val);
1606   ResumeIntrinsic->eraseFromParent();
1607   Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1608                       UndefValue::get(Int8PtrTy));
1609 }
1610 
1611 /// Coerce the arguments in \p FnArgs to match \p FnTy, appending to \p CallArgs.
1612 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1613                             ArrayRef<Value *> FnArgs,
1614                             SmallVectorImpl<Value *> &CallArgs) {
1615   size_t ArgIdx = 0;
1616   for (auto paramTy : FnTy->params()) {
1617     assert(ArgIdx < FnArgs.size());
1618     if (paramTy != FnArgs[ArgIdx]->getType())
1619       CallArgs.push_back(
1620           Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1621     else
1622       CallArgs.push_back(FnArgs[ArgIdx]);
1623     ++ArgIdx;
1624   }
1625 }
1626 
1627 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1628                                    ArrayRef<Value *> Arguments,
1629                                    IRBuilder<> &Builder) {
1630   auto *FnTy = MustTailCallFn->getFunctionType();
1631   // Coerce the arguments: LLVM optimizations seem to ignore the types in
1632   // vararg functions and throw away casts in optimized mode.
1633   SmallVector<Value *, 8> CallArgs;
1634   coerceArguments(Builder, FnTy, Arguments, CallArgs);
1635 
1636   auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1637   TailCall->setTailCallKind(CallInst::TCK_MustTail);
1638   TailCall->setDebugLoc(Loc);
1639   TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1640   return TailCall;
1641 }
1642 
1643 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1644                                 SmallVectorImpl<Function *> &Clones) {
1645   assert(Shape.ABI == coro::ABI::Async);
1646   assert(Clones.empty());
1647   // Reset various things that the optimizer might have decided it
1648   // "knows" about the coroutine function due to not seeing a return.
1649   F.removeFnAttr(Attribute::NoReturn);
1650   F.removeRetAttr(Attribute::NoAlias);
1651   F.removeRetAttr(Attribute::NonNull);
1652 
1653   auto &Context = F.getContext();
1654   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1655 
1656   auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1657   IRBuilder<> Builder(Id);
1658 
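  // The coroutine frame lives inside the async context at a fixed offset, so
  // compute a pointer to it from the storage passed to llvm.coro.id.async.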
1659   auto *FramePtr = Id->getStorage();
1660   FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1661   FramePtr = Builder.CreateConstInBoundsGEP1_32(
1662       Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1663       "async.ctx.frameptr");
1664 
1665   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1666   {
1667     // Make sure we don't invalidate Shape.FramePtr.
1668     TrackingVH<Value> Handle(Shape.FramePtr);
1669     Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1670     Shape.FramePtr = Handle.getValPtr();
1671   }
1672 
1673   // Create all the functions in order after the main function.
1674   auto NextF = std::next(F.getIterator());
1675 
1676   // Create a continuation function for each of the suspend points.
1677   Clones.reserve(Shape.CoroSuspends.size());
1678   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1679     auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1680 
1681     // Create the clone declaration.
1682     auto ResumeNameSuffix = ".resume.";
1683     auto ProjectionFunctionName =
1684         Suspend->getAsyncContextProjectionFunction()->getName();
1685     bool UseSwiftMangling = false;
1686     if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1687       ResumeNameSuffix = "TQ";
1688       UseSwiftMangling = true;
1689     } else if (ProjectionFunctionName.equals(
1690                    "__swift_async_resume_get_context")) {
1691       ResumeNameSuffix = "TY";
1692       UseSwiftMangling = true;
1693     }
1694     auto *Continuation = createCloneDeclaration(
1695         F, Shape,
1696         UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1697                          : ResumeNameSuffix + Twine(Idx),
1698         NextF, Suspend);
1699     Clones.push_back(Continuation);
1700 
1701     // Insert a branch to a new return block immediately before the suspend
1702     // point.
1703     auto *SuspendBB = Suspend->getParent();
1704     auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1705     auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1706 
1707     // Place the return block right before the split-off suspend block.
1708     auto *ReturnBB =
1709         BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1710     Branch->setSuccessor(0, ReturnBB);
1711 
1712     IRBuilder<> Builder(ReturnBB);
1713 
1714     // Insert the call to the tail call function and inline it.
1715     auto *Fn = Suspend->getMustTailCallFunction();
1716     SmallVector<Value *, 8> Args(Suspend->args());
1717     auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1718         CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1719     auto *TailCall =
1720         coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1721     Builder.CreateRetVoid();
1722     InlineFunctionInfo FnInfo;
1723     auto InlineRes = InlineFunction(*TailCall, FnInfo);
1724     assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1725     (void)InlineRes;
1726 
1727     // Replace the llvm.coro.async.resume intrinsic call.
1728     replaceAsyncResumeFunction(Suspend, Continuation);
1729   }
1730 
1731   assert(Clones.size() == Shape.CoroSuspends.size());
1732   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1733     auto *Suspend = Shape.CoroSuspends[Idx];
1734     auto *Clone = Clones[Idx];
1735 
1736     CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1737   }
1738 }
1739 
1740 static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1741                                  SmallVectorImpl<Function *> &Clones) {
1742   assert(Shape.ABI == coro::ABI::Retcon ||
1743          Shape.ABI == coro::ABI::RetconOnce);
1744   assert(Clones.empty());
1745 
1746   // Reset various things that the optimizer might have decided it
1747   // "knows" about the coroutine function due to not seeing a return.
1748   F.removeFnAttr(Attribute::NoReturn);
1749   F.removeRetAttr(Attribute::NoAlias);
1750   F.removeRetAttr(Attribute::NonNull);
1751 
1752   // Allocate the frame.
1753   auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1754   Value *RawFramePtr;
1755   if (Shape.RetconLowering.IsFrameInlineInStorage) {
1756     RawFramePtr = Id->getStorage();
1757   } else {
1758     IRBuilder<> Builder(Id);
1759 
1760     // Determine the size of the frame.
1761     const DataLayout &DL = F.getParent()->getDataLayout();
1762     auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1763 
1764     // Allocate.  We don't need to update the call graph node because we're
1765     // going to recompute it from scratch after splitting.
1766     // FIXME: pass the required alignment
1767     RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1768     RawFramePtr =
1769       Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1770 
1771     // Stash the allocated frame pointer in the continuation storage.
1772     auto Dest = Builder.CreateBitCast(Id->getStorage(),
1773                                       RawFramePtr->getType()->getPointerTo());
1774     Builder.CreateStore(RawFramePtr, Dest);
1775   }
1776 
1777   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1778   {
1779     // Make sure we don't invalidate Shape.FramePtr.
1780     TrackingVH<Value> Handle(Shape.FramePtr);
1781     Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1782     Shape.FramePtr = Handle.getValPtr();
1783   }
1784 
1785   // Create a unique return block.
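// The ramp function returns the continuation pointer first, followed by any
// directly-yielded values (packed into a struct when there is more than one
// component), so the unified return block needs one PHI per component.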
1786   BasicBlock *ReturnBB = nullptr;
1787   SmallVector<PHINode *, 4> ReturnPHIs;
1788 
1789   // Create all the functions in order after the main function.
1790   auto NextF = std::next(F.getIterator());
1791 
1792   // Create a continuation function for each of the suspend points.
1793   Clones.reserve(Shape.CoroSuspends.size());
1794   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1795     auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1796 
1797     // Create the clone declaration.
1798     auto Continuation =
1799         createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1800     Clones.push_back(Continuation);
1801 
1802     // Insert a branch to the unified return block immediately before
1803     // the suspend point.
1804     auto SuspendBB = Suspend->getParent();
1805     auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1806     auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1807 
1808     // Create the unified return block.
1809     if (!ReturnBB) {
1810       // Place it before the first suspend.
1811       ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1812                                     NewSuspendBB);
1813       Shape.RetconLowering.ReturnBlock = ReturnBB;
1814 
1815       IRBuilder<> Builder(ReturnBB);
1816 
1817       // Create PHIs for all the return values.
1818       assert(ReturnPHIs.empty());
1819 
1820       // First, the continuation.
1821       ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1822                                              Shape.CoroSuspends.size()));
1823 
1824       // Next, all the directly-yielded values.
1825       for (auto ResultTy : Shape.getRetconResultTypes())
1826         ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1827                                                Shape.CoroSuspends.size()));
1828 
1829       // Build the return value.
1830       auto RetTy = F.getReturnType();
1831 
1832       // Cast the continuation value if necessary.
1833       // We can't rely on the types matching up because that type would
1834       // have to be infinite.
1835       auto CastedContinuationTy =
1836         (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1837       auto *CastedContinuation =
1838         Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1839 
1840       Value *RetV;
1841       if (ReturnPHIs.size() == 1) {
1842         RetV = CastedContinuation;
1843       } else {
1844         RetV = UndefValue::get(RetTy);
1845         RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1846         for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1847           RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1848       }
1849 
1850       Builder.CreateRet(RetV);
1851     }
1852 
1853     // Branch to the return block.
1854     Branch->setSuccessor(0, ReturnBB);
1855     ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1856     size_t NextPHIIndex = 1;
1857     for (auto &VUse : Suspend->value_operands())
1858       ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1859     assert(NextPHIIndex == ReturnPHIs.size());
1860   }
1861 
1862   assert(Clones.size() == Shape.CoroSuspends.size());
1863   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1864     auto Suspend = Shape.CoroSuspends[i];
1865     auto Clone = Clones[i];
1866 
1867     CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1868   }
1869 }
1870 
1871 namespace {
1872   class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1873     Function &F;
1874   public:
1875     PrettyStackTraceFunction(Function &F) : F(F) {}
1876     void print(raw_ostream &OS) const override {
1877       OS << "While splitting coroutine ";
1878       F.printAsOperand(OS, /*print type*/ false, F.getParent());
1879       OS << "\n";
1880     }
1881   };
1882 }
1883 
1884 static coro::Shape splitCoroutine(Function &F,
1885                                   SmallVectorImpl<Function *> &Clones,
1886                                   bool OptimizeFrame) {
1887   PrettyStackTraceFunction prettyStackTrace(F);
1888 
1889   // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1890   // up by uses in unreachable blocks, so remove them as a first pass.
1891   removeUnreachableBlocks(F);
1892 
1893   coro::Shape Shape(F, OptimizeFrame);
1894   if (!Shape.CoroBegin)
1895     return Shape;
1896 
1897   simplifySuspendPoints(Shape);
1898   buildCoroutineFrame(F, Shape);
1899   replaceFrameSizeAndAlignment(Shape);
1900 
1901   // If there are no suspend points, no split is required; just remove
1902   // the allocation and deallocation blocks, they are not needed.
1903   if (Shape.CoroSuspends.empty()) {
1904     handleNoSuspendCoroutine(Shape);
1905   } else {
1906     switch (Shape.ABI) {
1907     case coro::ABI::Switch:
1908       splitSwitchCoroutine(F, Shape, Clones);
1909       break;
1910     case coro::ABI::Async:
1911       splitAsyncCoroutine(F, Shape, Clones);
1912       break;
1913     case coro::ABI::Retcon:
1914     case coro::ABI::RetconOnce:
1915       splitRetconCoroutine(F, Shape, Clones);
1916       break;
1917     }
1918   }
1919 
1920   // Replace all the swifterror operations in the original function.
1921   // This invalidates SwiftErrorOps in the Shape.
1922   replaceSwiftErrorOps(F, Shape, nullptr);
1923 
1924   // Finally, salvage the llvm.dbg.{declare,addr} in our original function that
1925   // point into the coroutine frame. We only do this for the current function
1926   // since the Cloner salvaged debug info for us in the new coroutine funclets.
1927   SmallVector<DbgVariableIntrinsic *, 8> Worklist;
1928   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1929   for (auto &BB : F) {
1930     for (auto &I : BB) {
1931       if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) {
1932         Worklist.push_back(DDI);
1933         continue;
1934       }
1935       if (auto *DDI = dyn_cast<DbgAddrIntrinsic>(&I)) {
1936         Worklist.push_back(DDI);
1937         continue;
1938       }
1939     }
1940   }
1941   for (auto *DDI : Worklist)
1942     coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1943 
1944   return Shape;
1945 }
1946 
1947 static void
1948 updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1949                                    const SmallVectorImpl<Function *> &Clones,
1950                                    CallGraph &CG, CallGraphSCC &SCC) {
1951   if (!Shape.CoroBegin)
1952     return;
1953 
1954   removeCoroEnds(Shape, &CG);
1955   postSplitCleanup(F);
1956 
1957   // Update call graph and add the functions we created to the SCC.
1958   coro::updateCallGraph(F, Clones, CG, SCC);
1959 }
1960 
1961 static void updateCallGraphAfterCoroutineSplit(
1962     LazyCallGraph::Node &N, const coro::Shape &Shape,
1963     const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1964     LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1965     FunctionAnalysisManager &FAM) {
1966   if (!Shape.CoroBegin)
1967     return;
1968 
1969   for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1970     auto &Context = End->getContext();
1971     End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1972     End->eraseFromParent();
1973   }
1974 
1975   if (!Clones.empty()) {
1976     switch (Shape.ABI) {
1977     case coro::ABI::Switch:
1978       // Each clone in the Switch lowering is independent of the other clones.
1979       // Let the LazyCallGraph know about each one separately.
1980       for (Function *Clone : Clones)
1981         CG.addSplitFunction(N.getFunction(), *Clone);
1982       break;
1983     case coro::ABI::Async:
1984     case coro::ABI::Retcon:
1985     case coro::ABI::RetconOnce:
1986       // Each clone in the Async/Retcon lowering references the other clones.
1987       // Let the LazyCallGraph know about all of them at once.
1988       if (!Clones.empty())
1989         CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1990       break;
1991     }
1992 
1993     // Let the CGSCC infra handle the changes to the original function.
1994     updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1995   }
1996 
1997   // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1998   // to the split functions.
1999   postSplitCleanup(N.getFunction());
2000   updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
2001 }
2002 
2003 // When we see the coroutine for the first time, we insert an indirect call to
2004 // a devirt trigger function and mark the coroutine as now being ready for
2005 // split.
2006 // Async lowering uses this after it has split the function to restart the
2007 // pipeline.
2008 static void prepareForSplit(Function &F, CallGraph &CG,
2009                             bool MarkForAsyncRestart = false) {
2010   Module &M = *F.getParent();
2011   LLVMContext &Context = F.getContext();
2012 #ifndef NDEBUG
2013   Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
2014   assert(DevirtFn && "coro.devirt.trigger function not found");
2015 #endif
2016 
2017   F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
2018                                       ? ASYNC_RESTART_AFTER_SPLIT
2019                                       : PREPARED_FOR_SPLIT);
2020 
2021   // Insert an indirect call sequence that will be devirtualized by CoroElide
2022   // pass:
2023   //    %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
2024   //    %1 = bitcast i8* %0 to void(i8*)*
2025   //    call void %1(i8* null)
2026   coro::LowererBase Lowerer(M);
2027   Instruction *InsertPt =
2028       MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
2029                           : F.getEntryBlock().getTerminator();
2030   auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
2031   auto *DevirtFnAddr =
2032       Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
2033   FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
2034                                          {Type::getInt8PtrTy(Context)}, false);
2035   auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
2036 
2037   // Update the call graph with the indirect call we just added.
2038   CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
2039 }
2040 
2041 // Make sure that there is a devirtualization trigger function that the
2042 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
2043 // trigger function is not found, we will create one and add it to the current
2044 // SCC.
2045 static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
2046   Module &M = CG.getModule();
2047   if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
2048     return;
2049 
2050   LLVMContext &C = M.getContext();
2051   auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
2052                                  /*isVarArg=*/false);
2053   Function *DevirtFn =
2054       Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
2055                        CORO_DEVIRT_TRIGGER_FN, &M);
2056   DevirtFn->addFnAttr(Attribute::AlwaysInline);
2057   auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
2058   ReturnInst::Create(C, Entry);
2059 
2060   auto *Node = CG.getOrInsertFunction(DevirtFn);
2061 
2062   SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
2063   Nodes.push_back(Node);
2064   SCC.initialize(Nodes);
2065 }
2066 
2067 /// Replace a call to llvm.coro.prepare.retcon.
2068 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
2069                            LazyCallGraph::SCC &C) {
2070   auto CastFn = Prepare->getArgOperand(0); // as an i8*
2071   auto Fn = CastFn->stripPointerCasts();   // as its original type
2072 
2073   // Attempt to peephole this pattern:
2074   //    %0 = bitcast [[TYPE]] @some_function to i8*
2075   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
2076   //    %2 = bitcast %1 to [[TYPE]]
2077   // ==>
2078   //    %2 = @some_function
2079   for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2080     // Look for bitcasts back to the original function type.
2081     auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2082     if (!Cast || Cast->getType() != Fn->getType())
2083       continue;
2084 
2085     // Replace and remove the cast.
2086     Cast->replaceAllUsesWith(Fn);
2087     Cast->eraseFromParent();
2088   }
2089 
2090   // Replace any remaining uses with the function as an i8*.
2091   // This can never directly be a callee, so we don't need to update CG.
2092   Prepare->replaceAllUsesWith(CastFn);
2093   Prepare->eraseFromParent();
2094 
2095   // Kill dead bitcasts.
2096   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2097     if (!Cast->use_empty())
2098       break;
2099     CastFn = Cast->getOperand(0);
2100     Cast->eraseFromParent();
2101   }
2102 }
2103 /// Replace a call to llvm.coro.prepare.retcon.
2104 static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
2105   auto CastFn = Prepare->getArgOperand(0); // as an i8*
2106   auto Fn = CastFn->stripPointerCasts(); // as its original type
2107 
2108   // Find call graph nodes for the preparation.
2109   CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
2110   if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
2111     PrepareUserNode = CG[Prepare->getFunction()];
2112     FnNode = CG[ConcreteFn];
2113   }
2114 
2115   // Attempt to peephole this pattern:
2116   //    %0 = bitcast [[TYPE]] @some_function to i8*
2117   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
2118   //    %2 = bitcast %1 to [[TYPE]]
2119   // ==>
2120   //    %2 = @some_function
2121   for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2122     // Look for bitcasts back to the original function type.
2123     auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2124     if (!Cast || Cast->getType() != Fn->getType()) continue;
2125 
2126     // Check whether the replacement will introduce new direct calls.
2127     // If so, we'll need to update the call graph.
2128     if (PrepareUserNode) {
2129       for (auto &Use : Cast->uses()) {
2130         if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
2131           if (!CB->isCallee(&Use))
2132             continue;
2133           PrepareUserNode->removeCallEdgeFor(*CB);
2134           PrepareUserNode->addCalledFunction(CB, FnNode);
2135         }
2136       }
2137     }
2138 
2139     // Replace and remove the cast.
2140     Cast->replaceAllUsesWith(Fn);
2141     Cast->eraseFromParent();
2142   }
2143 
2144   // Replace any remaining uses with the function as an i8*.
2145   // This can never directly be a callee, so we don't need to update CG.
2146   Prepare->replaceAllUsesWith(CastFn);
2147   Prepare->eraseFromParent();
2148 
2149   // Kill dead bitcasts.
2150   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2151     if (!Cast->use_empty()) break;
2152     CastFn = Cast->getOperand(0);
2153     Cast->eraseFromParent();
2154   }
2155 }
2156 
2157 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2158                                LazyCallGraph::SCC &C) {
2159   bool Changed = false;
2160   for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2161     // Intrinsics can only be used in calls.
2162     auto *Prepare = cast<CallInst>(P.getUser());
2163     replacePrepare(Prepare, CG, C);
2164     Changed = true;
2165   }
2166 
2167   return Changed;
2168 }
2169 
2170 /// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2171 /// IPO from operating on calls to a retcon coroutine before it's been
2172 /// split.  This is only safe to do after we've split all retcon
2173 /// coroutines in the module.  We can do this in this pass because
2174 /// this pass promises to split all retcon coroutines (as opposed to
2175 /// switch coroutines, which are lowered in multiple stages).
2176 static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2177   bool Changed = false;
2178   for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2179     // Intrinsics can only be used in calls.
2180     auto *Prepare = cast<CallInst>(P.getUser());
2181     replacePrepare(Prepare, CG);
2182     Changed = true;
2183   }
2184 
2185   return Changed;
2186 }
2187 
2188 static bool declaresCoroSplitIntrinsics(const Module &M) {
2189   return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2190                                       "llvm.coro.prepare.retcon",
2191                                       "llvm.coro.prepare.async"});
2192 }
2193 
2194 static void addPrepareFunction(const Module &M,
2195                                SmallVectorImpl<Function *> &Fns,
2196                                StringRef Name) {
2197   auto *PrepareFn = M.getFunction(Name);
2198   if (PrepareFn && !PrepareFn->use_empty())
2199     Fns.push_back(PrepareFn);
2200 }
2201 
2202 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2203                                      CGSCCAnalysisManager &AM,
2204                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2205   // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2206   //     non-zero number of nodes, so we assume that here and grab the first
2207   //     node's function's module.
2208   Module &M = *C.begin()->getFunction().getParent();
2209   auto &FAM =
2210       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2211 
2212   if (!declaresCoroSplitIntrinsics(M))
2213     return PreservedAnalyses::all();
2214 
2215   // Check for uses of llvm.coro.prepare.retcon/async.
2216   SmallVector<Function *, 2> PrepareFns;
2217   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2218   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2219 
2220   // Find coroutines for processing.
2221   SmallVector<LazyCallGraph::Node *, 4> Coroutines;
2222   for (LazyCallGraph::Node &N : C)
2223     if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2224       Coroutines.push_back(&N);
2225 
2226   if (Coroutines.empty() && PrepareFns.empty())
2227     return PreservedAnalyses::all();
2228 
2229   if (Coroutines.empty()) {
2230     for (auto *PrepareFn : PrepareFns) {
2231       replaceAllPrepares(PrepareFn, CG, C);
2232     }
2233   }
2234 
2235   // Split all the coroutines.
2236   for (LazyCallGraph::Node *N : Coroutines) {
2237     Function &F = N->getFunction();
2238     LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2239                       << "' state: "
2240                       << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
2241                       << "\n");
2242     F.removeFnAttr(CORO_PRESPLIT_ATTR);
2243 
2244     SmallVector<Function *, 4> Clones;
2245     const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame);
2246     updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2247 
2248     if (!Shape.CoroSuspends.empty()) {
2249       // Run the CGSCC pipeline on the original and newly split functions.
2250       UR.CWorklist.insert(&C);
2251       for (Function *Clone : Clones)
2252         UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2253     }
2254   }
2255 
2256   if (!PrepareFns.empty()) {
2257     for (auto *PrepareFn : PrepareFns) {
2258       replaceAllPrepares(PrepareFn, CG, C);
2259     }
2260   }
2261 
2262   return PreservedAnalyses::none();
2263 }
2264 
2265 namespace {
2266 
2267 // We present a coroutine to LLVM as an ordinary function with suspension
2268 // points marked up with intrinsics. We let the optimizer party on the coroutine
2269 // as a single function for as long as possible. Shortly before the coroutine is
2270 // eligible to be inlined into its callers, we split up the coroutine into parts
2271 // corresponding to initial, resume and destroy invocations of the coroutine,
2272 // add them to the current SCC and restart the IPO pipeline to optimize the
2273 // coroutine subfunctions we extracted before proceeding to the caller of the
2274 // coroutine.
2275 struct CoroSplitLegacy : public CallGraphSCCPass {
2276   static char ID; // Pass identification, replacement for typeid
2277 
2278   CoroSplitLegacy(bool OptimizeFrame = false)
2279       : CallGraphSCCPass(ID), OptimizeFrame(OptimizeFrame) {
2280     initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
2281   }
2282 
2283   bool Run = false;
2284   bool OptimizeFrame;
2285 
2286   // A coroutine is identified by the presence of the coro.begin intrinsic; if
2287   // we don't have any, this pass has nothing to do.
2288   bool doInitialization(CallGraph &CG) override {
2289     Run = declaresCoroSplitIntrinsics(CG.getModule());
2290     return CallGraphSCCPass::doInitialization(CG);
2291   }
2292 
2293   bool runOnSCC(CallGraphSCC &SCC) override {
2294     if (!Run)
2295       return false;
2296 
2297     // Check for uses of llvm.coro.prepare.retcon/async.
2298     SmallVector<Function *, 2> PrepareFns;
2299     auto &M = SCC.getCallGraph().getModule();
2300     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2301     addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2302 
2303     // Find coroutines for processing.
2304     SmallVector<Function *, 4> Coroutines;
2305     for (CallGraphNode *CGN : SCC)
2306       if (auto *F = CGN->getFunction())
2307         if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
2308           Coroutines.push_back(F);
2309 
2310     if (Coroutines.empty() && PrepareFns.empty())
2311       return false;
2312 
2313     CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
2314 
2315     if (Coroutines.empty()) {
2316       bool Changed = false;
2317       for (auto *PrepareFn : PrepareFns)
2318         Changed |= replaceAllPrepares(PrepareFn, CG);
2319       return Changed;
2320     }
2321 
2322     createDevirtTriggerFunc(CG, SCC);
2323 
2324     // Split all the coroutines.
2325     for (Function *F : Coroutines) {
2326       Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
2327       StringRef Value = Attr.getValueAsString();
2328       LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
2329                         << "' state: " << Value << "\n");
2330       // Async lowering marks coroutines to trigger a restart of the pipeline
2331       // after it has split them.
2332       if (Value == ASYNC_RESTART_AFTER_SPLIT) {
2333         F->removeFnAttr(CORO_PRESPLIT_ATTR);
2334         continue;
2335       }
2336       if (Value == UNPREPARED_FOR_SPLIT) {
2337         prepareForSplit(*F, CG);
2338         continue;
2339       }
2340       F->removeFnAttr(CORO_PRESPLIT_ATTR);
2341 
2342       SmallVector<Function *, 4> Clones;
2343       const coro::Shape Shape = splitCoroutine(*F, Clones, OptimizeFrame);
2344       updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
2345       if (Shape.ABI == coro::ABI::Async) {
2346         // Restart SCC passes.
2347         // Mark the function for the CoroElide pass. It will devirtualize,
2348         // causing a restart of the SCC pipeline.
2349         prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
2350       }
2351     }
2352 
2353     for (auto *PrepareFn : PrepareFns)
2354       replaceAllPrepares(PrepareFn, CG);
2355 
2356     return true;
2357   }
2358 
2359   void getAnalysisUsage(AnalysisUsage &AU) const override {
2360     CallGraphSCCPass::getAnalysisUsage(AU);
2361   }
2362 
2363   StringRef getPassName() const override { return "Coroutine Splitting"; }
2364 };
2365 
2366 } // end anonymous namespace
2367 
2368 char CoroSplitLegacy::ID = 0;
2369 
2370 INITIALIZE_PASS_BEGIN(
2371     CoroSplitLegacy, "coro-split",
2372     "Split coroutine into a set of functions driving its state machine", false,
2373     false)
2374 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
2375 INITIALIZE_PASS_END(
2376     CoroSplitLegacy, "coro-split",
2377     "Split coroutine into a set of functions driving its state machine", false,
2378     false)
2379 
2380 Pass *llvm::createCoroSplitLegacyPass(bool OptimizeFrame) {
2381   return new CoroSplitLegacy(OptimizeFrame);
2382 }
2383