1 //===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This pass builds the coroutine frame and outlines resume and destroy parts
9 // of the coroutine into separate functions.
10 //
11 // We present a coroutine to LLVM as an ordinary function with suspension
12 // points marked up with intrinsics. We let the optimizer party on the coroutine
13 // as a single function for as long as possible. Shortly before the coroutine is
14 // eligible to be inlined into its callers, we split up the coroutine into parts
15 // corresponding to the initial, resume, and destroy invocations of the coroutine,
16 // add them to the current SCC and restart the IPO pipeline to optimize the
17 // coroutine subfunctions we extracted before proceeding to the caller of the
18 // coroutine.
19 //===----------------------------------------------------------------------===//
20 
21 #include "llvm/Transforms/Coroutines/CoroSplit.h"
22 #include "CoroInstr.h"
23 #include "CoroInternal.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/PriorityWorklist.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/StringRef.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/Analysis/CFG.h"
31 #include "llvm/Analysis/CallGraph.h"
32 #include "llvm/Analysis/ConstantFolding.h"
33 #include "llvm/Analysis/LazyCallGraph.h"
34 #include "llvm/BinaryFormat/Dwarf.h"
35 #include "llvm/IR/Argument.h"
36 #include "llvm/IR/Attributes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/CFG.h"
39 #include "llvm/IR/CallingConv.h"
40 #include "llvm/IR/Constants.h"
41 #include "llvm/IR/DataLayout.h"
42 #include "llvm/IR/DerivedTypes.h"
43 #include "llvm/IR/Dominators.h"
44 #include "llvm/IR/Function.h"
45 #include "llvm/IR/GlobalValue.h"
46 #include "llvm/IR/GlobalVariable.h"
47 #include "llvm/IR/IRBuilder.h"
48 #include "llvm/IR/InstIterator.h"
49 #include "llvm/IR/InstrTypes.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/IntrinsicInst.h"
53 #include "llvm/IR/LLVMContext.h"
54 #include "llvm/IR/Module.h"
55 #include "llvm/IR/Type.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/IR/Verifier.h"
58 #include "llvm/Support/Casting.h"
59 #include "llvm/Support/Debug.h"
60 #include "llvm/Support/PrettyStackTrace.h"
61 #include "llvm/Support/raw_ostream.h"
62 #include "llvm/Transforms/Scalar.h"
63 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
64 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
65 #include "llvm/Transforms/Utils/Cloning.h"
66 #include "llvm/Transforms/Utils/Local.h"
67 #include "llvm/Transforms/Utils/ValueMapper.h"
68 #include <cassert>
69 #include <cstddef>
70 #include <cstdint>
71 #include <initializer_list>
72 #include <iterator>
73 
74 using namespace llvm;
75 
76 #define DEBUG_TYPE "coro-split"
77 
78 namespace {
79 
80 /// A little helper class for building clones of the coroutine body.
81 class CoroCloner {
82 public:
83   enum class Kind {
84     /// The shared resume function for a switch lowering.
85     SwitchResume,
86 
87     /// The shared unwind function for a switch lowering.
88     SwitchUnwind,
89 
90     /// The shared cleanup function for a switch lowering.
91     SwitchCleanup,
92 
93     /// An individual continuation function.
94     Continuation,
95 
96     /// An async resume function.
97     Async,
98   };
99 
100 private:
101   Function &OrigF;
102   Function *NewF;
103   const Twine &Suffix;
104   coro::Shape &Shape;
105   Kind FKind;
106   ValueToValueMapTy VMap;
107   IRBuilder<> Builder;
108   Value *NewFramePtr = nullptr;
109 
110   /// The active suspend instruction; meaningful only for continuation and async
111   /// ABIs.
112   AnyCoroSuspendInst *ActiveSuspend = nullptr;
113 
114 public:
115   /// Create a cloner for a switch lowering.
116   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
117              Kind FKind)
118     : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
119       FKind(FKind), Builder(OrigF.getContext()) {
120     assert(Shape.ABI == coro::ABI::Switch);
121   }
122 
123   /// Create a cloner for a continuation lowering.
124   CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
125              Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
126       : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
127         FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
128         Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
129     assert(Shape.ABI == coro::ABI::Retcon ||
130            Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
131     assert(NewF && "need existing function for continuation");
132     assert(ActiveSuspend && "need active suspend point for continuation");
133   }
134 
135   Function *getFunction() const {
136     assert(NewF != nullptr && "declaration not yet set");
137     return NewF;
138   }
139 
140   void create();
141 
142 private:
143   bool isSwitchDestroyFunction() {
144     switch (FKind) {
145     case Kind::Async:
146     case Kind::Continuation:
147     case Kind::SwitchResume:
148       return false;
149     case Kind::SwitchUnwind:
150     case Kind::SwitchCleanup:
151       return true;
152     }
153     llvm_unreachable("Unknown CoroCloner::Kind enum");
154   }
155 
156   void replaceEntryBlock();
157   Value *deriveNewFramePointer();
158   void replaceRetconOrAsyncSuspendUses();
159   void replaceCoroSuspends();
160   void replaceCoroEnds();
161   void replaceSwiftErrorOps();
162   void salvageDebugInfo();
163   void handleFinalSuspend();
164 };
165 
166 } // end anonymous namespace
167 
168 static void maybeFreeRetconStorage(IRBuilder<> &Builder,
169                                    const coro::Shape &Shape, Value *FramePtr,
170                                    CallGraph *CG) {
171   assert(Shape.ABI == coro::ABI::Retcon ||
172          Shape.ABI == coro::ABI::RetconOnce);
173   if (Shape.RetconLowering.IsFrameInlineInStorage)
174     return;
175 
176   Shape.emitDealloc(Builder, FramePtr, CG);
177 }
178 
179 /// Replace an llvm.coro.end.async.
180 /// Will inline the must-tail-call function if there is one.
181 /// \returns true if cleanup of the coro.end block is needed, false otherwise.
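/// For example (illustrative sketch only; the names below are made up): given
/// a predecessor block that ends in
///     call void @must.tail.call.helper(...)
///     br label %coro.end.bb
/// the helper call is spliced in front of the coro.end, a 'ret void' is
/// inserted after it, and the helper (which wraps the actual musttail call)
/// is inlined, so the clone ends in 'musttail call ...; ret void'.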
182 static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
183   IRBuilder<> Builder(End);
184 
185   auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
186   if (!EndAsync) {
187     Builder.CreateRetVoid();
188     return true /*needs cleanup of coro.end block*/;
189   }
190 
191   auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
192   if (!MustTailCallFunc) {
193     Builder.CreateRetVoid();
194     return true /*needs cleanup of coro.end block*/;
195   }
196 
197   // Move the must tail call from the predecessor block into the end block.
198   auto *CoroEndBlock = End->getParent();
199   auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
200   assert(MustTailCallFuncBlock && "Must have a single predecessor block");
201   auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
202   auto *MustTailCall = cast<CallInst>(&*std::prev(It));
203   CoroEndBlock->getInstList().splice(
204       End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
205 
206   // Insert the return instruction.
207   Builder.SetInsertPoint(End);
208   Builder.CreateRetVoid();
209   InlineFunctionInfo FnInfo;
210 
211   // Remove the rest of the block, by splitting it into an unreachable block.
212   auto *BB = End->getParent();
213   BB->splitBasicBlock(End);
214   BB->getTerminator()->eraseFromParent();
215 
216   auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
217   assert(InlineRes.isSuccess() && "Expected inlining to succeed");
218   (void)InlineRes;
219 
220   // We have cleaned up the coro.end block above.
221   return false;
222 }
223 
224 /// Replace a non-unwind call to llvm.coro.end.
225 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
226                                       const coro::Shape &Shape, Value *FramePtr,
227                                       bool InResume, CallGraph *CG) {
228   // Start inserting right before the coro.end.
229   IRBuilder<> Builder(End);
230 
231   // Create the return instruction.
232   switch (Shape.ABI) {
233   // The cloned functions in switch-lowering always return void.
234   case coro::ABI::Switch:
235     // coro.end doesn't immediately end the coroutine in the main function
236     // in this lowering, because we need to deallocate the coroutine.
237     if (!InResume)
238       return;
239     Builder.CreateRetVoid();
240     break;
241 
242   // In async lowering this returns.
243   case coro::ABI::Async: {
244     bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
245     if (!CoroEndBlockNeedsCleanup)
246       return;
247     break;
248   }
249 
250   // In unique continuation lowering, the continuations always return void.
251   // But we may have implicitly allocated storage.
252   case coro::ABI::RetconOnce:
253     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
254     Builder.CreateRetVoid();
255     break;
256 
257   // In non-unique continuation lowering, we signal completion by returning
258   // a null continuation.
259   case coro::ABI::Retcon: {
260     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
261     auto RetTy = Shape.getResumeFunctionType()->getReturnType();
262     auto RetStructTy = dyn_cast<StructType>(RetTy);
263     PointerType *ContinuationTy =
264       cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
265 
266     Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
267     if (RetStructTy) {
268       ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
269                                               ReturnValue, 0);
270     }
271     Builder.CreateRet(ReturnValue);
272     break;
273   }
274   }
275 
276   // Remove the rest of the block, by splitting it into an unreachable block.
277   auto *BB = End->getParent();
278   BB->splitBasicBlock(End);
279   BB->getTerminator()->eraseFromParent();
280 }
281 
282 // Mark a coroutine as done, which implies that the coroutine is finished and
283 // never gets resumed.
284 //
285 // In the switch-resumed ABI, the done state is represented by storing zero in
286 // ResumeFnAddr.
287 //
288 // NOTE: We cannot omit the argument `FramePtr`. It is necessary because the
289 // pointer to the frame in the split function is not stored in `Shape`.
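//
// Roughly, the emitted IR looks like this (illustrative sketch; the actual
// field index comes from coro::Shape::SwitchFieldIndex::Resume):
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 0
//   store void (%f.Frame*)* null, void (%f.Frame*)** %ResumeFn.addr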
290 static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
291                                 Value *FramePtr) {
292   assert(
293       Shape.ABI == coro::ABI::Switch &&
294       "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
295   auto *GepIndex = Builder.CreateStructGEP(
296       Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
297       "ResumeFn.addr");
298   auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
299       Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
300   Builder.CreateStore(NullPtr, GepIndex);
301 }
302 
303 /// Replace an unwind call to llvm.coro.end.
304 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
305                                  Value *FramePtr, bool InResume,
306                                  CallGraph *CG) {
307   IRBuilder<> Builder(End);
308 
309   switch (Shape.ABI) {
310   // In switch-lowering, this does nothing in the main function.
311   case coro::ABI::Switch: {
312     // In C++'s specification, the coroutine should be marked as done
313     // if promise.unhandled_exception() throws.  The frontend will
314     // call coro.end(true) along this path.
315     //
316     // FIXME: We should refactor this once there is another language that
317     // uses the switch-resumed style besides C++.
318     markCoroutineAsDone(Builder, Shape, FramePtr);
319     if (!InResume)
320       return;
321     break;
322   }
323   // In async lowering this does nothing.
324   case coro::ABI::Async:
325     break;
326   // In continuation-lowering, this frees the continuation storage.
327   case coro::ABI::Retcon:
328   case coro::ABI::RetconOnce:
329     maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
330     break;
331   }
332 
333   // If coro.end has an associated bundle, add cleanupret instruction.
334   if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
335     auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
336     auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
337     End->getParent()->splitBasicBlock(End);
338     CleanupRet->getParent()->getTerminator()->eraseFromParent();
339   }
340 }
341 
342 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
343                            Value *FramePtr, bool InResume, CallGraph *CG) {
344   if (End->isUnwind())
345     replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
346   else
347     replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
348 
349   auto &Context = End->getContext();
350   End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
351                                    : ConstantInt::getFalse(Context));
352   End->eraseFromParent();
353 }
354 
355 // Create an entry block for a resume function with a switch that will jump to
356 // suspend points.
357 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
358   assert(Shape.ABI == coro::ABI::Switch);
359   LLVMContext &C = F.getContext();
360 
361   // resume.entry:
362   //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
363   //  i32 2
364   //  %index = load i32, i32* %index.addr
365   //  switch i32 %index, label %unreachable [
366   //    i32 0, label %resume.0
367   //    i32 1, label %resume.1
368   //    ...
369   //  ]
370 
371   auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
372   auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
373 
374   IRBuilder<> Builder(NewEntry);
375   auto *FramePtr = Shape.FramePtr;
376   auto *FrameTy = Shape.FrameTy;
377   auto *GepIndex = Builder.CreateStructGEP(
378       FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
379   auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
380   auto *Switch =
381       Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
382   Shape.SwitchLowering.ResumeSwitch = Switch;
383 
384   size_t SuspendIndex = 0;
385   for (auto *AnyS : Shape.CoroSuspends) {
386     auto *S = cast<CoroSuspendInst>(AnyS);
387     ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
388 
389     // Replace CoroSave with a store to Index:
390     //    %index.addr = getelementptr %f.frame... (index field number)
391     //    store i32 0, i32* %index.addr1
392     auto *Save = S->getCoroSave();
393     Builder.SetInsertPoint(Save);
394     if (S->isFinal()) {
395       // The coroutine should be marked done if it reaches the final suspend
396       // point.
397       markCoroutineAsDone(Builder, Shape, FramePtr);
398     } else {
399       auto *GepIndex = Builder.CreateStructGEP(
400           FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
401       Builder.CreateStore(IndexVal, GepIndex);
402     }
403     Save->replaceAllUsesWith(ConstantTokenNone::get(C));
404     Save->eraseFromParent();
405 
406     // Split the block before and after coro.suspend and add a jump from the
407     // entry switch:
408     //
409     //  whateverBB:
410     //    whatever
411     //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
412     //    switch i8 %0, label %suspend[i8 0, label %resume
413     //                                 i8 1, label %cleanup]
414     // becomes:
415     //
416     //  whateverBB:
417     //     whatever
418     //     br label %resume.0.landing
419     //
420     //  resume.0: ; <--- jump from the switch in the resume.entry
421     //     %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
422     //     br label %resume.0.landing
423     //
424     //  resume.0.landing:
425     //     %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
426     //     switch i8 %1, label %suspend [i8 0, label %resume
427     //                                    i8 1, label %cleanup]
428 
429     auto *SuspendBB = S->getParent();
430     auto *ResumeBB =
431         SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
432     auto *LandingBB = ResumeBB->splitBasicBlock(
433         S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
434     Switch->addCase(IndexVal, ResumeBB);
435 
436     cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
437     auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
438     S->replaceAllUsesWith(PN);
439     PN->addIncoming(Builder.getInt8(-1), SuspendBB);
440     PN->addIncoming(S, ResumeBB);
441 
442     ++SuspendIndex;
443   }
444 
445   Builder.SetInsertPoint(UnreachBB);
446   Builder.CreateUnreachable();
447 
448   Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
449 }
450 
451 
452 // Rewrite final suspend point handling. We do not use suspend index to
453 // represent the final suspend point. Instead we zero-out ResumeFnAddr in the
454 // coroutine frame, since it is undefined behavior to resume a coroutine
455 // suspended at the final suspend point. Thus, in the resume function, we can
456 // simply remove the last case (when coro::Shape is built, the final suspend
457 // point (if present) is always the last element of the CoroSuspends array).
458 // In the destroy function, we add a code sequence to check if ResumeFnAddr
459 // is null, and if so, jump to the appropriate label to handle cleanup from the
460 // final suspend point.
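//
// In the destroy clone the inserted check roughly looks like this
// (illustrative sketch; %resume.N stands for whichever block handles cleanup
// from the final suspend point):
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 0
//   %ResumeFn = load void (%f.Frame*)*, void (%f.Frame*)** %ResumeFn.addr
//   %is.done = icmp eq void (%f.Frame*)* %ResumeFn, null
//   br i1 %is.done, label %resume.N, label %Switch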
461 void CoroCloner::handleFinalSuspend() {
462   assert(Shape.ABI == coro::ABI::Switch &&
463          Shape.SwitchLowering.HasFinalSuspend);
464   auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
465   auto FinalCaseIt = std::prev(Switch->case_end());
466   BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
467   Switch->removeCase(FinalCaseIt);
468   if (isSwitchDestroyFunction()) {
469     BasicBlock *OldSwitchBB = Switch->getParent();
470     auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
471     Builder.SetInsertPoint(OldSwitchBB->getTerminator());
472     auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
473                                        coro::Shape::SwitchFieldIndex::Resume,
474                                              "ResumeFn.addr");
475     auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
476                                     GepIndex);
477     auto *Cond = Builder.CreateIsNull(Load);
478     Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
479     OldSwitchBB->getTerminator()->eraseFromParent();
480   }
481 }
482 
483 static FunctionType *
484 getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
485   auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
486   auto *StructTy = cast<StructType>(AsyncSuspend->getType());
487   auto &Context = Suspend->getParent()->getParent()->getContext();
488   auto *VoidTy = Type::getVoidTy(Context);
489   return FunctionType::get(VoidTy, StructTy->elements(), false);
490 }
491 
492 static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
493                                         const Twine &Suffix,
494                                         Module::iterator InsertBefore,
495                                         AnyCoroSuspendInst *ActiveSuspend) {
496   Module *M = OrigF.getParent();
497   auto *FnTy = (Shape.ABI != coro::ABI::Async)
498                    ? Shape.getResumeFunctionType()
499                    : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
500 
501   Function *NewF =
502       Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
503                        OrigF.getName() + Suffix);
504   if (Shape.ABI != coro::ABI::Async)
505     NewF->addParamAttr(0, Attribute::NonNull);
506 
507   // For the async lowering ABI we can't guarantee that the context argument is
508   // not accessed via a different pointer that is not based on the argument.
509   if (Shape.ABI != coro::ABI::Async)
510     NewF->addParamAttr(0, Attribute::NoAlias);
511 
512   M->getFunctionList().insert(InsertBefore, NewF);
513 
514   return NewF;
515 }
516 
517 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the
518 /// arguments to the continuation function.
519 ///
520 /// This assumes that the builder has a meaningful insertion point.
521 void CoroCloner::replaceRetconOrAsyncSuspendUses() {
522   assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
523          Shape.ABI == coro::ABI::Async);
524 
525   auto NewS = VMap[ActiveSuspend];
526   if (NewS->use_empty()) return;
527 
528   // Copy out all the continuation arguments after the buffer pointer into
529   // an easily-indexed data structure for convenience.
530   SmallVector<Value*, 8> Args;
531   // The async ABI includes all arguments -- including the first argument.
532   bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
533   for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
534             E = NewF->arg_end();
535        I != E; ++I)
536     Args.push_back(&*I);
537 
538   // If the suspend returns a single scalar value, we can just do a simple
539   // replacement.
540   if (!isa<StructType>(NewS->getType())) {
541     assert(Args.size() == 1);
542     NewS->replaceAllUsesWith(Args.front());
543     return;
544   }
545 
546   // Try to peephole extracts of an aggregate return.
547   for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
548     auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
549     if (!EVI || EVI->getNumIndices() != 1)
550       continue;
551 
552     EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
553     EVI->eraseFromParent();
554   }
555 
556   // If we have no remaining uses, we're done.
557   if (NewS->use_empty()) return;
558 
559   // Otherwise, we need to create an aggregate.
560   Value *Agg = UndefValue::get(NewS->getType());
561   for (size_t I = 0, E = Args.size(); I != E; ++I)
562     Agg = Builder.CreateInsertValue(Agg, Args[I], I);
563 
564   NewS->replaceAllUsesWith(Agg);
565 }
566 
567 void CoroCloner::replaceCoroSuspends() {
568   Value *SuspendResult;
569 
570   switch (Shape.ABI) {
571   // In switch lowering, replace coro.suspend with the appropriate value
572   // for the type of function we're extracting.
573   // Replacing coro.suspend with (0) will result in control flow proceeding to
574   // a resume label associated with a suspend point, replacing it with (1) will
575   // result in control flow proceeding to a cleanup label associated with this
576   // suspend point.
577   case coro::ABI::Switch:
578     SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
579     break;
580 
581   // In async lowering there are no uses of the result.
582   case coro::ABI::Async:
583     return;
584 
585   // In returned-continuation lowering, the arguments from earlier
586   // continuations are theoretically arbitrary, and they should have been
587   // spilled.
588   case coro::ABI::RetconOnce:
589   case coro::ABI::Retcon:
590     return;
591   }
592 
593   for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
594     // The active suspend was handled earlier.
595     if (CS == ActiveSuspend) continue;
596 
597     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
598     MappedCS->replaceAllUsesWith(SuspendResult);
599     MappedCS->eraseFromParent();
600   }
601 }
602 
603 void CoroCloner::replaceCoroEnds() {
604   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
605     // We use a null call graph because there's no call graph node for
606     // the cloned function yet.  We'll just be rebuilding that later.
607     auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
608     replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
609   }
610 }
611 
612 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
613                                  ValueToValueMapTy *VMap) {
614   if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
615     return;
616   Value *CachedSlot = nullptr;
617   auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
618     if (CachedSlot) {
619       assert(cast<PointerType>(CachedSlot->getType())
620                  ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
621              "multiple swifterror slots in function with different types");
622       return CachedSlot;
623     }
624 
625     // Check if the function has a swifterror argument.
626     for (auto &Arg : F.args()) {
627       if (Arg.isSwiftError()) {
628         CachedSlot = &Arg;
629         assert(cast<PointerType>(Arg.getType())
630                    ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
631                "swifterror argument does not have expected type");
632         return &Arg;
633       }
634     }
635 
636     // Create a swifterror alloca.
637     IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
638     auto Alloca = Builder.CreateAlloca(ValueTy);
639     Alloca->setSwiftError(true);
640 
641     CachedSlot = Alloca;
642     return Alloca;
643   };
644 
645   for (CallInst *Op : Shape.SwiftErrorOps) {
646     auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
647     IRBuilder<> Builder(MappedOp);
648 
649     // If there are no arguments, this is a 'get' operation.
650     Value *MappedResult;
651     if (Op->arg_empty()) {
652       auto ValueTy = Op->getType();
653       auto Slot = getSwiftErrorSlot(ValueTy);
654       MappedResult = Builder.CreateLoad(ValueTy, Slot);
655     } else {
656       assert(Op->arg_size() == 1);
657       auto Value = MappedOp->getArgOperand(0);
658       auto ValueTy = Value->getType();
659       auto Slot = getSwiftErrorSlot(ValueTy);
660       Builder.CreateStore(Value, Slot);
661       MappedResult = Slot;
662     }
663 
664     MappedOp->replaceAllUsesWith(MappedResult);
665     MappedOp->eraseFromParent();
666   }
667 
668   // If we're updating the original function, we've invalidated SwiftErrorOps.
669   if (VMap == nullptr) {
670     Shape.SwiftErrorOps.clear();
671   }
672 }
673 
674 void CoroCloner::replaceSwiftErrorOps() {
675   ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
676 }
677 
678 void CoroCloner::salvageDebugInfo() {
679   SmallVector<DbgVariableIntrinsic *, 8> Worklist;
680   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
681   for (auto &BB : *NewF)
682     for (auto &I : BB)
683       if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
684         Worklist.push_back(DVI);
685   for (DbgVariableIntrinsic *DVI : Worklist)
686     coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);
687 
688   // Remove all salvaged dbg.declare intrinsics that became
689   // either unreachable or stale due to the CoroSplit transformation.
690   DominatorTree DomTree(*NewF);
691   auto IsUnreachableBlock = [&](BasicBlock *BB) {
692     return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
693                                    &DomTree);
694   };
695   for (DbgVariableIntrinsic *DVI : Worklist) {
696     if (IsUnreachableBlock(DVI->getParent()))
697       DVI->eraseFromParent();
698     else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
699       // Count all non-debuginfo uses in reachable blocks.
700       unsigned Uses = 0;
701       for (auto *User : DVI->getVariableLocationOp(0)->users())
702         if (auto *I = dyn_cast<Instruction>(User))
703           if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
704             ++Uses;
705       if (!Uses)
706         DVI->eraseFromParent();
707     }
708   }
709 }
710 
711 void CoroCloner::replaceEntryBlock() {
712   // In the original function, the AllocaSpillBlock is a block immediately
713   // following the allocation of the frame object which defines GEPs for
714   // all the allocas that have been moved into the frame, and it ends by
715   // branching to the original beginning of the coroutine.  Make this
716   // the entry block of the cloned function.
717   auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
718   auto *OldEntry = &NewF->getEntryBlock();
719   Entry->setName("entry" + Suffix);
720   Entry->moveBefore(OldEntry);
721   Entry->getTerminator()->eraseFromParent();
722 
723   // Clear all predecessors of the new entry block.  There should be
724   // exactly one predecessor, which we created when splitting out
725   // AllocaSpillBlock to begin with.
726   assert(Entry->hasOneUse());
727   auto BranchToEntry = cast<BranchInst>(Entry->user_back());
728   assert(BranchToEntry->isUnconditional());
729   Builder.SetInsertPoint(BranchToEntry);
730   Builder.CreateUnreachable();
731   BranchToEntry->eraseFromParent();
732 
733   // Branch from the entry to the appropriate place.
734   Builder.SetInsertPoint(Entry);
735   switch (Shape.ABI) {
736   case coro::ABI::Switch: {
737     // In switch-lowering, we built a resume-entry block in the original
738     // function.  Make the entry block branch to this.
739     auto *SwitchBB =
740       cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
741     Builder.CreateBr(SwitchBB);
742     break;
743   }
744   case coro::ABI::Async:
745   case coro::ABI::Retcon:
746   case coro::ABI::RetconOnce: {
747     // In continuation ABIs, we want to branch to immediately after the
748     // active suspend point.  Earlier phases will have put the suspend in its
749     // own basic block, so just thread our jump directly to its successor.
750     assert((Shape.ABI == coro::ABI::Async &&
751             isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
752            ((Shape.ABI == coro::ABI::Retcon ||
753              Shape.ABI == coro::ABI::RetconOnce) &&
754             isa<CoroSuspendRetconInst>(ActiveSuspend)));
755     auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
756     auto Branch = cast<BranchInst>(MappedCS->getNextNode());
757     assert(Branch->isUnconditional());
758     Builder.CreateBr(Branch->getSuccessor(0));
759     break;
760   }
761   }
762 
763   // Any static alloca that's still being used but not reachable from the new
764   // entry needs to be moved to the new entry.
765   Function *F = OldEntry->getParent();
766   DominatorTree DT{*F};
767   for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
768     auto *Alloca = dyn_cast<AllocaInst>(&I);
769     if (!Alloca || I.use_empty())
770       continue;
771     if (DT.isReachableFromEntry(I.getParent()) ||
772         !isa<ConstantInt>(Alloca->getArraySize()))
773       continue;
774     I.moveBefore(*Entry, Entry->getFirstInsertionPt());
775   }
776 }
777 
778 /// Derive the value of the new frame pointer.
779 Value *CoroCloner::deriveNewFramePointer() {
780   // Builder should be inserting to the front of the new entry block.
781 
782   switch (Shape.ABI) {
783   // In switch-lowering, the argument is the frame pointer.
784   case coro::ABI::Switch:
785     return &*NewF->arg_begin();
786   // In async-lowering, one of the arguments is an async context as determined
787   // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
788   // the resume function from the async context projection function associated
789   // with the active suspend. The frame is located immediately after the async
790   // context header.
791   case coro::ABI::Async: {
792     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
793     auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
794     auto *CalleeContext = NewF->getArg(ContextIdx);
795     auto *FramePtrTy = Shape.FrameTy->getPointerTo();
796     auto *ProjectionFunc =
797         ActiveAsyncSuspend->getAsyncContextProjectionFunction();
798     auto DbgLoc =
799         cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
800     // Calling i8* (i8*)
801     auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
802                                              ProjectionFunc, CalleeContext);
803     CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
804     CallerContext->setDebugLoc(DbgLoc);
805     // The frame is located after the async_context header.
806     auto &Context = Builder.getContext();
807     auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
808         Type::getInt8Ty(Context), CallerContext,
809         Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
810     // Inline the projection function.
811     InlineFunctionInfo InlineInfo;
812     auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
813     assert(InlineRes.isSuccess());
814     (void)InlineRes;
815     return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
816   }
817   // In continuation-lowering, the argument is the opaque storage.
818   case coro::ABI::Retcon:
819   case coro::ABI::RetconOnce: {
820     Argument *NewStorage = &*NewF->arg_begin();
821     auto FramePtrTy = Shape.FrameTy->getPointerTo();
822 
823     // If the storage is inline, just bitcast the storage to the frame type.
824     if (Shape.RetconLowering.IsFrameInlineInStorage)
825       return Builder.CreateBitCast(NewStorage, FramePtrTy);
826 
827     // Otherwise, load the real frame from the opaque storage.
828     auto FramePtrPtr =
829       Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
830     return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
831   }
832   }
833   llvm_unreachable("bad ABI");
834 }
835 
836 static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
837                                  unsigned ParamIndex,
838                                  uint64_t Size, Align Alignment) {
839   AttrBuilder ParamAttrs(Context);
840   ParamAttrs.addAttribute(Attribute::NonNull);
841   ParamAttrs.addAttribute(Attribute::NoAlias);
842   ParamAttrs.addAlignmentAttr(Alignment);
843   ParamAttrs.addDereferenceableAttr(Size);
844   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
845 }
846 
847 static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
848                                  unsigned ParamIndex) {
849   AttrBuilder ParamAttrs(Context);
850   ParamAttrs.addAttribute(Attribute::SwiftAsync);
851   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
852 }
853 
854 static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
855                               unsigned ParamIndex) {
856   AttrBuilder ParamAttrs(Context);
857   ParamAttrs.addAttribute(Attribute::SwiftSelf);
858   Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
859 }
860 
861 /// Clone the body of the original function into a resume function of
862 /// some sort.
863 void CoroCloner::create() {
864   // Create the new function if we don't already have one.
865   if (!NewF) {
866     NewF = createCloneDeclaration(OrigF, Shape, Suffix,
867                                   OrigF.getParent()->end(), ActiveSuspend);
868   }
869 
870   // Replace all args with dummy instructions. If an argument is the old frame
871   // pointer, the dummy will be replaced by the new frame pointer once it is
872   // computed below. Uses of all other arguments should have already been
873   // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
874   // frame.
875   SmallVector<Instruction *> DummyArgs;
876   for (Argument &A : OrigF.args()) {
877     DummyArgs.push_back(new FreezeInst(UndefValue::get(A.getType())));
878     VMap[&A] = DummyArgs.back();
879   }
880 
881   SmallVector<ReturnInst *, 4> Returns;
882 
883   // Ignore attempts to change certain attributes of the function.
884   // TODO: maybe there should be a way to suppress this during cloning?
885   auto savedVisibility = NewF->getVisibility();
886   auto savedUnnamedAddr = NewF->getUnnamedAddr();
887   auto savedDLLStorageClass = NewF->getDLLStorageClass();
888 
889   // NewF's linkage (which CloneFunctionInto does *not* change) might not
890   // be compatible with the visibility of OrigF (which it *does* change),
891   // so protect against that.
892   auto savedLinkage = NewF->getLinkage();
893   NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
894 
895   CloneFunctionInto(NewF, &OrigF, VMap,
896                     CloneFunctionChangeType::LocalChangesOnly, Returns);
897 
898   auto &Context = NewF->getContext();
899 
900   // For async functions / continuations, adjust the scope line of the
901   // clone to the line number of the suspend point. However, only
902   // adjust the scope line when the files are the same. This ensures
903   // line number and file name belong together. The scope line is
904   // associated with all pre-prologue instructions. This avoids a jump
905   // in the linetable from the function declaration to the suspend point.
906   if (DISubprogram *SP = NewF->getSubprogram()) {
907     assert(SP != OrigF.getSubprogram() && SP->isDistinct());
908     if (ActiveSuspend)
909       if (auto DL = ActiveSuspend->getDebugLoc())
910         if (SP->getFile() == DL->getFile())
911           SP->setScopeLine(DL->getLine());
912     // Update the linkage name to reflect the modified symbol name. It
913     // is necessary to update the linkage name in Swift, since the
914     // mangling changes for resume functions. It might also be the
915     // right thing to do in C++, but due to a limitation in LLVM's
916     // AsmPrinter we can only do this if the function doesn't have an
917     // abstract specification, since the DWARF backend expects the
918     // abstract specification to contain the linkage name and asserts
919     // that they are identical.
920     if (!SP->getDeclaration() && SP->getUnit() &&
921         SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
922       SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
923   }
924 
925   NewF->setLinkage(savedLinkage);
926   NewF->setVisibility(savedVisibility);
927   NewF->setUnnamedAddr(savedUnnamedAddr);
928   NewF->setDLLStorageClass(savedDLLStorageClass);
929 
930   // Replace the attributes of the new function:
931   auto OrigAttrs = NewF->getAttributes();
932   auto NewAttrs = AttributeList();
933 
934   switch (Shape.ABI) {
935   case coro::ABI::Switch:
936     // Bootstrap attributes by copying function attributes from the
937     // original function.  This should include optimization settings and so on.
938     NewAttrs = NewAttrs.addFnAttributes(
939         Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
940 
941     addFramePointerAttrs(NewAttrs, Context, 0,
942                          Shape.FrameSize, Shape.FrameAlign);
943     break;
944   case coro::ABI::Async: {
945     auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
946     if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
947                                 Attribute::SwiftAsync)) {
948       uint32_t ArgAttributeIndices =
949           ActiveAsyncSuspend->getStorageArgumentIndex();
950       auto ContextArgIndex = ArgAttributeIndices & 0xff;
951       addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
952 
953       // `swiftasync` must precede `swiftself` so 0 is not a valid index for
954       // `swiftself`.
955       auto SwiftSelfIndex = ArgAttributeIndices >> 8;
956       if (SwiftSelfIndex)
957         addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
958     }
959 
960     // Transfer the original function's attributes.
961     auto FnAttrs = OrigF.getAttributes().getFnAttrs();
962     NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
963     break;
964   }
965   case coro::ABI::Retcon:
966   case coro::ABI::RetconOnce:
967     // If we have a continuation prototype, just use its attributes,
968     // full-stop.
969     NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
970 
971     addFramePointerAttrs(NewAttrs, Context, 0,
972                          Shape.getRetconCoroId()->getStorageSize(),
973                          Shape.getRetconCoroId()->getStorageAlignment());
974     break;
975   }
976 
977   switch (Shape.ABI) {
978   // In these ABIs, the cloned functions always return 'void', and the
979   // existing return sites are meaningless.  Note that for unique
980   // continuations, this includes the returns associated with suspends;
981   // this is fine because we can't suspend twice.
982   case coro::ABI::Switch:
983   case coro::ABI::RetconOnce:
984     // Remove old returns.
985     for (ReturnInst *Return : Returns)
986       changeToUnreachable(Return);
987     break;
988 
989   // With multi-suspend continuations, we'll already have eliminated the
990   // original returns and inserted returns before all the suspend points,
991   // so we want to leave any returns in place.
992   case coro::ABI::Retcon:
993     break;
994   // Async lowering will insert musttail call functions at all suspend points
995   // followed by a return.
996   // Don't change returns to unreachable because that will trip up the verifier.
997   // These returns should be unreachable from the clone.
998   case coro::ABI::Async:
999     break;
1000   }
1001 
1002   NewF->setAttributes(NewAttrs);
1003   NewF->setCallingConv(Shape.getResumeFunctionCC());
1004 
1005   // Set up the new entry block.
1006   replaceEntryBlock();
1007 
1008   Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1009   NewFramePtr = deriveNewFramePointer();
1010 
1011   // Remap frame pointer.
1012   Value *OldFramePtr = VMap[Shape.FramePtr];
1013   NewFramePtr->takeName(OldFramePtr);
1014   OldFramePtr->replaceAllUsesWith(NewFramePtr);
1015 
1016   // Remap vFrame pointer.
1017   auto *NewVFrame = Builder.CreateBitCast(
1018       NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
1019   Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
1020   if (OldVFrame != NewVFrame)
1021     OldVFrame->replaceAllUsesWith(NewVFrame);
1022 
1023   // All uses of the arguments should have been resolved by this point,
1024   // so we can safely remove the dummy values.
1025   for (Instruction *DummyArg : DummyArgs) {
1026     DummyArg->replaceAllUsesWith(UndefValue::get(DummyArg->getType()));
1027     DummyArg->deleteValue();
1028   }
1029 
1030   switch (Shape.ABI) {
1031   case coro::ABI::Switch:
1032     // Rewrite final suspend handling, since it is not done via the switch (this
1033     // allows us to remove the final case from the switch, because it is undefined
1034     // behavior to resume the coroutine suspended at the final suspend point).
1035     if (Shape.SwitchLowering.HasFinalSuspend)
1036       handleFinalSuspend();
1037     break;
1038   case coro::ABI::Async:
1039   case coro::ABI::Retcon:
1040   case coro::ABI::RetconOnce:
1041     // Replace uses of the active suspend with the corresponding
1042     // continuation-function arguments.
1043     assert(ActiveSuspend != nullptr &&
1044            "no active suspend when lowering a continuation-style coroutine");
1045     replaceRetconOrAsyncSuspendUses();
1046     break;
1047   }
1048 
1049   // Handle suspends.
1050   replaceCoroSuspends();
1051 
1052   // Handle swifterror.
1053   replaceSwiftErrorOps();
1054 
1055   // Remove coro.end intrinsics.
1056   replaceCoroEnds();
1057 
1058   // Salvage debug info that points into the coroutine frame.
1059   salvageDebugInfo();
1060 
1061   // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1062   // to suppress deallocation code.
1063   if (Shape.ABI == coro::ABI::Switch)
1064     coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1065                           /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1066 }
1067 
1068 // Create a resume clone by cloning the body of the original function, setting
1069 // up a new entry block and replacing coro.suspend with an appropriate value to
1070 // force the resume or cleanup path at every suspend point.
1071 static Function *createClone(Function &F, const Twine &Suffix,
1072                              coro::Shape &Shape, CoroCloner::Kind FKind) {
1073   CoroCloner Cloner(F, Suffix, Shape, FKind);
1074   Cloner.create();
1075   return Cloner.getFunction();
1076 }
1077 
1078 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1079   assert(Shape.ABI == coro::ABI::Async);
1080 
1081   auto *FuncPtrStruct = cast<ConstantStruct>(
1082       Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1083   auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1084   auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1085   auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1086                                           Shape.AsyncLowering.ContextSize);
1087   auto *NewFuncPtrStruct = ConstantStruct::get(
1088       FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1089 
1090   Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1091 }
1092 
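// Replace llvm.coro.size and llvm.coro.align intrinsics with the now-known
// frame size and alignment, e.g. (illustrative):
//   %size = call i64 @llvm.coro.size.i64()    ; becomes  i64 <frame size>
//   %align = call i64 @llvm.coro.align.i64()  ; becomes  i64 <frame alignment>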
1093 static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
1094   if (Shape.ABI == coro::ABI::Async)
1095     updateAsyncFuncPointerContextSize(Shape);
1096 
1097   for (CoroAlignInst *CA : Shape.CoroAligns) {
1098     CA->replaceAllUsesWith(
1099         ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
1100     CA->eraseFromParent();
1101   }
1102 
1103   if (Shape.CoroSizes.empty())
1104     return;
1105 
1106   // In the same function all coro.sizes should have the same result type.
1107   auto *SizeIntrin = Shape.CoroSizes.back();
1108   Module *M = SizeIntrin->getModule();
1109   const DataLayout &DL = M->getDataLayout();
1110   auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1111   auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1112 
1113   for (CoroSizeInst *CS : Shape.CoroSizes) {
1114     CS->replaceAllUsesWith(SizeConstant);
1115     CS->eraseFromParent();
1116   }
1117 }
1118 
1119 // Create a global constant array containing pointers to the functions provided
1120 // and set the Info parameter of CoroBegin to point at this constant. Example:
1121 //
1122 //   @f.resumers = internal constant [2 x void(%f.frame*)*]
1123 //                    [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1124 //   define void @f() {
1125 //     ...
1126 //     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1127 //                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1128 //
1129 // Assumes that all the functions have the same signature.
1130 static void setCoroInfo(Function &F, coro::Shape &Shape,
1131                         ArrayRef<Function *> Fns) {
1132   // This only works under the switch-lowering ABI because coro elision
1133   // only works on the switch-lowering ABI.
1134   assert(Shape.ABI == coro::ABI::Switch);
1135 
1136   SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1137   assert(!Args.empty());
1138   Function *Part = *Fns.begin();
1139   Module *M = Part->getParent();
1140   auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1141 
1142   auto *ConstVal = ConstantArray::get(ArrTy, Args);
1143   auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1144                                 GlobalVariable::PrivateLinkage, ConstVal,
1145                                 F.getName() + Twine(".resumers"));
1146 
1147   // Update coro.begin instruction to refer to this constant.
1148   LLVMContext &C = F.getContext();
1149   auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1150   Shape.getSwitchCoroId()->setInfo(BC);
1151 }
1152 
1153 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
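//
// After this runs, the ramp function contains stores such as the following
// (illustrative; the destroy slot may instead hold a select between the
// destroy and cleanup functions when the allocation can be elided):
//   store void (%f.Frame*)* @f.resume,  void (%f.Frame*)** %resume.addr
//   store void (%f.Frame*)* @f.destroy, void (%f.Frame*)** %destroy.addr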
1154 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1155                             Function *DestroyFn, Function *CleanupFn) {
1156   assert(Shape.ABI == coro::ABI::Switch);
1157 
1158   IRBuilder<> Builder(Shape.getInsertPtAfterFramePtr());
1159 
1160   auto *ResumeAddr = Builder.CreateStructGEP(
1161       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1162       "resume.addr");
1163   Builder.CreateStore(ResumeFn, ResumeAddr);
1164 
1165   Value *DestroyOrCleanupFn = DestroyFn;
1166 
1167   CoroIdInst *CoroId = Shape.getSwitchCoroId();
1168   if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1169     // If there is a CoroAlloc and it returns false (meaning we elided the
1170     // allocation), use CleanupFn instead of DestroyFn.
1171     DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1172   }
1173 
1174   auto *DestroyAddr = Builder.CreateStructGEP(
1175       Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1176       "destroy.addr");
1177   Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1178 }
1179 
1180 static void postSplitCleanup(Function &F) {
1181   removeUnreachableBlocks(F);
1182 
1183 #ifndef NDEBUG
1184   // For now, we do a mandatory verification step because we don't
1185   // entirely trust this pass.  Note that we don't want to add a verifier
1186   // pass to FPM below because it will also verify all the global data.
1187   if (verifyFunction(F, &errs()))
1188     report_fatal_error("Broken function");
1189 #endif
1190 }
1191 
1192 // Assuming we arrived at the block NewBlock from the Prev instruction, store
1193 // the PHIs' incoming values in the ResolvedValues map.
1194 static void
1195 scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1196                           DenseMap<Value *, Value *> &ResolvedValues) {
1197   auto *PrevBB = Prev->getParent();
1198   for (PHINode &PN : NewBlock->phis()) {
1199     auto V = PN.getIncomingValueForBlock(PrevBB);
1200     // See if we already resolved it.
1201     auto VI = ResolvedValues.find(V);
1202     if (VI != ResolvedValues.end())
1203       V = VI->second;
1204     // Remember the value.
1205     ResolvedValues[&PN] = V;
1206   }
1207 }
1208 
1209 // Replace a sequence of branches leading to a ret with a clone of that ret
1210 // instruction. A suspend instruction is represented by a switch; track the PHI
1211 // values and select the correct case successor when possible.
1212 static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1213   DenseMap<Value *, Value *> ResolvedValues;
1214   BasicBlock *UnconditionalSucc = nullptr;
1215   assert(InitialInst->getModule());
1216   const DataLayout &DL = InitialInst->getModule()->getDataLayout();
1217 
1218   auto GetFirstValidInstruction = [](Instruction *I) {
1219     while (I) {
1220       // A BitCastInst doesn't generate actual code, so we can skip it.
1221       if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
1222           I->isLifetimeStartOrEnd())
1223         I = I->getNextNode();
1224       else if (isInstructionTriviallyDead(I))
1225         // Since we are in the middle of the transformation, we need to erase
1226         // the dead instruction manually.
1227         I = &*I->eraseFromParent();
1228       else
1229         break;
1230     }
1231     return I;
1232   };
1233 
1234   auto TryResolveConstant = [&ResolvedValues](Value *V) {
1235     auto It = ResolvedValues.find(V);
1236     if (It != ResolvedValues.end())
1237       V = It->second;
1238     return dyn_cast<ConstantInt>(V);
1239   };
1240 
1241   Instruction *I = InitialInst;
1242   while (I->isTerminator() || isa<CmpInst>(I)) {
1243     if (isa<ReturnInst>(I)) {
1244       if (I != InitialInst) {
1245         // If InitialInst is an unconditional branch, remove PHI values that
1246         // come from the basic block of InitialInst.
1247         if (UnconditionalSucc)
1248           UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1249         ReplaceInstWithInst(InitialInst, I->clone());
1250       }
1251       return true;
1252     }
1253     if (auto *BR = dyn_cast<BranchInst>(I)) {
1254       if (BR->isUnconditional()) {
1255         BasicBlock *Succ = BR->getSuccessor(0);
1256         if (I == InitialInst)
1257           UnconditionalSucc = Succ;
1258         scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
1259         I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
1260         continue;
1261       }
1262 
1263       BasicBlock *BB = BR->getParent();
1264       // Handle the case where the condition of the conditional branch is
1265       // constant, e.g.,
1266       //
1267       //     br i1 false, label %cleanup, label %CoroEnd
1268       //
1269       // This can happen during the transformation; we can continue simplifying
1270       // in this case.
1271       if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
1272         // Handle this branch in next iteration.
1273         I = BB->getTerminator();
1274         continue;
1275       }
1276     } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1277       // If the number of cases of the suspend switch instruction is reduced to
1278       // 1, then it is simplified to a CmpInst by llvm::ConstantFoldTerminator.
1279       auto *BR = dyn_cast<BranchInst>(
1280           GetFirstValidInstruction(CondCmp->getNextNode()));
1281       if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
1282         return false;
1283 
1284       // The comparison looks like: %cond = icmp eq i8 %V, constant.
1285       // So we try to resolve a constant for the first operand only, since the
1286       // second operand should be a literal constant by design.
1287       ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
1288       auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1289       if (!Cond0 || !Cond1)
1290         return false;
1291 
1292       // Both operands of the CmpInst are constant, so we can evaluate it
1293       // immediately to get the destination.
1294       auto *ConstResult =
1295           dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
1296               CondCmp->getPredicate(), Cond0, Cond1, DL));
1297       if (!ConstResult)
1298         return false;
1299 
1300       CondCmp->replaceAllUsesWith(ConstResult);
1301       CondCmp->eraseFromParent();
1302 
1303       // Handle this branch in next iteration.
1304       I = BR;
1305       continue;
1306     } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1307       ConstantInt *Cond = TryResolveConstant(SI->getCondition());
1308       if (!Cond)
1309         return false;
1310 
1311       BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1312       scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1313       I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
1314       continue;
1315     }
1316 
1317     return false;
1318   }
1319   return false;
1320 }
1321 
1322 // Check whether CI obeys the rules of the musttail attribute.
1323 static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1324   if (CI.isInlineAsm())
1325     return false;
1326 
1327   // Match prototypes and calling conventions of resume function.
1328   FunctionType *CalleeTy = CI.getFunctionType();
1329   if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1330     return false;
1331 
1332   Type *CalleeParmTy = CalleeTy->getParamType(0);
1333   if (!CalleeParmTy->isPointerTy() ||
1334       (CalleeParmTy->getPointerAddressSpace() != 0))
1335     return false;
1336 
1337   if (CI.getCallingConv() != F.getCallingConv())
1338     return false;
1339 
1340   // CI should not have any ABI-impacting function attributes.
1341   static const Attribute::AttrKind ABIAttrs[] = {
1342       Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
1343       Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
1344       Attribute::SwiftSelf,    Attribute::SwiftError};
1345   AttributeList Attrs = CI.getAttributes();
1346   for (auto AK : ABIAttrs)
1347     if (Attrs.hasParamAttr(0, AK))
1348       return false;
1349 
1350   return true;
1351 }
1352 
// Add musttail to any resume instruction that is immediately followed by a
// suspend (i.e. ret). We do this even at -O0 to support guaranteed tail calls
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine, which
// has a signature and calling convention identical to those of the
// coro.resume call.
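// A minimal sketch of the effect (illustrative IR only; %fn and %hdl are
// made-up names for a resume-function pointer and a coroutine frame):
//   call fastcc void %fn(i8* %hdl)
//   ret void
// becomes
//   musttail call fastcc void %fn(i8* %hdl)
//   ret void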
1358 static void addMustTailToCoroResumes(Function &F) {
1359   bool changed = false;
1360 
1361   // Collect potential resume instructions.
1362   SmallVector<CallInst *, 4> Resumes;
1363   for (auto &I : instructions(F))
1364     if (auto *Call = dyn_cast<CallInst>(&I))
1365       if (shouldBeMustTail(*Call, F))
1366         Resumes.push_back(Call);
1367 
1368   // Set musttail on those that are followed by a ret instruction.
1369   for (CallInst *Call : Resumes)
1370     if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1371       Call->setTailCallKind(CallInst::TCK_MustTail);
1372       changed = true;
1373     }
1374 
1375   if (changed)
1376     removeUnreachableBlocks(F);
1377 }
1378 
// The coroutine has no suspend points. Remove the heap allocation for the
// coroutine frame if possible.
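// For the switch ABI, a rough sketch of the result (illustrative IR; the
// frame type name is made up): the dynamic allocation guarded by
//   %need.alloc = call i1 @llvm.coro.alloc(token %id)
// is elided by replacing %need.alloc with 'false', and uses of
// @llvm.coro.begin are rewritten to point at a stack slot instead:
//   %frame = alloca %f.Frame, align 8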
1381 static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1382   auto *CoroBegin = Shape.CoroBegin;
1383   auto *CoroId = CoroBegin->getId();
1384   auto *AllocInst = CoroId->getCoroAlloc();
1385   switch (Shape.ABI) {
1386   case coro::ABI::Switch: {
1387     auto SwitchId = cast<CoroIdInst>(CoroId);
1388     coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1389     if (AllocInst) {
1390       IRBuilder<> Builder(AllocInst);
1391       auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1392       Frame->setAlignment(Shape.FrameAlign);
1393       auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1394       AllocInst->replaceAllUsesWith(Builder.getFalse());
1395       AllocInst->eraseFromParent();
1396       CoroBegin->replaceAllUsesWith(VFrame);
1397     } else {
1398       CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1399     }
1400 
1401     break;
1402   }
1403   case coro::ABI::Async:
1404   case coro::ABI::Retcon:
1405   case coro::ABI::RetconOnce:
1406     CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1407     break;
1408   }
1409 
1410   CoroBegin->eraseFromParent();
1411 }
1412 
// simplifySuspendPoint needs to check that there are no calls between
// coro.save and coro.suspend, since any of those calls may potentially resume
// the coroutine, in which case we cannot eliminate the suspend point.
1416 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1417   for (Instruction *I = From; I != To; I = I->getNextNode()) {
1418     // Assume that no intrinsic can resume the coroutine.
1419     if (isa<IntrinsicInst>(I))
1420       continue;
1421 
1422     if (isa<CallBase>(I))
1423       return true;
1424   }
1425   return false;
1426 }
1427 
1428 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
1429   SmallPtrSet<BasicBlock *, 8> Set;
1430   SmallVector<BasicBlock *, 8> Worklist;
1431 
1432   Set.insert(SaveBB);
1433   Worklist.push_back(ResDesBB);
1434 
  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by the suspend instruction, all blocks in between
  // will eventually have to hit SaveBB when walking backwards from ResDesBB.
1438   while (!Worklist.empty()) {
1439     auto *BB = Worklist.pop_back_val();
1440     Set.insert(BB);
1441     for (auto *Pred : predecessors(BB))
1442       if (!Set.contains(Pred))
1443         Worklist.push_back(Pred);
1444   }
1445 
1446   // SaveBB and ResDesBB are checked separately in hasCallsBetween.
1447   Set.erase(SaveBB);
1448   Set.erase(ResDesBB);
1449 
1450   for (auto *BB : Set)
1451     if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
1452       return true;
1453 
1454   return false;
1455 }
1456 
1457 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
1458   auto *SaveBB = Save->getParent();
1459   auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
1460 
1461   if (SaveBB == ResumeOrDestroyBB)
1462     return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
1463 
1464   // Any calls from Save to the end of the block?
1465   if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
1466     return true;
1467 
  // Any calls from the beginning of the block up to ResumeOrDestroy?
1469   if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
1470                              ResumeOrDestroy))
1471     return true;
1472 
1473   // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
1474   if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
1475     return true;
1476 
1477   return false;
1478 }
1479 
// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
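// A rough sketch of the pattern being matched (illustrative IR; %hdl and the
// other names are made up):
//   %save = call token @llvm.coro.save(i8* %hdl)
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0) ; 0 = resume
//   %fn   = bitcast i8* %addr to void (i8*)*
//   call fastcc void %fn(i8* %hdl)
//   %idx  = call i8 @llvm.coro.suspend(token %save, i1 false)
// The save/suspend pair and the call are erased, and %idx is replaced by the
// raw subfunction index (0 above), so execution continues directly on the
// resume path.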
1482 static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
1483                                  CoroBeginInst *CoroBegin) {
1484   Instruction *Prev = Suspend->getPrevNode();
1485   if (!Prev) {
1486     auto *Pred = Suspend->getParent()->getSinglePredecessor();
1487     if (!Pred)
1488       return false;
1489     Prev = Pred->getTerminator();
1490   }
1491 
1492   CallBase *CB = dyn_cast<CallBase>(Prev);
1493   if (!CB)
1494     return false;
1495 
1496   auto *Callee = CB->getCalledOperand()->stripPointerCasts();
1497 
1498   // See if the callsite is for resumption or destruction of the coroutine.
1499   auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
1500   if (!SubFn)
1501     return false;
1502 
  // If it does not refer to the current coroutine, we cannot do anything with
  // it.
1504   if (SubFn->getFrame() != CoroBegin)
1505     return false;
1506 
  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
1510   auto *Save = Suspend->getCoroSave();
1511   if (hasCallsBetween(Save, CB))
1512     return false;
1513 
1514   // Replace llvm.coro.suspend with the value that results in resumption over
1515   // the resume or cleanup path.
1516   Suspend->replaceAllUsesWith(SubFn->getRawIndex());
1517   Suspend->eraseFromParent();
1518   Save->eraseFromParent();
1519 
1520   // No longer need a call to coro.resume or coro.destroy.
1521   if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
1522     BranchInst::Create(Invoke->getNormalDest(), Invoke);
1523   }
1524 
1525   // Grab the CalledValue from CB before erasing the CallInstr.
1526   auto *CalledValue = CB->getCalledOperand();
1527   CB->eraseFromParent();
1528 
  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
1530   if (CalledValue != SubFn && CalledValue->user_empty())
1531     if (auto *I = dyn_cast<Instruction>(CalledValue))
1532       I->eraseFromParent();
1533 
1534   // Now we are good to remove SubFn.
1535   if (SubFn->user_empty())
1536     SubFn->eraseFromParent();
1537 
1538   return true;
1539 }
1540 
// Remove suspend points that can be simplified.
1542 static void simplifySuspendPoints(coro::Shape &Shape) {
1543   // Currently, the only simplification we do is switch-lowering-specific.
1544   if (Shape.ABI != coro::ABI::Switch)
1545     return;
1546 
1547   auto &S = Shape.CoroSuspends;
1548   size_t I = 0, N = S.size();
1549   if (N == 0)
1550     return;
1551   while (true) {
1552     auto SI = cast<CoroSuspendInst>(S[I]);
1553     // Leave final.suspend to handleFinalSuspend since it is undefined behavior
1554     // to resume a coroutine suspended at the final suspend point.
1555     if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
1556       if (--N == I)
1557         break;
1558       std::swap(S[I], S[N]);
1559       continue;
1560     }
1561     if (++I == N)
1562       break;
1563   }
1564   S.resize(N);
1565 }
1566 
1567 static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
1568                                  SmallVectorImpl<Function *> &Clones) {
1569   assert(Shape.ABI == coro::ABI::Switch);
1570 
1571   createResumeEntryBlock(F, Shape);
1572   auto ResumeClone = createClone(F, ".resume", Shape,
1573                                  CoroCloner::Kind::SwitchResume);
1574   auto DestroyClone = createClone(F, ".destroy", Shape,
1575                                   CoroCloner::Kind::SwitchUnwind);
1576   auto CleanupClone = createClone(F, ".cleanup", Shape,
1577                                   CoroCloner::Kind::SwitchCleanup);
1578 
1579   postSplitCleanup(*ResumeClone);
1580   postSplitCleanup(*DestroyClone);
1581   postSplitCleanup(*CleanupClone);
1582 
1583   addMustTailToCoroResumes(*ResumeClone);
1584 
  // Store addresses of the resume/destroy/cleanup functions in the coroutine
  // frame.
1586   updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
1587 
1588   assert(Clones.empty());
1589   Clones.push_back(ResumeClone);
1590   Clones.push_back(DestroyClone);
1591   Clones.push_back(CleanupClone);
1592 
  // Create a constant array referring to the resume/destroy/cleanup functions,
  // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
  // pass can determine the correct function to call.
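  // Roughly, setCoroInfo emits a private constant global holding the three
  // function pointers (illustrative IR; the frame type and symbol names depend
  // on the coroutine):
  //   @f.resumers = private constant [3 x void (%f.Frame*)*]
  //       [void (%f.Frame*)* @f.resume, void (%f.Frame*)* @f.destroy,
  //        void (%f.Frame*)* @f.cleanup]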
1596   setCoroInfo(F, Shape, Clones);
1597 }
1598 
1599 static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
1600                                        Value *Continuation) {
1601   auto *ResumeIntrinsic = Suspend->getResumeFunction();
1602   auto &Context = Suspend->getParent()->getParent()->getContext();
1603   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1604 
1605   IRBuilder<> Builder(ResumeIntrinsic);
1606   auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
1607   ResumeIntrinsic->replaceAllUsesWith(Val);
1608   ResumeIntrinsic->eraseFromParent();
1609   Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
1610                       UndefValue::get(Int8PtrTy));
1611 }
1612 
/// Coerce the arguments in \p FnArgs according to \p FnTy, storing the result
/// in \p CallArgs.
1614 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
1615                             ArrayRef<Value *> FnArgs,
1616                             SmallVectorImpl<Value *> &CallArgs) {
1617   size_t ArgIdx = 0;
1618   for (auto paramTy : FnTy->params()) {
1619     assert(ArgIdx < FnArgs.size());
1620     if (paramTy != FnArgs[ArgIdx]->getType())
1621       CallArgs.push_back(
1622           Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
1623     else
1624       CallArgs.push_back(FnArgs[ArgIdx]);
1625     ++ArgIdx;
1626   }
1627 }
1628 
1629 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
1630                                    ArrayRef<Value *> Arguments,
1631                                    IRBuilder<> &Builder) {
1632   auto *FnTy = MustTailCallFn->getFunctionType();
  // Coerce the arguments; LLVM optimizations seem to ignore the types in
  // vararg functions and throw away casts in optimized mode.
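  // For example (illustrative only; the context type name is made up): if the
  // callee prototype expects an i8* but the supplied value is an %async.ctx*,
  // a bitcast is emitted first so that the musttail call type-checks.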
1635   SmallVector<Value *, 8> CallArgs;
1636   coerceArguments(Builder, FnTy, Arguments, CallArgs);
1637 
1638   auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
1639   TailCall->setTailCallKind(CallInst::TCK_MustTail);
1640   TailCall->setDebugLoc(Loc);
1641   TailCall->setCallingConv(MustTailCallFn->getCallingConv());
1642   return TailCall;
1643 }
1644 
1645 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
1646                                 SmallVectorImpl<Function *> &Clones) {
1647   assert(Shape.ABI == coro::ABI::Async);
1648   assert(Clones.empty());
1649   // Reset various things that the optimizer might have decided it
1650   // "knows" about the coroutine function due to not seeing a return.
1651   F.removeFnAttr(Attribute::NoReturn);
1652   F.removeRetAttr(Attribute::NoAlias);
1653   F.removeRetAttr(Attribute::NonNull);
1654 
1655   auto &Context = F.getContext();
1656   auto *Int8PtrTy = Type::getInt8PtrTy(Context);
1657 
1658   auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
1659   IRBuilder<> Builder(Id);
1660 
1661   auto *FramePtr = Id->getStorage();
1662   FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
1663   FramePtr = Builder.CreateConstInBoundsGEP1_32(
1664       Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
1665       "async.ctx.frameptr");
1666 
1667   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1668   {
1669     // Make sure we don't invalidate Shape.FramePtr.
1670     TrackingVH<Value> Handle(Shape.FramePtr);
1671     Shape.CoroBegin->replaceAllUsesWith(FramePtr);
1672     Shape.FramePtr = Handle.getValPtr();
1673   }
1674 
1675   // Create all the functions in order after the main function.
1676   auto NextF = std::next(F.getIterator());
1677 
1678   // Create a continuation function for each of the suspend points.
1679   Clones.reserve(Shape.CoroSuspends.size());
1680   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1681     auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
1682 
1683     // Create the clone declaration.
1684     auto ResumeNameSuffix = ".resume.";
1685     auto ProjectionFunctionName =
1686         Suspend->getAsyncContextProjectionFunction()->getName();
1687     bool UseSwiftMangling = false;
1688     if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
1689       ResumeNameSuffix = "TQ";
1690       UseSwiftMangling = true;
1691     } else if (ProjectionFunctionName.equals(
1692                    "__swift_async_resume_get_context")) {
1693       ResumeNameSuffix = "TY";
1694       UseSwiftMangling = true;
1695     }
1696     auto *Continuation = createCloneDeclaration(
1697         F, Shape,
1698         UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
1699                          : ResumeNameSuffix + Twine(Idx),
1700         NextF, Suspend);
1701     Clones.push_back(Continuation);
1702 
1703     // Insert a branch to a new return block immediately before the suspend
1704     // point.
1705     auto *SuspendBB = Suspend->getParent();
1706     auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1707     auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1708 
1709     // Place it before the first suspend.
1710     auto *ReturnBB =
1711         BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1712     Branch->setSuccessor(0, ReturnBB);
1713 
1714     IRBuilder<> Builder(ReturnBB);
1715 
1716     // Insert the call to the tail call function and inline it.
1717     auto *Fn = Suspend->getMustTailCallFunction();
1718     SmallVector<Value *, 8> Args(Suspend->args());
1719     auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1720         CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1721     auto *TailCall =
1722         coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1723     Builder.CreateRetVoid();
1724     InlineFunctionInfo FnInfo;
1725     auto InlineRes = InlineFunction(*TailCall, FnInfo);
1726     assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1727     (void)InlineRes;
1728 
    // Replace the llvm.coro.async.resume intrinsic call.
1730     replaceAsyncResumeFunction(Suspend, Continuation);
1731   }
1732 
1733   assert(Clones.size() == Shape.CoroSuspends.size());
1734   for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1735     auto *Suspend = Shape.CoroSuspends[Idx];
1736     auto *Clone = Clones[Idx];
1737 
1738     CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1739   }
1740 }
1741 
1742 static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1743                                  SmallVectorImpl<Function *> &Clones) {
1744   assert(Shape.ABI == coro::ABI::Retcon ||
1745          Shape.ABI == coro::ABI::RetconOnce);
1746   assert(Clones.empty());
1747 
1748   // Reset various things that the optimizer might have decided it
1749   // "knows" about the coroutine function due to not seeing a return.
1750   F.removeFnAttr(Attribute::NoReturn);
1751   F.removeRetAttr(Attribute::NoAlias);
1752   F.removeRetAttr(Attribute::NonNull);
1753 
1754   // Allocate the frame.
1755   auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1756   Value *RawFramePtr;
1757   if (Shape.RetconLowering.IsFrameInlineInStorage) {
1758     RawFramePtr = Id->getStorage();
1759   } else {
1760     IRBuilder<> Builder(Id);
1761 
1762     // Determine the size of the frame.
1763     const DataLayout &DL = F.getParent()->getDataLayout();
1764     auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1765 
1766     // Allocate.  We don't need to update the call graph node because we're
1767     // going to recompute it from scratch after splitting.
1768     // FIXME: pass the required alignment
1769     RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1770     RawFramePtr =
1771       Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1772 
1773     // Stash the allocated frame pointer in the continuation storage.
1774     auto Dest = Builder.CreateBitCast(Id->getStorage(),
1775                                       RawFramePtr->getType()->getPointerTo());
1776     Builder.CreateStore(RawFramePtr, Dest);
1777   }
1778 
1779   // Map all uses of llvm.coro.begin to the allocated frame pointer.
1780   {
1781     // Make sure we don't invalidate Shape.FramePtr.
1782     TrackingVH<Value> Handle(Shape.FramePtr);
1783     Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1784     Shape.FramePtr = Handle.getValPtr();
1785   }
1786 
1787   // Create a unique return block.
1788   BasicBlock *ReturnBB = nullptr;
1789   SmallVector<PHINode *, 4> ReturnPHIs;
1790 
1791   // Create all the functions in order after the main function.
1792   auto NextF = std::next(F.getIterator());
1793 
1794   // Create a continuation function for each of the suspend points.
1795   Clones.reserve(Shape.CoroSuspends.size());
1796   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1797     auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
1798 
1799     // Create the clone declaration.
1800     auto Continuation =
1801         createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
1802     Clones.push_back(Continuation);
1803 
1804     // Insert a branch to the unified return block immediately before
1805     // the suspend point.
1806     auto SuspendBB = Suspend->getParent();
1807     auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1808     auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
1809 
1810     // Create the unified return block.
1811     if (!ReturnBB) {
1812       // Place it before the first suspend.
1813       ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
1814                                     NewSuspendBB);
1815       Shape.RetconLowering.ReturnBlock = ReturnBB;
1816 
1817       IRBuilder<> Builder(ReturnBB);
1818 
1819       // Create PHIs for all the return values.
1820       assert(ReturnPHIs.empty());
1821 
1822       // First, the continuation.
1823       ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
1824                                              Shape.CoroSuspends.size()));
1825 
1826       // Next, all the directly-yielded values.
1827       for (auto ResultTy : Shape.getRetconResultTypes())
1828         ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
1829                                                Shape.CoroSuspends.size()));
1830 
1831       // Build the return value.
1832       auto RetTy = F.getReturnType();
1833 
1834       // Cast the continuation value if necessary.
1835       // We can't rely on the types matching up because that type would
1836       // have to be infinite.
1837       auto CastedContinuationTy =
1838         (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
1839       auto *CastedContinuation =
1840         Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
1841 
1842       Value *RetV;
1843       if (ReturnPHIs.size() == 1) {
1844         RetV = CastedContinuation;
1845       } else {
1846         RetV = UndefValue::get(RetTy);
1847         RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
1848         for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
1849           RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
1850       }
1851 
1852       Builder.CreateRet(RetV);
1853     }
1854 
1855     // Branch to the return block.
1856     Branch->setSuccessor(0, ReturnBB);
1857     ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
1858     size_t NextPHIIndex = 1;
1859     for (auto &VUse : Suspend->value_operands())
1860       ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
1861     assert(NextPHIIndex == ReturnPHIs.size());
1862   }
1863 
1864   assert(Clones.size() == Shape.CoroSuspends.size());
1865   for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
1866     auto Suspend = Shape.CoroSuspends[i];
1867     auto Clone = Clones[i];
1868 
1869     CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
1870   }
1871 }
1872 
1873 namespace {
1874   class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1875     Function &F;
1876   public:
1877     PrettyStackTraceFunction(Function &F) : F(F) {}
1878     void print(raw_ostream &OS) const override {
1879       OS << "While splitting coroutine ";
1880       F.printAsOperand(OS, /*print type*/ false, F.getParent());
1881       OS << "\n";
1882     }
1883   };
1884 }
1885 
1886 static coro::Shape splitCoroutine(Function &F,
1887                                   SmallVectorImpl<Function *> &Clones,
1888                                   bool OptimizeFrame) {
1889   PrettyStackTraceFunction prettyStackTrace(F);
1890 
  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
1893   removeUnreachableBlocks(F);
1894 
1895   coro::Shape Shape(F, OptimizeFrame);
1896   if (!Shape.CoroBegin)
1897     return Shape;
1898 
1899   simplifySuspendPoints(Shape);
1900   buildCoroutineFrame(F, Shape);
1901   replaceFrameSizeAndAlignment(Shape);
1902 
  // If there are no suspend points, no split is required; just remove
  // the allocation and deallocation blocks, as they are not needed.
1905   if (Shape.CoroSuspends.empty()) {
1906     handleNoSuspendCoroutine(Shape);
1907   } else {
1908     switch (Shape.ABI) {
1909     case coro::ABI::Switch:
1910       splitSwitchCoroutine(F, Shape, Clones);
1911       break;
1912     case coro::ABI::Async:
1913       splitAsyncCoroutine(F, Shape, Clones);
1914       break;
1915     case coro::ABI::Retcon:
1916     case coro::ABI::RetconOnce:
1917       splitRetconCoroutine(F, Shape, Clones);
1918       break;
1919     }
1920   }
1921 
1922   // Replace all the swifterror operations in the original function.
1923   // This invalidates SwiftErrorOps in the Shape.
1924   replaceSwiftErrorOps(F, Shape, nullptr);
1925 
1926   // Finally, salvage the llvm.dbg.{declare,addr} in our original function that
1927   // point into the coroutine frame. We only do this for the current function
1928   // since the Cloner salvaged debug info for us in the new coroutine funclets.
1929   SmallVector<DbgVariableIntrinsic *, 8> Worklist;
1930   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1931   for (auto &BB : F) {
1932     for (auto &I : BB) {
1933       if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) {
1934         Worklist.push_back(DDI);
1935         continue;
1936       }
1937       if (auto *DDI = dyn_cast<DbgAddrIntrinsic>(&I)) {
1938         Worklist.push_back(DDI);
1939         continue;
1940       }
1941     }
1942   }
1943   for (auto *DDI : Worklist)
1944     coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1945 
1946   return Shape;
1947 }
1948 
1949 static void updateCallGraphAfterCoroutineSplit(
1950     LazyCallGraph::Node &N, const coro::Shape &Shape,
1951     const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1952     LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1953     FunctionAnalysisManager &FAM) {
1954   if (!Shape.CoroBegin)
1955     return;
1956 
1957   for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1958     auto &Context = End->getContext();
1959     End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1960     End->eraseFromParent();
1961   }
1962 
1963   if (!Clones.empty()) {
1964     switch (Shape.ABI) {
1965     case coro::ABI::Switch:
1966       // Each clone in the Switch lowering is independent of the other clones.
1967       // Let the LazyCallGraph know about each one separately.
1968       for (Function *Clone : Clones)
1969         CG.addSplitFunction(N.getFunction(), *Clone);
1970       break;
1971     case coro::ABI::Async:
1972     case coro::ABI::Retcon:
1973     case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references the other clones.
1975       // Let the LazyCallGraph know about all of them at once.
1976       if (!Clones.empty())
1977         CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1978       break;
1979     }
1980 
1981     // Let the CGSCC infra handle the changes to the original function.
1982     updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
1983   }
1984 
1985   // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
1986   // to the split functions.
1987   postSplitCleanup(N.getFunction());
1988   updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
1989 }
1990 
1991 /// Replace a call to llvm.coro.prepare.retcon.
1992 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
1993                            LazyCallGraph::SCC &C) {
1994   auto CastFn = Prepare->getArgOperand(0); // as an i8*
1995   auto Fn = CastFn->stripPointerCasts();   // as its original type
1996 
1997   // Attempt to peephole this pattern:
1998   //    %0 = bitcast [[TYPE]] @some_function to i8*
1999   //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
2000   //    %2 = bitcast %1 to [[TYPE]]
2001   // ==>
2002   //    %2 = @some_function
2003   for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
2004     // Look for bitcasts back to the original function type.
2005     auto *Cast = dyn_cast<BitCastInst>(U.getUser());
2006     if (!Cast || Cast->getType() != Fn->getType())
2007       continue;
2008 
2009     // Replace and remove the cast.
2010     Cast->replaceAllUsesWith(Fn);
2011     Cast->eraseFromParent();
2012   }
2013 
2014   // Replace any remaining uses with the function as an i8*.
2015   // This can never directly be a callee, so we don't need to update CG.
2016   Prepare->replaceAllUsesWith(CastFn);
2017   Prepare->eraseFromParent();
2018 
2019   // Kill dead bitcasts.
2020   while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
2021     if (!Cast->use_empty())
2022       break;
2023     CastFn = Cast->getOperand(0);
2024     Cast->eraseFromParent();
2025   }
2026 }
2027 
2028 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
2029                                LazyCallGraph::SCC &C) {
2030   bool Changed = false;
2031   for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2032     // Intrinsics can only be used in calls.
2033     auto *Prepare = cast<CallInst>(P.getUser());
2034     replacePrepare(Prepare, CG, C);
2035     Changed = true;
2036   }
2037 
2038   return Changed;
2039 }
2040 
2041 static void addPrepareFunction(const Module &M,
2042                                SmallVectorImpl<Function *> &Fns,
2043                                StringRef Name) {
2044   auto *PrepareFn = M.getFunction(Name);
2045   if (PrepareFn && !PrepareFn->use_empty())
2046     Fns.push_back(PrepareFn);
2047 }
2048 
2049 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2050                                      CGSCCAnalysisManager &AM,
2051                                      LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2052   // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2053   //     non-zero number of nodes, so we assume that here and grab the first
2054   //     node's function's module.
2055   Module &M = *C.begin()->getFunction().getParent();
2056   auto &FAM =
2057       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2058 
2059   // Check for uses of llvm.coro.prepare.retcon/async.
2060   SmallVector<Function *, 2> PrepareFns;
2061   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2062   addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2063 
2064   // Find coroutines for processing.
2065   SmallVector<LazyCallGraph::Node *> Coroutines;
2066   for (LazyCallGraph::Node &N : C)
2067     if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2068       Coroutines.push_back(&N);
2069 
2070   if (Coroutines.empty() && PrepareFns.empty())
2071     return PreservedAnalyses::all();
2072 
2073   if (Coroutines.empty()) {
2074     for (auto *PrepareFn : PrepareFns) {
2075       replaceAllPrepares(PrepareFn, CG, C);
2076     }
2077   }
2078 
2079   // Split all the coroutines.
2080   for (LazyCallGraph::Node *N : Coroutines) {
2081     Function &F = N->getFunction();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "'\n");
2084     F.removeFnAttr(CORO_PRESPLIT_ATTR);
2085 
2086     SmallVector<Function *, 4> Clones;
2087     const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame);
2088     updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2089 
2090     if (!Shape.CoroSuspends.empty()) {
2091       // Run the CGSCC pipeline on the original and newly split functions.
2092       UR.CWorklist.insert(&C);
2093       for (Function *Clone : Clones)
2094         UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
2095     }
2096   }
2097 
2098   if (!PrepareFns.empty()) {
2099     for (auto *PrepareFn : PrepareFns) {
2100       replaceAllPrepares(PrepareFn, CG, C);
2101     }
2102   }
2103 
2104   return PreservedAnalyses::none();
2105 }
2106