1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/SmallString.h"
20 #include "llvm/Analysis/PtrUseVisitor.h"
21 #include "llvm/Analysis/StackLifetime.h"
22 #include "llvm/Config/llvm-config.h"
23 #include "llvm/IR/CFG.h"
24 #include "llvm/IR/DIBuilder.h"
25 #include "llvm/IR/Dominators.h"
26 #include "llvm/IR/IRBuilder.h"
27 #include "llvm/IR/InstIterator.h"
28 #include "llvm/Support/CommandLine.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/MathExtras.h"
31 #include "llvm/Support/OptimizedStructLayout.h"
32 #include "llvm/Support/circular_raw_ostream.h"
33 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
36 #include <algorithm>
37 
38 using namespace llvm;
39 
40 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
41 // "coro-frame", which results in leaner debug spew.
42 #define DEBUG_TYPE "coro-suspend-crossing"
43 
44 static cl::opt<bool> EnableReuseStorageInFrame(
45     "reuse-storage-in-coroutine-frame", cl::Hidden,
46     cl::desc(
47         "Enable the optimization which would reuse the storage in the coroutine \
48          frame for allocas whose liferanges are not overlapped, for testing purposes"),
49     llvm::cl::init(false));
50 
51 enum { SmallVectorThreshold = 32 };
52 
// Provides a two-way mapping between blocks and their indices.
54 namespace {
55 class BlockToIndexMapping {
56   SmallVector<BasicBlock *, SmallVectorThreshold> V;
57 
58 public:
59   size_t size() const { return V.size(); }
60 
61   BlockToIndexMapping(Function &F) {
62     for (BasicBlock &BB : F)
63       V.push_back(&BB);
64     llvm::sort(V);
65   }
66 
67   size_t blockToIndex(BasicBlock *BB) const {
68     auto *I = llvm::lower_bound(V, BB);
69     assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block");
70     return I - V.begin();
71   }
72 
73   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
74 };
75 } // end anonymous namespace
76 
// The SuspendCrossingInfo maintains data that allows us to answer the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
80 //
81 // For every basic block 'i' it maintains a BlockData that consists of:
82 //   Consumes:  a bit vector which contains a set of indices of blocks that can
83 //              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', where at least one of those paths crosses a
//          suspend point
86 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
87 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
88 //
89 namespace {
90 struct SuspendCrossingInfo {
91   BlockToIndexMapping Mapping;
92 
93   struct BlockData {
94     BitVector Consumes;
95     BitVector Kills;
96     bool Suspend = false;
97     bool End = false;
98   };
99   SmallVector<BlockData, SmallVectorThreshold> Block;
100 
101   iterator_range<succ_iterator> successors(BlockData const &BD) const {
102     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
103     return llvm::successors(BB);
104   }
105 
106   BlockData &getBlockData(BasicBlock *BB) {
107     return Block[Mapping.blockToIndex(BB)];
108   }
109 
110   void dump() const;
111   void dump(StringRef Label, BitVector const &BV) const;
112 
113   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
114 
115   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
116     size_t const DefIndex = Mapping.blockToIndex(DefBB);
117     size_t const UseIndex = Mapping.blockToIndex(UseBB);
118 
119     bool const Result = Block[UseIndex].Kills[DefIndex];
120     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
121                       << " answer is " << Result << "\n");
122     return Result;
123   }
124 
125   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
126     auto *I = cast<Instruction>(U);
127 
128     // We rewrote PHINodes, so that only the ones with exactly one incoming
129     // value need to be analyzed.
130     if (auto *PN = dyn_cast<PHINode>(I))
131       if (PN->getNumIncomingValues() > 1)
132         return false;
133 
134     BasicBlock *UseBB = I->getParent();
135 
136     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
137     // llvm.coro.suspend.async as if they were uses in the suspend's single
138     // predecessor: the uses conceptually occur before the suspend.
139     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
140       UseBB = UseBB->getSinglePredecessor();
141       assert(UseBB && "should have split coro.suspend into its own block");
142     }
143 
144     return hasPathCrossingSuspendPoint(DefBB, UseBB);
145   }
146 
147   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
148     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
149   }
150 
151   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
152     auto *DefBB = I.getParent();
153 
154     // As a special case, treat values produced by an llvm.coro.suspend.*
155     // as if they were defined in the single successor: the uses
156     // conceptually occur after the suspend.
157     if (isa<AnyCoroSuspendInst>(I)) {
158       DefBB = DefBB->getSingleSuccessor();
159       assert(DefBB && "should have split coro.suspend into its own block");
160     }
161 
162     return isDefinitionAcrossSuspend(DefBB, U);
163   }
164 };
165 } // end anonymous namespace
166 
167 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
168 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
169                                                 BitVector const &BV) const {
170   dbgs() << Label << ":";
171   for (size_t I = 0, N = BV.size(); I < N; ++I)
172     if (BV[I])
173       dbgs() << " " << Mapping.indexToBlock(I)->getName();
174   dbgs() << "\n";
175 }
176 
177 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
178   for (size_t I = 0, N = Block.size(); I < N; ++I) {
179     BasicBlock *const B = Mapping.indexToBlock(I);
180     dbgs() << B->getName() << ":\n";
181     dump("   Consumes", Block[I].Consumes);
182     dump("      Kills", Block[I].Kills);
183   }
184   dbgs() << "\n";
185 }
186 #endif
187 
188 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
189     : Mapping(F) {
190   const size_t N = Mapping.size();
191   Block.resize(N);
192 
193   // Initialize every block so that it consumes itself
194   for (size_t I = 0; I < N; ++I) {
195     auto &B = Block[I];
196     B.Consumes.resize(N);
197     B.Kills.resize(N);
198     B.Consumes.set(I);
199   }
200 
201   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
202   // the code beyond coro.end is reachable during initial invocation of the
203   // coroutine.
204   for (auto *CE : Shape.CoroEnds)
205     getBlockData(CE->getParent()).End = true;
206 
  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing a coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
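  // An illustrative sketch of the pattern in question (names are made up):
  //   %save = call token @llvm.coro.save(i8* %hdl)
  //   call void @may_resume_the_coroutine() ; crosses coro.save only
  //   %susp = call i8 @llvm.coro.suspend(token %save, i1 false)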
211   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
212     BasicBlock *SuspendBlock = BarrierInst->getParent();
213     auto &B = getBlockData(SuspendBlock);
214     B.Suspend = true;
215     B.Kills |= B.Consumes;
216   };
217   for (auto *CSI : Shape.CoroSuspends) {
218     markSuspendBlock(CSI);
219     if (auto *Save = CSI->getCoroSave())
220       markSuspendBlock(Save);
221   }
222 
223   // Iterate propagating consumes and kills until they stop changing.
224   int Iteration = 0;
225   (void)Iteration;
226 
227   bool Changed;
228   do {
229     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
230     LLVM_DEBUG(dbgs() << "==============\n");
231 
232     Changed = false;
233     for (size_t I = 0; I < N; ++I) {
234       auto &B = Block[I];
235       for (BasicBlock *SI : successors(B)) {
236 
237         auto SuccNo = Mapping.blockToIndex(SI);
238 
        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
241         auto &S = Block[SuccNo];
242         auto SavedConsumes = S.Consumes;
243         auto SavedKills = S.Kills;
244 
245         // Propagate Kills and Consumes from block B into its successor S.
246         S.Consumes |= B.Consumes;
247         S.Kills |= B.Kills;
248 
        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
251         if (B.Suspend) {
252           S.Kills |= B.Consumes;
253         }
254         if (S.Suspend) {
255           // If block S is a suspend block, it should kill all of the blocks it
256           // consumes.
257           S.Kills |= S.Consumes;
258         } else if (S.End) {
259           // If block S is an end block, it should not propagate kills as the
260           // blocks following coro.end() are reached during initial invocation
261           // of the coroutine while all the data are still available on the
262           // stack or in the registers.
263           S.Kills.reset();
264         } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block, and we need to make sure that it is not in its
          // own kill set.
267           S.Kills.reset(SuccNo);
268         }
269 
270         // See if anything changed.
271         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
272 
273         if (S.Kills != SavedKills) {
274           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
275                             << "\n");
276           LLVM_DEBUG(dump("S.Kills", S.Kills));
277           LLVM_DEBUG(dump("SavedKills", SavedKills));
278         }
279         if (S.Consumes != SavedConsumes) {
280           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
281           LLVM_DEBUG(dump("S.Consume", S.Consumes));
282           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
283         }
284       }
285     }
286   } while (Changed);
287   LLVM_DEBUG(dump());
288 }
289 
290 #undef DEBUG_TYPE // "coro-suspend-crossing"
291 #define DEBUG_TYPE "coro-frame"
292 
293 namespace {
294 class FrameTypeBuilder;
295 // Mapping from the to-be-spilled value to all the users that need reload.
296 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
297 struct AllocaInfo {
298   AllocaInst *Alloca;
299   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
300   bool MayWriteBeforeCoroBegin;
301   AllocaInfo(AllocaInst *Alloca,
302              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
303              bool MayWriteBeforeCoroBegin)
304       : Alloca(Alloca), Aliases(std::move(Aliases)),
305         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
306 };
307 struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
310   SpillInfo Spills;
311   // Allocas contains all values defined as allocas that need to live in the
312   // frame.
313   SmallVector<AllocaInfo, 8> Allocas;
314 
315   SmallVector<Value *, 8> getAllDefs() const {
316     SmallVector<Value *, 8> Defs;
317     for (const auto &P : Spills)
318       Defs.push_back(P.first);
319     for (const auto &A : Allocas)
320       Defs.push_back(A.Alloca);
321     return Defs;
322   }
323 
324   uint32_t getFieldIndex(Value *V) const {
325     auto Itr = FieldIndexMap.find(V);
326     assert(Itr != FieldIndexMap.end() &&
327            "Value does not have a frame field index");
328     return Itr->second;
329   }
330 
331   void setFieldIndex(Value *V, uint32_t Index) {
332     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
333            "Cannot set the index for the same field twice.");
334     FieldIndexMap[V] = Index;
335   }
336 
337   // Remap the index of every field in the frame, using the final layout index.
338   void updateLayoutIndex(FrameTypeBuilder &B);
339 
340 private:
341   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
342   // twice by mistake.
343   bool LayoutIndexUpdateStarted = false;
344   // Map from values to their slot indexes on the frame. They will be first set
345   // with their original insertion field index. After the frame is built, their
346   // indexes will be updated into the final layout index.
347   DenseMap<Value *, uint32_t> FieldIndexMap;
348 };
349 } // namespace
350 
351 #ifndef NDEBUG
352 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
353   dbgs() << "------------- " << Title << "--------------\n";
354   for (const auto &E : Spills) {
355     E.first->dump();
356     dbgs() << "   user: ";
357     for (auto *I : E.second)
358       I->dump();
359   }
360 }
361 
362 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
363   dbgs() << "------------- Allocas --------------\n";
364   for (const auto &A : Allocas) {
365     A.Alloca->dump();
366   }
367 }
368 #endif
369 
370 namespace {
371 using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
376 class FrameTypeBuilder {
377 private:
378   struct Field {
379     uint64_t Size;
380     uint64_t Offset;
381     Type *Ty;
382     FieldIDType LayoutFieldIndex;
383     Align Alignment;
384     Align TyAlignment;
385   };
386 
387   const DataLayout &DL;
388   LLVMContext &Context;
389   uint64_t StructSize = 0;
390   Align StructAlign;
391   bool IsFinished = false;
392 
393   SmallVector<Field, 8> Fields;
394   DenseMap<Value*, unsigned> FieldIndexByKey;
395 
396 public:
397   FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
398       : DL(DL), Context(Context) {}
399 
400   /// Add a field to this structure for the storage of an `alloca`
401   /// instruction.
402   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
403                                                bool IsHeader = false) {
404     Type *Ty = AI->getAllocatedType();
405 
406     // Make an array type if this is a static array allocation.
407     if (AI->isArrayAllocation()) {
408       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
409         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
410       else
411         report_fatal_error("Coroutines cannot handle non static allocas yet");
412     }
413 
414     return addField(Ty, AI->getAlign(), IsHeader);
415   }
416 
  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at https://bugs.llvm.org/show_bug.cgi?id=45566:
420   ///
421   ///     cppcoro::task<void> alternative_paths(bool cond) {
422   ///         if (cond) {
423   ///             big_structure a;
424   ///             process(a);
425   ///             co_await something();
426   ///         } else {
427   ///             big_structure b;
428   ///             process2(b);
429   ///             co_await something();
430   ///         }
431   ///     }
432   ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts into non-overlapping sets so that allocas in the same set can
  /// share one slot in the coroutine frame. It then adds one field per set,
  /// using the largest alloca's type as the field type.
  ///
  /// Side effects: because we sort the allocas, the order of allocas in the
  /// frame may differ from their order in the source code.
444   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
445                           coro::Shape &Shape);
446 
447   /// Add a field to this structure.
448   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
449                                       bool IsHeader = false) {
450     assert(!IsFinished && "adding fields to a finished builder");
451     assert(Ty && "must provide a type for a field");
452 
453     // The field size is always the alloc size of the type.
454     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
455 
456     // The field alignment might not be the type alignment, but we need
457     // to remember the type alignment anyway to build the type.
458     Align TyAlignment = DL.getABITypeAlign(Ty);
459     if (!FieldAlignment) FieldAlignment = TyAlignment;
460 
461     // Lay out header fields immediately.
462     uint64_t Offset;
463     if (IsHeader) {
464       Offset = alignTo(StructSize, FieldAlignment);
465       StructSize = Offset + FieldSize;
466 
467     // Everything else has a flexible offset.
468     } else {
469       Offset = OptimizedStructLayoutField::FlexibleOffset;
470     }
471 
472     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
473     return Fields.size() - 1;
474   }
475 
476   /// Finish the layout and set the body on the given type.
477   void finish(StructType *Ty);
478 
479   uint64_t getStructSize() const {
480     assert(IsFinished && "not yet finished!");
481     return StructSize;
482   }
483 
484   Align getStructAlign() const {
485     assert(IsFinished && "not yet finished!");
486     return StructAlign;
487   }
488 
489   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
490     assert(IsFinished && "not yet finished!");
491     return Fields[Id].LayoutFieldIndex;
492   }
493 };
494 } // namespace
495 
496 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
497   auto Updater = [&](Value *I) {
498     setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
499   };
500   LayoutIndexUpdateStarted = true;
501   for (auto &S : Spills)
502     Updater(S.first);
503   for (const auto &A : Allocas)
504     Updater(A.Alloca);
505   LayoutIndexUpdateStarted = false;
506 }
507 
508 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
509                                           FrameDataInfo &FrameData,
510                                           coro::Shape &Shape) {
511   using AllocaSetType = SmallVector<AllocaInst *, 4>;
512   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
513 
  // We need to add fields for the allocas at the end of this function.
  // However, this function has multiple exits, so we use this RAII helper to
  // avoid duplicating that code at every exit.
  struct RAIIHelper {
    std::function<void()> func;
    RAIIHelper(std::function<void()> &&func) : func(std::move(func)) {}
    ~RAIIHelper() { func(); }
  } Helper([&]() {
521     for (auto AllocaList : NonOverlapedAllocas) {
522       auto *LargestAI = *AllocaList.begin();
523       FieldIDType Id = addFieldForAlloca(LargestAI);
524       for (auto *Alloca : AllocaList)
525         FrameData.setFieldIndex(Alloca, Id);
526     }
527   });
528 
529   if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
530     for (const auto &A : FrameData.Allocas) {
531       AllocaInst *Alloca = A.Alloca;
532       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
533     }
534     return;
535   }
536 
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap in the
  // blocks that contain coro.end and their successors. So we skip
  // those blocks when we calculate the live range for each alloca.
  // This should be reasonable since there shouldn't be uses in these
  // blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle, and it is harmless to skip these
  // patterns since doing so merely prevents the affected allocas from sharing
  // the same slot.
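  //
  // The canonical user we are adjusting here looks like (illustrative):
  //   %susp = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %susp, label %suspend [i8 0, label %resume
  //                                    i8 1, label %cleanup]
  // Temporarily retargeting the default destination to the resume successor
  // keeps paths into the %suspend/coro.end blocks out of the StackLifetime
  // analysis below.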
549   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
550   for (auto CoroSuspendInst : Shape.CoroSuspends) {
551     for (auto U : CoroSuspendInst->users()) {
552       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
553         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
554         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
555         SWI->setDefaultDest(SWI->getSuccessor(1));
556       }
557     }
558   }
559 
560   auto ExtractAllocas = [&]() {
561     AllocaSetType Allocas;
562     Allocas.reserve(FrameData.Allocas.size());
563     for (const auto &A : FrameData.Allocas)
564       Allocas.push_back(A.Alloca);
565     return Allocas;
566   };
567   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
568                                       StackLifetime::LivenessType::May);
569   StackLifetimeAnalyzer.run();
570   auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
571     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
572         StackLifetimeAnalyzer.getLiveRange(AI2));
573   };
574   auto GetAllocaSize = [&](const AllocaInfo &A) {
575     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
576     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
577     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
578     return RetSize->getFixedSize();
579   };
  // Put larger allocas in front so that they have higher priority to be
  // merged, which can potentially save more space. This also keeps each
  // AllocaSet ordered, so the largest alloca in a set is easy to find.
584   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
585     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
586   });
587   for (const auto &A : FrameData.Allocas) {
588     AllocaInst *Alloca = A.Alloca;
589     bool Merged = false;
    // Check whether the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If so, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "Processing AllocaSet should not be empty.");
595       bool NoInference = none_of(AllocaSet, [&](auto Iter) {
596         return IsAllocaInferenre(Alloca, Iter);
597       });
      // If the alignment of A is a multiple of the alignment of B, an address
      // aligned for A also satisfies the alignment requirement of B.
      //
      // There may be more fine-grained strategies for handling alignment
      // information during merging, but they seem hard to implement and would
      // provide little benefit.
604       bool Alignable = [&]() -> bool {
605         auto *LargestAlloca = *AllocaSet.begin();
606         return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
607                0;
608       }();
609       bool CouldMerge = NoInference && Alignable;
610       if (!CouldMerge)
611         continue;
612       AllocaSet.push_back(Alloca);
613       Merged = true;
614       break;
615     }
616     if (!Merged) {
617       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
618     }
619   }
  // Restore the default destination of each switch instruction we modified
  // above.
622   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
623     SwitchInst *SWI = SwitchAndDefaultDest.first;
624     BasicBlock *DestBB = SwitchAndDefaultDest.second;
625     SWI->setDefaultDest(DestBB);
626   }
  // This debug output tells us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet : NonOverlapedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set\n";
      dbgs() << "\tAllocas are\n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
639 }
640 
641 void FrameTypeBuilder::finish(StructType *Ty) {
642   assert(!IsFinished && "already finished!");
643 
644   // Prepare the optimal-layout field array.
645   // The Id in the layout field is a pointer to our Field for it.
646   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
647   LayoutFields.reserve(Fields.size());
648   for (auto &Field : Fields) {
649     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
650                               Field.Offset);
651   }
652 
653   // Perform layout.
654   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
655   StructSize = SizeAndAlign.first;
656   StructAlign = SizeAndAlign.second;
657 
658   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
659     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
660   };
661 
662   // We need to produce a packed struct type if there's a field whose
663   // assigned offset isn't a multiple of its natural type alignment.
664   bool Packed = [&] {
665     for (auto &LayoutField : LayoutFields) {
666       auto &F = getField(LayoutField);
667       if (!isAligned(F.TyAlignment, LayoutField.Offset))
668         return true;
669     }
670     return false;
671   }();
672 
673   // Build the struct body.
674   SmallVector<Type*, 16> FieldTypes;
675   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
676   uint64_t LastOffset = 0;
677   for (auto &LayoutField : LayoutFields) {
678     auto &F = getField(LayoutField);
679 
680     auto Offset = LayoutField.Offset;
681 
682     // Add a padding field if there's a padding gap and we're either
683     // building a packed struct or the padding gap is more than we'd
684     // get from aligning to the field type's natural alignment.
685     assert(Offset >= LastOffset);
686     if (Offset != LastOffset) {
687       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
688         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
689                                             Offset - LastOffset));
690     }
691 
692     F.Offset = Offset;
693     F.LayoutFieldIndex = FieldTypes.size();
694 
695     FieldTypes.push_back(F.Ty);
696     LastOffset = Offset + F.Size;
697   }
698 
699   Ty->setBody(FieldTypes, Packed);
700 
701 #ifndef NDEBUG
702   // Check that the IR layout matches the offsets we expect.
703   auto Layout = DL.getStructLayout(Ty);
704   for (auto &F : Fields) {
705     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
706     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
707   }
708 #endif
709 
710   IsFinished = true;
711 }
712 
713 // Build a struct that will keep state for an active coroutine.
714 //   struct f.frame {
715 //     ResumeFnTy ResumeFnAddr;
716 //     ResumeFnTy DestroyFnAddr;
717 //     int ResumeIndex;
718 //     ... promise (if present) ...
719 //     ... spills ...
720 //   };
721 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
722                                   FrameDataInfo &FrameData) {
723   LLVMContext &C = F.getContext();
724   const DataLayout &DL = F.getParent()->getDataLayout();
725   StructType *FrameTy = [&] {
726     SmallString<32> Name(F.getName());
727     Name.append(".Frame");
728     return StructType::create(C, Name);
729   }();
730 
731   FrameTypeBuilder B(C, DL);
732 
733   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
734   Optional<FieldIDType> SwitchIndexFieldId;
735 
736   if (Shape.ABI == coro::ABI::Switch) {
737     auto *FramePtrTy = FrameTy->getPointerTo();
738     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
739                                    /*IsVarArg=*/false);
740     auto *FnPtrTy = FnTy->getPointerTo();
741 
742     // Add header fields for the resume and destroy functions.
743     // We can rely on these being perfectly packed.
744     (void)B.addField(FnPtrTy, None, /*header*/ true);
745     (void)B.addField(FnPtrTy, None, /*header*/ true);
746 
747     // PromiseAlloca field needs to be explicitly added here because it's
748     // a header field with a fixed offset based on its alignment. Hence it
749     // needs special handling and cannot be added to FrameData.Allocas.
750     if (PromiseAlloca)
751       FrameData.setFieldIndex(
752           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
753 
754     // Add a field to store the suspend index.  This doesn't need to
755     // be in the header.
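    // For example (illustrative): with three suspend points,
    // Log2_64_Ceil(3) == 2, so the index is stored as an i2; with a single
    // suspend point the std::max keeps the index at least one bit wide (i1).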
756     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
757     Type *IndexType = Type::getIntNTy(C, IndexBits);
758 
759     SwitchIndexFieldId = B.addField(IndexType, None);
760   } else {
761     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
762   }
763 
  // Because multiple allocas may share the same field slot,
  // we add the fields for the allocas here.
766   B.addFieldForAllocas(F, FrameData, Shape);
767   // Add PromiseAlloca to Allocas list so that
768   // 1. updateLayoutIndex could update its index after
769   // `performOptimizedStructLayout`
770   // 2. it is processed in insertSpills.
771   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
774     FrameData.Allocas.emplace_back(
775         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
776   // Create an entry for every spilled value.
777   for (auto &S : FrameData.Spills) {
778     FieldIDType Id = B.addField(S.first->getType(), None);
779     FrameData.setFieldIndex(S.first, Id);
780   }
781 
782   B.finish(FrameTy);
783   FrameData.updateLayoutIndex(B);
784   Shape.FrameAlign = B.getStructAlign();
785   Shape.FrameSize = B.getStructSize();
786 
787   switch (Shape.ABI) {
788   case coro::ABI::Switch:
789     // In the switch ABI, remember the switch-index field.
790     Shape.SwitchLowering.IndexField =
791         B.getLayoutFieldIndex(*SwitchIndexFieldId);
792 
793     // Also round the frame size up to a multiple of its alignment, as is
794     // generally expected in C/C++.
795     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
796     break;
797 
798   // In the retcon ABI, remember whether the frame is inline in the storage.
799   case coro::ABI::Retcon:
800   case coro::ABI::RetconOnce: {
801     auto Id = Shape.getRetconCoroId();
802     Shape.RetconLowering.IsFrameInlineInStorage
803       = (B.getStructSize() <= Id->getStorageSize() &&
804          B.getStructAlign() <= Id->getStorageAlignment());
805     break;
806   }
807   case coro::ABI::Async: {
808     Shape.AsyncLowering.FrameOffset =
809         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
810     // Also make the final context size a multiple of the context alignment to
811     // make allocation easier for allocators.
812     Shape.AsyncLowering.ContextSize =
813         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
814                 Shape.AsyncLowering.getContextAlignment());
815     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
816       report_fatal_error(
817           "The alignment requirment of frame variables cannot be higher than "
818           "the alignment of the async function context");
819     }
820     break;
821   }
822   }
823 
824   return FrameTy;
825 }
826 
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
// 1. Should this alloca be allocated on the frame instead?
// 2. Could the content of the alloca be modified prior to CoroBegin, which
//    would require copying the data from the alloca to the frame after
//    CoroBegin?
// 3. Are there any aliases created for this alloca prior to CoroBegin but used
//    after CoroBegin? In that case, we will need to recreate the aliases after
//    CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its aliases.
//      In the end, we check if there exist any two basic blocks that cross
//      suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some point,
//      either by storing the address somewhere, or by passing the address to a
//      function call that might capture it. If it's ever escaped, this alloca
//      must be put on the frame conservatively.
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
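//
// An illustrative sketch of the aliasing case (question 3), with made-up
// names:
//   %x = alloca [100 x i8]
//   %p = getelementptr inbounds [100 x i8], [100 x i8]* %x, i64 0, i64 16
//   %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
//   ; ... %p is used after a suspend point ...
// Here %p is an alias of %x with a known offset of 16 and must be recreated
// off the frame copy of %x after CoroBegin.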
852 namespace {
853 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
854   using Base = PtrUseVisitor<AllocaUseVisitor>;
855   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
856                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
857       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}
858 
859   void visit(Instruction &I) {
860     UserBBs.insert(I.getParent());
861     Base::visit(I);
862     // If the pointer is escaped prior to CoroBegin, we have to assume it would
863     // be written into before CoroBegin as well.
864     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
865       MayWriteBeforeCoroBegin = true;
866     }
867   }
868   // We need to provide this overload as PtrUseVisitor uses a pointer based
869   // visiting function.
870   void visit(Instruction *I) { return visit(*I); }
871 
872   void visitPHINode(PHINode &I) {
873     enqueueUsers(I);
874     handleAlias(I);
875   }
876 
877   void visitSelectInst(SelectInst &I) {
878     enqueueUsers(I);
879     handleAlias(I);
880   }
881 
882   void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
885     handleMayWrite(SI);
886 
887     if (SI.getValueOperand() != U->get())
888       return;
889 
890     // We are storing the pointer into a memory location, potentially escaping.
891     // As an optimization, we try to detect simple cases where it doesn't
892     // actually escape, for example:
893     //   %ptr = alloca ..
894     //   %addr = alloca ..
895     //   store %ptr, %addr
896     //   %x = load %addr
897     //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as escaped.
900     auto IsSimpleStoreThenLoad = [&]() {
901       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
902       // If the memory location we are storing to is not an alloca, it
903       // could be an alias of some other memory locations, which is difficult
904       // to analyze.
905       if (!AI)
906         return false;
907       // StoreAliases contains aliases of the memory location stored into.
908       SmallVector<Instruction *, 4> StoreAliases = {AI};
909       while (!StoreAliases.empty()) {
910         Instruction *I = StoreAliases.pop_back_val();
911         for (User *U : I->users()) {
912           // If we are loading from the memory location, we are creating an
913           // alias of the original pointer.
914           if (auto *LI = dyn_cast<LoadInst>(U)) {
915             enqueueUsers(*LI);
916             handleAlias(*LI);
917             continue;
918           }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
921           if (auto *S = dyn_cast<StoreInst>(U))
922             if (S->getPointerOperand() == I)
923               continue;
924           if (auto *II = dyn_cast<IntrinsicInst>(U))
925             if (II->isLifetimeStartOrEnd())
926               continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
929           if (auto *BI = dyn_cast<BitCastInst>(U)) {
930             StoreAliases.push_back(BI);
931             continue;
932           }
933           return false;
934         }
935       }
936 
937       return true;
938     };
939 
940     if (!IsSimpleStoreThenLoad())
941       PI.setEscaped(&SI);
942   }
943 
944   // All mem intrinsics modify the data.
945   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
946 
947   void visitBitCastInst(BitCastInst &BC) {
948     Base::visitBitCastInst(BC);
949     handleAlias(BC);
950   }
951 
952   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
953     Base::visitAddrSpaceCastInst(ASC);
954     handleAlias(ASC);
955   }
956 
957   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
958     // The base visitor will adjust Offset accordingly.
959     Base::visitGetElementPtrInst(GEPI);
960     handleAlias(GEPI);
961   }
962 
963   void visitCallBase(CallBase &CB) {
964     for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
965       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
966         PI.setEscaped(&CB);
967     handleMayWrite(CB);
968   }
969 
970   bool getShouldLiveOnFrame() const {
971     if (!ShouldLiveOnFrame)
972       ShouldLiveOnFrame = computeShouldLiveOnFrame();
973     return ShouldLiveOnFrame.getValue();
974   }
975 
976   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
977 
978   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
979     assert(getShouldLiveOnFrame() && "This method should only be called if the "
980                                      "alloca needs to live on the frame.");
981     for (const auto &P : AliasOffetMap)
982       if (!P.second)
983         report_fatal_error("Unable to handle an alias with unknown offset "
984                            "created before CoroBegin.");
985     return AliasOffetMap;
986   }
987 
988 private:
989   const DominatorTree &DT;
990   const CoroBeginInst &CoroBegin;
991   const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and its offset into
  // the original alloca. They need to be recreated off the frame after
  // CoroBegin.
995   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
996   SmallPtrSet<BasicBlock *, 2> UserBBs{};
997   bool MayWriteBeforeCoroBegin{false};
998 
999   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1000 
1001   bool computeShouldLiveOnFrame() const {
1002     if (PI.isEscaped())
1003       return true;
1004 
1005     for (auto *BB1 : UserBBs)
1006       for (auto *BB2 : UserBBs)
1007         if (Checker.hasPathCrossingSuspendPoint(BB1, BB2))
1008           return true;
1009 
1010     return false;
1011   }
1012 
1013   void handleMayWrite(const Instruction &I) {
1014     if (!DT.dominates(&CoroBegin, &I))
1015       MayWriteBeforeCoroBegin = true;
1016   }
1017 
1018   bool usedAfterCoroBegin(Instruction &I) {
1019     for (auto &U : I.uses())
1020       if (DT.dominates(&CoroBegin, U))
1021         return true;
1022     return false;
1023   }
1024 
1025   void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
1029     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1030       return;
1031 
1032     if (!IsOffsetKnown) {
1033       AliasOffetMap[&I].reset();
1034     } else {
1035       auto Itr = AliasOffetMap.find(&I);
1036       if (Itr == AliasOffetMap.end()) {
1037         AliasOffetMap[&I] = Offset;
1038       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1039         // If we have seen two different possible values for this alias, we set
1040         // it to empty.
1041         AliasOffetMap[&I].reset();
1042       }
1043     }
1044   }
1045 };
1046 } // namespace
1047 
// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
1057 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1058   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1059   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1060   CurrentBlock->getTerminator()->eraseFromParent();
1061 
1062   auto *CleanupPad =
1063       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1064   auto *CleanupRet =
1065       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1066   return CleanupRet;
1067 }
1068 
1069 // Replace all alloca and SSA values that are accessed across suspend points
1070 // with GetElementPointer from coroutine frame + loads and stores. Create an
1071 // AllocaSpillBB that will become the new entry block for the resume parts of
1072 // the coroutine:
1073 //
1074 //    %hdl = coro.begin(...)
1075 //    whatever
1076 //
1077 // becomes:
1078 //
1079 //    %hdl = coro.begin(...)
1080 //    %FramePtr = bitcast i8* hdl to %f.frame*
1081 //    br label %AllocaSpillBB
1082 //
1083 //  AllocaSpillBB:
1084 //    ; geps corresponding to allocas that were moved to coroutine frame
1085 //    br label PostSpill
1086 //
1087 //  PostSpill:
1088 //    whatever
1089 //
1090 //
1091 static Instruction *insertSpills(const FrameDataInfo &FrameData,
1092                                  coro::Shape &Shape) {
1093   auto *CB = Shape.CoroBegin;
1094   LLVMContext &C = CB->getContext();
1095   IRBuilder<> Builder(CB->getNextNode());
1096   StructType *FrameTy = Shape.FrameTy;
1097   PointerType *FramePtrTy = FrameTy->getPointerTo();
1098   auto *FramePtr =
1099       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1100   DominatorTree DT(*CB->getFunction());
1101   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1102 
1103   // Create a GEP with the given index into the coroutine frame for the original
1104   // value Orig. Appends an extra 0 index for array-allocas, preserving the
1105   // original type.
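  // For example (illustrative): for an alloca created as `alloca i32, i32 4`
  // and placed at frame field index 2, the field type is [4 x i32] and the
  // emitted GEP uses indices {0, 2, 0}, yielding an i32* that matches the
  // original alloca's pointer type.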
1106   auto GetFramePointer = [&](Value *Orig) -> Value * {
1107     FieldIDType Index = FrameData.getFieldIndex(Orig);
1108     SmallVector<Value *, 3> Indices = {
1109         ConstantInt::get(Type::getInt32Ty(C), 0),
1110         ConstantInt::get(Type::getInt32Ty(C), Index),
1111     };
1112 
1113     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1114       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1115         auto Count = CI->getValue().getZExtValue();
1116         if (Count > 1) {
1117           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1118         }
1119       } else {
1120         report_fatal_error("Coroutines cannot handle non static allocas yet");
1121       }
1122     }
1123 
1124     auto GEP = cast<GetElementPtrInst>(
1125         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1126     if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst, it
      // implies that the frame slot may be shared with another AllocaInst. So
      // we cast the GEP to the AllocaInst's type here to reuse the frame
      // storage.
      //
      // Note: if we change the strategy for dealing with alignment, we need to
      // refine this cast.
1134       if (GEP->getResultElementType() != Orig->getType())
1135         return Builder.CreateBitCast(GEP, Orig->getType(),
1136                                      Orig->getName() + Twine(".cast"));
1137     }
1138     return GEP;
1139   };
1140 
1141   for (auto const &E : FrameData.Spills) {
1142     Value *Def = E.first;
1143     // Create a store instruction storing the value into the
1144     // coroutine frame.
1145     Instruction *InsertPt = nullptr;
1146     if (auto *Arg = dyn_cast<Argument>(Def)) {
1147       // For arguments, we will place the store instruction right after
1148       // the coroutine frame pointer instruction, i.e. bitcast of
1149       // coro.begin from i8* to %f.frame*.
1150       InsertPt = FramePtr->getNextNode();
1151 
1152       // If we're spilling an Argument, make sure we clear 'nocapture'
1153       // from the coroutine function.
1154       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1155 
1156     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1157       // Don't spill immediately after a suspend; splitting assumes
1158       // that the suspend will be followed by a branch.
1159       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1160     } else {
1161       auto *I = cast<Instruction>(Def);
1162       if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, the spill should be
        // inserted immediately after the frame pointer is computed.
1165         InsertPt = FramePtr->getNextNode();
1166       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1167         // If we are spilling the result of the invoke instruction, split
1168         // the normal edge and insert the spill in the new block.
1169         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1170         InsertPt = NewBB->getTerminator();
1171       } else if (isa<PHINode>(I)) {
        // Skip the PHINodes and EH pad instructions.
1173         BasicBlock *DefBlock = I->getParent();
1174         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1175           InsertPt = splitBeforeCatchSwitch(CSI);
1176         else
1177           InsertPt = &*DefBlock->getFirstInsertionPt();
1178       } else {
1179         assert(!I->isTerminator() && "unexpected terminator");
1180         // For all other values, the spill is placed immediately after
1181         // the definition.
1182         InsertPt = I->getNextNode();
1183       }
1184     }
1185 
1186     auto Index = FrameData.getFieldIndex(Def);
1187     Builder.SetInsertPoint(InsertPt);
1188     auto *G = Builder.CreateConstInBoundsGEP2_32(
1189         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1190     Builder.CreateStore(Def, G);
1191 
1192     BasicBlock *CurrentBlock = nullptr;
1193     Value *CurrentReload = nullptr;
1194     for (auto *U : E.second) {
      // If we have not seen this use block yet, create a GEP into the
      // coroutine frame and a load instruction to reload the spilled value.
1198       if (CurrentBlock != U->getParent()) {
1199         CurrentBlock = U->getParent();
1200         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1201 
1202         auto *GEP = GetFramePointer(E.first);
1203         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1204         CurrentReload = Builder.CreateLoad(
1205             FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1206             E.first->getName() + Twine(".reload"));
1207 
1208         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1209         for (DbgDeclareInst *DDI : DIs) {
1210           bool AllowUnresolved = false;
1211           // This dbg.declare is preserved for all coro-split function
1212           // fragments. It will be unreachable in the main function, and
1213           // processed by coro::salvageDebugInfo() by CoroCloner.
1214           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1215               .insertDeclare(CurrentReload, DDI->getVariable(),
1216                              DDI->getExpression(), DDI->getDebugLoc(),
1217                              &*Builder.GetInsertPoint());
1218           // This dbg.declare is for the main function entry point.  It
1219           // will be deleted in all coro-split functions.
1220           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI);
1221         }
1222       }
1223 
1224       // If we have a single edge PHINode, remove it and replace it with a
1225       // reload from the coroutine frame. (We already took care of multi edge
1226       // PHINodes by rewriting them in the rewritePHIs function).
1227       if (auto *PN = dyn_cast<PHINode>(U)) {
1228         assert(PN->getNumIncomingValues() == 1 &&
1229                "unexpected number of incoming "
1230                "values in the PHINode");
1231         PN->replaceAllUsesWith(CurrentReload);
1232         PN->eraseFromParent();
1233         continue;
1234       }
1235 
1236       // Replace all uses of CurrentValue in the current instruction with
1237       // reload.
1238       U->replaceUsesOfWith(Def, CurrentReload);
1239     }
1240   }
1241 
1242   BasicBlock *FramePtrBB = FramePtr->getParent();
1243 
1244   auto SpillBlock =
1245       FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1246   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1247   Shape.AllocaSpillBlock = SpillBlock;
1248 
1249   // retcon and retcon.once lowering assumes all uses have been sunk.
1250   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1251       Shape.ABI == coro::ABI::Async) {
1252     // If we found any allocas, replace all of their remaining uses with Geps.
1253     Builder.SetInsertPoint(&SpillBlock->front());
1254     for (const auto &P : FrameData.Allocas) {
1255       AllocaInst *Alloca = P.Alloca;
1256       auto *G = GetFramePointer(Alloca);
1257 
1258       // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1259       // here, as we are changing location of the instruction.
1260       G->takeName(Alloca);
1261       Alloca->replaceAllUsesWith(G);
1262       Alloca->eraseFromParent();
1263     }
1264     return FramePtr;
1265   }
1266 
  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. Because new dbg.declares have been created for these
  // allocas, we also delete the original dbg.declares and replace other uses
  // with undef.
  // Note: We cannot replace the allocas with GEP instructions
  // indiscriminately, as some of the uses may not be dominated by CoroBegin.
1272   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1273   SmallVector<Instruction *, 4> UsersToUpdate;
1274   for (const auto &A : FrameData.Allocas) {
1275     AllocaInst *Alloca = A.Alloca;
1276     UsersToUpdate.clear();
1277     for (User *U : Alloca->users()) {
1278       auto *I = cast<Instruction>(U);
1279       if (DT.dominates(CB, I))
1280         UsersToUpdate.push_back(I);
1281     }
1282     if (UsersToUpdate.empty())
1283       continue;
1284     auto *G = GetFramePointer(Alloca);
1285     G->setName(Alloca->getName() + Twine(".reload.addr"));
1286 
1287     TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
1288     if (!DIs.empty())
1289       DIBuilder(*Alloca->getModule(),
1290                 /*AllowUnresolved*/ false)
1291           .insertDeclare(G, DIs.front()->getVariable(),
1292                          DIs.front()->getExpression(),
1293                          DIs.front()->getDebugLoc(), DIs.front());
1294     for (auto *DI : FindDbgDeclareUses(Alloca))
1295       DI->eraseFromParent();
1296     replaceDbgUsesWithUndef(Alloca);
1297 
1298     for (Instruction *I : UsersToUpdate)
1299       I->replaceUsesOfWith(Alloca, G);
1300   }
1301   Builder.SetInsertPoint(FramePtr->getNextNode());
1302   for (const auto &A : FrameData.Allocas) {
1303     AllocaInst *Alloca = A.Alloca;
1304     if (A.MayWriteBeforeCoroBegin) {
      // The alloca may have been written to before CoroBegin, so copy its
      // current contents into the frame slot.
1306       if (Alloca->isArrayAllocation())
1307         report_fatal_error(
1308             "Coroutines cannot handle copying of array allocas yet");
1309 
1310       auto *G = GetFramePointer(Alloca);
1311       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1312       Builder.CreateStore(Value, G);
1313     }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
1317     for (const auto &Alias : A.Aliases) {
1318       auto *FramePtr = GetFramePointer(Alloca);
1319       auto *FramePtrRaw =
1320           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1321       auto *AliasPtr = Builder.CreateGEP(
1322           FramePtrRaw,
1323           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1324       auto *AliasPtrTyped =
1325           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1326       Alias.first->replaceUsesWithIf(
1327           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1328     }
1329   }
1330   return FramePtr;
1331 }
1332 
1333 // Sets the unwind edge of an instruction to a particular successor.
1334 static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
1335   if (auto *II = dyn_cast<InvokeInst>(TI))
1336     II->setUnwindDest(Succ);
1337   else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
1338     CS->setUnwindDest(Succ);
1339   else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
1340     CR->setUnwindDest(Succ);
1341   else
1342     llvm_unreachable("unexpected terminator instruction");
1343 }
1344 
1345 // Replaces all uses of OldPred with the NewPred block in all PHINodes in a
1346 // block.
1347 static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
1348                            BasicBlock *NewPred, PHINode *Until = nullptr) {
1349   unsigned BBIdx = 0;
1350   for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
1351     PHINode *PN = cast<PHINode>(I);
1352 
1353     // We manually update the LandingPadReplacement PHINode and it is the last
1354     // PHI Node. So, if we find it, we are done.
1355     if (Until == PN)
1356       break;
1357 
1358     // Reuse the previous value of BBIdx if it lines up.  In cases where we
1359     // have multiple phi nodes with *lots* of predecessors, this is a speed
1360     // win because we don't have to scan the PHI looking for TIBB.  This
1361     // happens because the BB list of PHI nodes are usually in the same
1362     // order.
1363     if (PN->getIncomingBlock(BBIdx) != OldPred)
1364       BBIdx = PN->getBasicBlockIndex(OldPred);
1365 
1366     assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
1367     PN->setIncomingBlock(BBIdx, NewPred);
1368   }
1369 }
1370 
// Uses SplitEdge unless the successor block is an EHPad, in which case do
// EH-specific handling.
1373 static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
1374                                     LandingPadInst *OriginalPad,
1375                                     PHINode *LandingPadReplacement) {
1376   auto *PadInst = Succ->getFirstNonPHI();
1377   if (!LandingPadReplacement && !PadInst->isEHPad())
1378     return SplitEdge(BB, Succ);
1379 
1380   auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
1381   setUnwindEdgeTo(BB->getTerminator(), NewBB);
1382   updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);
1383 
1384   if (LandingPadReplacement) {
1385     auto *NewLP = OriginalPad->clone();
1386     auto *Terminator = BranchInst::Create(Succ, NewBB);
1387     NewLP->insertBefore(Terminator);
1388     LandingPadReplacement->addIncoming(NewLP, NewBB);
1389     return NewBB;
1390   }
1391   Value *ParentPad = nullptr;
1392   if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
1393     ParentPad = FuncletPad->getParentPad();
1394   else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
1395     ParentPad = CatchSwitch->getParentPad();
1396   else
1397     llvm_unreachable("handling for other EHPads not implemented yet");
1398 
1399   auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
1400   CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
1401   return NewBB;
1402 }
1403 
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
1406 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1407                                          BasicBlock *InsertedBB,
1408                                          BasicBlock *PredBB,
1409                                          PHINode *UntilPHI = nullptr) {
1410   auto *PN = cast<PHINode>(&SuccBB->front());
1411   do {
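    // Take the value that SuccBB's PHI receives along the edge that now
    // comes from InsertedBB, wrap it in a single-entry PHI placed in
    // InsertedBB, and point SuccBB's PHI at that new PHI instead.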
1412     int Index = PN->getBasicBlockIndex(InsertedBB);
1413     Value *V = PN->getIncomingValue(Index);
1414     PHINode *InputV = PHINode::Create(
1415         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1416         &InsertedBB->front());
1417     InputV->addIncoming(V, PredBB);
1418     PN->setIncomingValue(Index, InputV);
1419     PN = dyn_cast<PHINode>(PN->getNextNode());
1420   } while (PN != UntilPHI);
1421 }
1422 
1423 // Rewrites the PHI Nodes in a cleanuppad.
1424 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1425                                      CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
1430   //
1431   // cleanuppad:
1432   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1433   //    %3 = cleanuppad within none []
1434   //
1435   // It will create:
1436   //
1437   // cleanuppad.corodispatch
1438   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1439   //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
1441   //            [i8 0, label %cleanuppad.from.catchswitch
1442   //             i8 1, label %cleanuppad.from.catch.1]
1443   // cleanuppad.from.catchswitch:
1444   //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
1446   // cleanuppad.from.catch.1:
1447   //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
1449   // cleanuppad:
1450   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1451   //                 [%6, %cleanuppad.from.catch.1]
1452 
  // Unreachable BB, in case the dispatcher switches on an invalid value.
1454   auto *UnreachBB = BasicBlock::Create(
1455       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1456   IRBuilder<> Builder(UnreachBB);
1457   Builder.CreateUnreachable();
1458 
1459   // Create a new cleanuppad which will be the dispatcher.
1460   auto *NewCleanupPadBB =
1461       BasicBlock::Create(CleanupPadBB->getContext(),
1462                          CleanupPadBB->getName() + Twine(".corodispatch"),
1463                          CleanupPadBB->getParent(), CleanupPadBB);
1464   Builder.SetInsertPoint(NewCleanupPadBB);
1465   auto *SwitchType = Builder.getInt8Ty();
1466   auto *SetDispatchValuePN =
1467       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1468   CleanupPad->removeFromParent();
1469   CleanupPad->insertAfter(SetDispatchValuePN);
1470   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1471                                                 pred_size(CleanupPadBB));
1472 
1473   int SwitchIndex = 0;
1474   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
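  // Iterate over a copy of the predecessor list: redirecting the unwind
  // edges below changes CleanupPadBB's predecessors while we iterate.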
1475   for (BasicBlock *Pred : Preds) {
1476     // Create a new cleanuppad and move the PHI values to there.
1477     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1478                                       CleanupPadBB->getName() +
1479                                           Twine(".from.") + Pred->getName(),
1480                                       CleanupPadBB->getParent(), CleanupPadBB);
1481     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1482     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1483                     Pred->getName());
1484     Builder.SetInsertPoint(CaseBB);
1485     Builder.CreateBr(CleanupPadBB);
1486     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1487 
1488     // Update this Pred to the new unwind point.
1489     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1490 
1491     // Setup the switch in the dispatcher.
1492     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1493     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1494     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1495     SwitchIndex++;
1496   }
1497 }
1498 
1499 static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
1502   //
1503   // loop:
1504   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1505   //
1506   // It will create:
1507   //
1508   // loop.from.entry:
1509   //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
1511   // loop.from.loop:
1512   //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
1514   //
1515   // After this rewrite, further analysis will ignore any phi nodes with more
1516   // than one incoming edge.
1517 
1518   // TODO: Simplify PHINodes in the basic block to remove duplicate
1519   // predecessors.
1520 
1521   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1522   // so we need to create an additional "dispatcher" block.
1523   if (auto *CleanupPad =
1524           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1525     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1526     for (BasicBlock *Pred : Preds) {
1527       if (CatchSwitchInst *CS =
1528               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
1531         assert(CS->getUnwindDest() == &BB);
1532         (void)CS;
1533         rewritePHIsForCleanupPad(&BB, CleanupPad);
1534         return;
1535       }
1536     }
1537   }
1538 
1539   LandingPadInst *LandingPad = nullptr;
1540   PHINode *ReplPHI = nullptr;
1541   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1542     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1543     // We replace the original landing pad with a PHINode that will collect the
1544     // results from all of them.
1545     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1546     ReplPHI->takeName(LandingPad);
1547     LandingPad->replaceAllUsesWith(ReplPHI);
1548     // We will erase the original landing pad at the end of this function after
1549     // ehAwareSplitEdge cloned it in the transition blocks.
1550   }
1551 
1552   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1553   for (BasicBlock *Pred : Preds) {
1554     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1555     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1556 
1557     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1558     // that replaced the landing pad.
1559     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1560   }
1561 
1562   if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
1565     LandingPad->eraseFromParent();
1566   }
1567 }
1568 
1569 static void rewritePHIs(Function &F) {
1570   SmallVector<BasicBlock *, 8> WorkList;
1571 
1572   for (BasicBlock &BB : F)
1573     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1574       if (PN->getNumIncomingValues() > 1)
1575         WorkList.push_back(&BB);
1576 
1577   for (BasicBlock *BB : WorkList)
1578     rewritePHIs(*BB);
1579 }
1580 
// Check for instructions that we can recreate on resume as opposed to spilling
// the result into a coroutine frame.
1583 static bool materializable(Instruction &V) {
1584   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1585          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1586 }
1587 
1588 // Check for structural coroutine intrinsics that should not be spilled into
1589 // the coroutine frame.
1590 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1591   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1592          isa<CoroSuspendInst>(&I);
1593 }
1594 
// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
1597 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1598                                               const SpillInfo &Spills) {
1599   for (const auto &E : Spills) {
1600     Value *Def = E.first;
1601     BasicBlock *CurrentBlock = nullptr;
1602     Instruction *CurrentMaterialization = nullptr;
1603     for (Instruction *U : E.second) {
1604       // If we have not seen this block, materialize the value.
1605       if (CurrentBlock != U->getParent()) {
1606         CurrentBlock = U->getParent();
1607         CurrentMaterialization = cast<Instruction>(Def)->clone();
1608         CurrentMaterialization->setName(Def->getName());
1609         CurrentMaterialization->insertBefore(
1610             &*CurrentBlock->getFirstInsertionPt());
1611       }
1612       if (auto *PN = dyn_cast<PHINode>(U)) {
1613         assert(PN->getNumIncomingValues() == 1 &&
1614                "unexpected number of incoming "
1615                "values in the PHINode");
1616         PN->replaceAllUsesWith(CurrentMaterialization);
1617         PN->eraseFromParent();
1618         continue;
1619       }
1620       // Replace all uses of Def in the current instruction with the
1621       // CurrentMaterialization for the block.
1622       U->replaceUsesOfWith(Def, CurrentMaterialization);
1623     }
1624   }
1625 }
1626 
1627 // Splits the block at a particular instruction unless it is the first
1628 // instruction in the block with a single predecessor.
1629 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1630   auto *BB = I->getParent();
1631   if (&BB->front() == I) {
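    // I is already the first instruction of its block; if the block also has
    // a single predecessor, there is no need to split -- just rename it.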
1632     if (BB->getSinglePredecessor()) {
1633       BB->setName(Name);
1634       return BB;
1635     }
1636   }
1637   return BB->splitBasicBlock(I, Name);
1638 }
1639 
// Split above and below a particular instruction so that it ends up alone
// in its own block.
1642 static void splitAround(Instruction *I, const Twine &Name) {
1643   splitBlockIfNotFirst(I, Name);
1644   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1645 }
1646 
1647 static bool isSuspendBlock(BasicBlock *BB) {
1648   return isa<AnyCoroSuspendInst>(BB->front());
1649 }
1650 
1651 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
1652 
1653 /// Does control flow starting at the given block ever reach a suspend
1654 /// instruction before reaching a block in VisitedOrFreeBBs?
1655 static bool isSuspendReachableFrom(BasicBlock *From,
1656                                    VisitedBlocksSet &VisitedOrFreeBBs) {
1657   // Eagerly try to add this block to the visited set.  If it's already
1658   // there, stop recursing; this path doesn't reach a suspend before
1659   // either looping or reaching a freeing block.
1660   if (!VisitedOrFreeBBs.insert(From).second)
1661     return false;
1662 
1663   // We assume that we'll already have split suspends into their own blocks.
1664   if (isSuspendBlock(From))
1665     return true;
1666 
1667   // Recurse on the successors.
1668   for (auto Succ : successors(From)) {
1669     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
1670       return true;
1671   }
1672 
1673   return false;
1674 }
1675 
1676 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
1677 /// suspend point?
1678 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
1679   // Seed the visited set with all the basic blocks containing a free
1680   // so that we won't pass them up.
1681   VisitedBlocksSet VisitedOrFreeBBs;
1682   for (auto User : AI->users()) {
1683     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
1684       VisitedOrFreeBBs.insert(FI->getParent());
1685   }
1686 
1687   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
1688 }
1689 
1690 /// After we split the coroutine, will the given basic block be along
1691 /// an obvious exit path for the resumption function?
1692 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
1693                                               unsigned depth = 3) {
1694   // If we've bottomed out our depth count, stop searching and assume
1695   // that the path might loop back.
1696   if (depth == 0) return false;
1697 
1698   // If this is a suspend block, we're about to exit the resumption function.
1699   if (isSuspendBlock(BB)) return true;
1700 
1701   // Recurse into the successors.
1702   for (auto Succ : successors(BB)) {
1703     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
1704       return false;
1705   }
1706 
1707   // If none of the successors leads back in a loop, we're on an exit/abort.
1708   return true;
1709 }
1710 
1711 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
1712   // Look for a free that isn't sufficiently obviously followed by
1713   // either a suspend or a termination, i.e. something that will leave
1714   // the coro resumption frame.
1715   for (auto U : AI->users()) {
1716     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
1717     if (!FI) continue;
1718 
1719     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
1720       return true;
1721   }
1722 
1723   // If we never found one, we don't need a stack save.
1724   return false;
1725 }
1726 
1727 /// Turn each of the given local allocas into a normal (dynamic) alloca
1728 /// instruction.
1729 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
1730                               SmallVectorImpl<Instruction*> &DeadInsts) {
1731   for (auto AI : LocalAllocas) {
1732     auto M = AI->getModule();
1733     IRBuilder<> Builder(AI);
1734 
1735     // Save the stack depth.  Try to avoid doing this if the stackrestore
1736     // is going to immediately precede a return or something.
1737     Value *StackSave = nullptr;
1738     if (localAllocaNeedsStackSave(AI))
1739       StackSave = Builder.CreateCall(
1740                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
1741 
1742     // Allocate memory.
1743     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
1744     Alloca->setAlignment(Align(AI->getAlignment()));
1745 
1746     for (auto U : AI->users()) {
1747       // Replace gets with the allocation.
1748       if (isa<CoroAllocaGetInst>(U)) {
1749         U->replaceAllUsesWith(Alloca);
1750 
1751       // Replace frees with stackrestores.  This is safe because
1752       // alloca.alloc is required to obey a stack discipline, although we
1753       // don't enforce that structurally.
1754       } else {
1755         auto FI = cast<CoroAllocaFreeInst>(U);
1756         if (StackSave) {
1757           Builder.SetInsertPoint(FI);
1758           Builder.CreateCall(
1759                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
1760                              StackSave);
1761         }
1762       }
1763       DeadInsts.push_back(cast<Instruction>(U));
1764     }
1765 
1766     DeadInsts.push_back(AI);
1767   }
1768 }
1769 
1770 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
1771 /// This happens during the all-instructions iteration, so it must not
1772 /// delete the call.
1773 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
1774                                         coro::Shape &Shape,
1775                                    SmallVectorImpl<Instruction*> &DeadInsts) {
1776   IRBuilder<> Builder(AI);
1777   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
1778 
1779   for (User *U : AI->users()) {
1780     if (isa<CoroAllocaGetInst>(U)) {
1781       U->replaceAllUsesWith(Alloc);
1782     } else {
1783       auto FI = cast<CoroAllocaFreeInst>(U);
1784       Builder.SetInsertPoint(FI);
1785       Shape.emitDealloc(Builder, Alloc, nullptr);
1786     }
1787     DeadInsts.push_back(cast<Instruction>(U));
1788   }
1789 
1790   // Push this on last so that it gets deleted after all the others.
1791   DeadInsts.push_back(AI);
1792 
1793   // Return the new allocation value so that we can check for needed spills.
1794   return cast<Instruction>(Alloc);
1795 }
1796 
1797 /// Get the current swifterror value.
1798 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
1799                                      coro::Shape &Shape) {
1800   // Make a fake function pointer as a sort of intrinsic.
1801   auto FnTy = FunctionType::get(ValueTy, {}, false);
1802   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1803 
1804   auto Call = Builder.CreateCall(FnTy, Fn, {});
1805   Shape.SwiftErrorOps.push_back(Call);
1806 
1807   return Call;
1808 }
1809 
1810 /// Set the given value as the current swifterror value.
1811 ///
1812 /// Returns a slot that can be used as a swifterror slot.
1813 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
1814                                      coro::Shape &Shape) {
1815   // Make a fake function pointer as a sort of intrinsic.
1816   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
1817                                 {V->getType()}, false);
1818   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1819 
1820   auto Call = Builder.CreateCall(FnTy, Fn, { V });
1821   Shape.SwiftErrorOps.push_back(Call);
1822 
1823   return Call;
1824 }
1825 
/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
1828 ///
1829 /// Returns an address that will stand in for the swifterror slot
1830 /// until splitting.
1831 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
1832                                                  AllocaInst *Alloca,
1833                                                  coro::Shape &Shape) {
1834   auto ValueTy = Alloca->getAllocatedType();
1835   IRBuilder<> Builder(Call);
1836 
1837   // Load the current value from the alloca and set it as the
1838   // swifterror value.
1839   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
1840   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
1841 
1842   // Move to after the call.  Since swifterror only has a guaranteed
1843   // value on normal exits, we can ignore implicit and explicit unwind
1844   // edges.
1845   if (isa<CallInst>(Call)) {
1846     Builder.SetInsertPoint(Call->getNextNode());
1847   } else {
1848     auto Invoke = cast<InvokeInst>(Call);
1849     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1850   }
1851 
1852   // Get the current swifterror value and store it to the alloca.
1853   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
1854   Builder.CreateStore(ValueAfterCall, Alloca);
1855 
1856   return Addr;
1857 }
1858 
1859 /// Eliminate a formerly-swifterror alloca by inserting the get/set
1860 /// intrinsics and attempting to MemToReg the alloca away.
1861 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
1862                                       coro::Shape &Shape) {
1863   for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
1864     // We're likely changing the use list, so use a mutation-safe
1865     // iteration pattern.
1866     auto &Use = *UI;
1867     ++UI;
1868 
1869     // swifterror values can only be used in very specific ways.
1870     // We take advantage of that here.
1871     auto User = Use.getUser();
1872     if (isa<LoadInst>(User) || isa<StoreInst>(User))
1873       continue;
1874 
1875     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
1876     auto Call = cast<Instruction>(User);
1877 
1878     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
1879 
1880     // Use the returned slot address as the call argument.
1881     Use.set(Addr);
1882   }
1883 
1884   // All the uses should be loads and stores now.
1885   assert(isAllocaPromotable(Alloca));
1886 }
1887 
1888 /// "Eliminate" a swifterror argument by reducing it to the alloca case
1889 /// and then loading and storing in the prologue and epilog.
1890 ///
1891 /// The argument keeps the swifterror flag.
1892 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
1893                                         coro::Shape &Shape,
1894                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1895   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
1896 
1897   auto ArgTy = cast<PointerType>(Arg.getType());
1898   auto ValueTy = ArgTy->getElementType();
1899 
1900   // Reduce to the alloca case:
1901 
1902   // Create an alloca and replace all uses of the arg with it.
1903   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
1904   Arg.replaceAllUsesWith(Alloca);
1905 
1906   // Set an initial value in the alloca.  swifterror is always null on entry.
1907   auto InitialValue = Constant::getNullValue(ValueTy);
1908   Builder.CreateStore(InitialValue, Alloca);
1909 
1910   // Find all the suspends in the function and save and restore around them.
1911   for (auto Suspend : Shape.CoroSuspends) {
1912     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
1913   }
1914 
1915   // Find all the coro.ends in the function and restore the error value.
1916   for (auto End : Shape.CoroEnds) {
1917     Builder.SetInsertPoint(End);
1918     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
1919     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
1920   }
1921 
1922   // Now we can use the alloca logic.
1923   AllocasToPromote.push_back(Alloca);
1924   eliminateSwiftErrorAlloca(F, Alloca, Shape);
1925 }
1926 
1927 /// Eliminate all problematic uses of swifterror arguments and allocas
1928 /// from the function.  We'll fix them up later when splitting the function.
1929 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
1930   SmallVector<AllocaInst*, 4> AllocasToPromote;
1931 
1932   // Look for a swifterror argument.
1933   for (auto &Arg : F.args()) {
1934     if (!Arg.hasSwiftErrorAttr()) continue;
1935 
1936     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
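    // A function may have at most one swifterror argument, so we are done.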
1937     break;
1938   }
1939 
1940   // Look for swifterror allocas.
1941   for (auto &Inst : F.getEntryBlock()) {
1942     auto Alloca = dyn_cast<AllocaInst>(&Inst);
1943     if (!Alloca || !Alloca->isSwiftError()) continue;
1944 
1945     // Clear the swifterror flag.
1946     Alloca->setSwiftError(false);
1947 
1948     AllocasToPromote.push_back(Alloca);
1949     eliminateSwiftErrorAlloca(F, Alloca, Shape);
1950   }
1951 
1952   // If we have any allocas to promote, compute a dominator tree and
1953   // promote them en masse.
1954   if (!AllocasToPromote.empty()) {
1955     DominatorTree DT(F);
1956     PromoteMemToReg(AllocasToPromote, DT);
1957   }
1958 }
1959 
1960 /// retcon and retcon.once conventions assume that all spill uses can be sunk
1961 /// after the coro.begin intrinsic.
1962 static void sinkSpillUsesAfterCoroBegin(Function &F,
1963                                         const FrameDataInfo &FrameData,
1964                                         CoroBeginInst *CoroBegin) {
1965   DominatorTree Dom(F);
1966 
1967   SmallSetVector<Instruction *, 32> ToMove;
1968   SmallVector<Instruction *, 32> Worklist;
1969 
1970   // Collect all users that precede coro.begin.
1971   for (auto *Def : FrameData.getAllDefs()) {
1972     for (User *U : Def->users()) {
1973       auto Inst = cast<Instruction>(U);
1974       if (Inst->getParent() != CoroBegin->getParent() ||
1975           Dom.dominates(CoroBegin, Inst))
1976         continue;
1977       if (ToMove.insert(Inst))
1978         Worklist.push_back(Inst);
1979     }
1980   }
1981   // Recursively collect users before coro.begin.
1982   while (!Worklist.empty()) {
1983     auto *Def = Worklist.pop_back_val();
1984     for (User *U : Def->users()) {
1985       auto Inst = cast<Instruction>(U);
1986       if (Dom.dominates(CoroBegin, Inst))
1987         continue;
1988       if (ToMove.insert(Inst))
1989         Worklist.push_back(Inst);
1990     }
1991   }
1992 
1993   // Sort by dominance.
1994   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
1995   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
1997     return Dom.dominates(A, B);
1998   });
1999 
2000   Instruction *InsertPt = CoroBegin->getNextNode();
2001   for (Instruction *Inst : InsertionList)
2002     Inst->moveBefore(InsertPt);
2003 }
2004 
/// For each local variable whose users all live inside a single suspended
/// region, sink its lifetime.start markers into the block right after the
/// suspend block. Doing so minimizes the lifetime of each variable, hence
/// minimizing the amount of data we end up putting on the frame.
2009 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2010                                      SuspendCrossingInfo &Checker) {
2011   DominatorTree DT(F);
2012 
2013   // Collect all possible basic blocks which may dominate all uses of allocas.
2014   SmallPtrSet<BasicBlock *, 4> DomSet;
2015   DomSet.insert(&F.getEntryBlock());
2016   for (auto *CSI : Shape.CoroSuspends) {
2017     BasicBlock *SuspendBlock = CSI->getParent();
2018     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2019            "should have split coro.suspend into its own block");
2020     DomSet.insert(SuspendBlock->getSingleSuccessor());
2021   }
2022 
2023   for (Instruction &I : instructions(F)) {
2024     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2025     if (!AI)
2026       continue;
2027 
2028     for (BasicBlock *DomBB : DomSet) {
2029       bool Valid = true;
2030       SmallVector<Instruction *, 1> Lifetimes;
2031 
2032       auto isLifetimeStart = [](Instruction* I) {
2033         if (auto* II = dyn_cast<IntrinsicInst>(I))
2034           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2035         return false;
2036       };
2037 
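      // Returns true if U is a lifetime.start on AI, or a single-use pointer
      // cast of AI whose sole user is a lifetime.start; in either case the
      // lifetime.start found is recorded in Lifetimes.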
2038       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2039         if (isLifetimeStart(U)) {
2040           Lifetimes.push_back(U);
2041           return true;
2042         }
2043         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2044           return false;
2045         if (isLifetimeStart(U->user_back())) {
2046           Lifetimes.push_back(U->user_back());
2047           return true;
2048         }
2049         return false;
2050       };
2051 
2052       for (User *U : AI->users()) {
2053         Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers: if they are all
        // dominated by one of the candidate basic blocks and do not cross
        // suspend points either, then there is no need to spill the
        // instruction.
2058         if (!DT.dominates(DomBB, UI->getParent()) ||
2059             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2060           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2061           // markers.
2062           if (collectLifetimeStart(UI, AI))
2063             continue;
2064           Valid = false;
2065           break;
2066         }
2067       }
      // Sink the lifetime.start markers to the dominating block when they
      // are the only users that live outside that block's region.
2070       if (Valid && Lifetimes.size() != 0) {
        // This may be AI itself when AI already has type i8*.
2072         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2073           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2074             return AI;
2075           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2076           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2077                                   DomBB->getTerminator());
2078         }(AI);
2079 
2080         auto *NewLifetime = Lifetimes[0]->clone();
2081         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2082         NewLifetime->insertBefore(DomBB->getTerminator());
2083 
        // The original lifetime.start markers outside the region are no
        // longer necessary.
2085         for (Instruction *S : Lifetimes)
2086           S->eraseFromParent();
2087 
2088         break;
2089       }
2090     }
2091   }
2092 }
2093 
2094 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2095                                 const SuspendCrossingInfo &Checker,
2096                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2097   // Collect lifetime.start info for each alloca.
2098   using LifetimeStart = SmallPtrSet<Instruction *, 2>;
2099   llvm::DenseMap<AllocaInst *, std::unique_ptr<LifetimeStart>> LifetimeMap;
2100   for (Instruction &I : instructions(F)) {
2101     auto *II = dyn_cast<IntrinsicInst>(&I);
2102     if (!II || II->getIntrinsicID() != Intrinsic::lifetime_start)
2103       continue;
2104 
2105     if (auto *OpInst = dyn_cast<Instruction>(II->getOperand(1))) {
2106       if (auto *AI = dyn_cast<AllocaInst>(OpInst->stripPointerCasts())) {
2107 
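        // Record the lifetime.start itself when it operates directly on the
        // alloca, otherwise record the cast that feeds it; the recorded
        // instruction serves as the definition point in the cross-suspend
        // check below.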
2108         if (LifetimeMap.find(AI) == LifetimeMap.end())
2109           LifetimeMap[AI] = std::make_unique<LifetimeStart>();
2110         LifetimeMap[AI]->insert(isa<AllocaInst>(OpInst) ? II : OpInst);
2111       }
2112     }
2113   }
2114 
2115   for (Instruction &I : instructions(F)) {
2116     auto *AI = dyn_cast<AllocaInst>(&I);
2117     if (!AI)
2118       continue;
2119     // The PromiseAlloca will be specially handled since it needs to be in a
2120     // fixed position in the frame.
2121     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2122       continue;
2123     }
2124     bool ShouldLiveOnFrame = false;
2125     auto Iter = LifetimeMap.find(AI);
2126     if (Iter != LifetimeMap.end()) {
2127       // Check against lifetime.start if the instruction has the info.
2128       for (User *U : I.users()) {
2129         for (auto *S : *Iter->second)
2130           if ((ShouldLiveOnFrame = Checker.isDefinitionAcrossSuspend(*S, U)))
2131             break;
2132         if (ShouldLiveOnFrame)
2133           break;
2134       }
2135       if (!ShouldLiveOnFrame)
2136         continue;
2137     }
2138     // At this point, either ShouldLiveOnFrame is true or we didn't have
2139     // lifetime information. We will need to rely on more precise pointer
2140     // tracking.
2141     DominatorTree DT(F);
2142     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2143                              *Shape.CoroBegin, Checker};
2144     Visitor.visitPtr(*AI);
2145     if (!Visitor.getShouldLiveOnFrame())
2146       continue;
2147     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2148                          Visitor.getMayWriteBeforeCoroBegin());
2149   }
2150 }
2151 
2152 void coro::salvageDebugInfo(
2153     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2154     DbgDeclareInst *DDI) {
2155   Function *F = DDI->getFunction();
2156   IRBuilder<> Builder(F->getContext());
2157   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
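  // Skip past any intrinsics at the start of the entry block; this is where
  // the helper alloca created below (if any) will be inserted.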
2158   while (isa<IntrinsicInst>(InsertPt))
2159     ++InsertPt;
2160   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2161   DIExpression *Expr = DDI->getExpression();
2162   // Follow the pointer arithmetic all the way to the incoming
2163   // function argument and convert into a DIExpression.
2164   bool OutermostLoad = true;
2165   Value *Storage = DDI->getAddress();
2166   while (Storage) {
2167     if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
2168       Storage = LdInst->getOperand(0);
2169       // FIXME: This is a heuristic that works around the fact that
2170       // LLVM IR debug intrinsics cannot yet distinguish between
2171       // memory and value locations: Because a dbg.declare(alloca) is
2172       // implicitly a memory location no DW_OP_deref operation for the
2173       // last direct load from an alloca is necessary.  This condition
2174       // effectively drops the *last* DW_OP_deref in the expression.
2175       if (!OutermostLoad)
2176         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2177       OutermostLoad = false;
2178     } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
2179       Storage = StInst->getOperand(0);
2180     } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
2181       Expr = llvm::salvageDebugInfoImpl(*GEPInst, Expr,
2182                                         /*WithStackValue=*/false);
2183       if (!Expr)
2184         return;
2185       Storage = GEPInst->getOperand(0);
2186     } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
2187       Storage = BCInst->getOperand(0);
2188     else
2189       break;
2190   }
2191   // Store a pointer to the coroutine frame object in an alloca so it
2192   // is available throughout the function when producing unoptimized
2193   // code. Extending the lifetime this way is correct because the
2194   // variable has been declared by a dbg.declare intrinsic.
2195   if (auto Arg = dyn_cast_or_null<llvm::Argument>(Storage)) {
2196     auto &Cached = DbgPtrAllocaCache[Storage];
2197     if (!Cached) {
2198       Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2199                                     Arg->getName() + ".debug");
2200       Builder.CreateStore(Storage, Cached);
2201     }
2202     Storage = Cached;
2203     // FIXME: LLVM lacks nuanced semantics to differentiate between
2204     // memory and direct locations at the IR level. The backend will
2205     // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2206     // location. Thus, if there are deref and offset operations in the
2207     // expression, we need to add a DW_OP_deref at the *start* of the
2208     // expression to first load the contents of the alloca before
2209     // adjusting it with the expression.
2210     if (Expr && Expr->isComplex())
2211       Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2212   }
2213   auto &VMContext = DDI->getFunction()->getContext();
2214   DDI->setOperand(
2215       0, MetadataAsValue::get(VMContext, ValueAsMetadata::get(Storage)));
2216   DDI->setOperand(2, MetadataAsValue::get(VMContext, Expr));
2217   if (auto *InsertPt = dyn_cast_or_null<Instruction>(Storage))
2218     DDI->moveAfter(InsertPt);
2219 }
2220 
2221 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2222   // Don't eliminate swifterror in async functions that won't be split.
2223   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2224     eliminateSwiftError(F, Shape);
2225 
2226   if (Shape.ABI == coro::ABI::Switch &&
2227       Shape.SwitchLowering.PromiseAlloca) {
2228     Shape.getSwitchCoroId()->clearPromise();
2229   }
2230 
2231   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2232   // intrinsics are in their own blocks to simplify the logic of building up
2233   // SuspendCrossing data.
2234   for (auto *CSI : Shape.CoroSuspends) {
2235     if (auto *Save = CSI->getCoroSave())
2236       splitAround(Save, "CoroSave");
2237     splitAround(CSI, "CoroSuspend");
2238   }
2239 
2240   // Put CoroEnds into their own blocks.
2241   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2242     splitAround(CE, "CoroEnd");
2243 
    // Emit the musttail call in a new block before the CoroEnd.
    // We do this here so that the right suspend-crossing info is computed for
    // the operands of the musttail call. (Arguments to the coro.end
    // instruction itself would otherwise be ignored.)
2248     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2249       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2250       if (!MustTailCallFn)
2251         continue;
2252       IRBuilder<> Builder(AsyncEnd);
2253       SmallVector<Value *, 8> Args(AsyncEnd->args());
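      // Skip the leading operands of coro.end.async; the remaining operands
      // are the arguments forwarded to the must-tail call.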
2254       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2255       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2256                                       Arguments, Builder);
2257       splitAround(Call, "MustTailCall.Before.CoroEnd");
2258     }
2259   }
2260 
  // Transform multi-edge PHI nodes so that any value feeding into a PHI never
  // has its definition separated from the PHI by a suspend point.
2263   rewritePHIs(F);
2264 
2265   // Build suspend crossing info.
2266   SuspendCrossingInfo Checker(F, Shape);
2267 
2268   IRBuilder<> Builder(F.getContext());
2269   FrameDataInfo FrameData;
2270   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2271   SmallVector<Instruction*, 4> DeadInstructions;
2272 
2273   {
2274     SpillInfo Spills;
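    // Iterate a few times: materializing an instruction next to its user can
    // leave the clone's operands with new uses across the suspend point, and
    // those operands may themselves be materializable.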
2275     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2276       // See if there are materializable instructions across suspend points.
2277       for (Instruction &I : instructions(F))
2278         if (materializable(I))
2279           for (User *U : I.users())
2280             if (Checker.isDefinitionAcrossSuspend(I, U))
2281               Spills[&I].push_back(cast<Instruction>(U));
2282 
2283       if (Spills.empty())
2284         break;
2285 
2286       // Rewrite materializable instructions to be materialized at the use
2287       // point.
2288       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2289       rewriteMaterializableInstructions(Builder, Spills);
2290       Spills.clear();
2291     }
2292   }
2293 
2294   sinkLifetimeStartMarkers(F, Shape, Checker);
2295   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2296     collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2297   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2298 
2299   // Collect the spills for arguments and other not-materializable values.
2300   for (Argument &A : F.args())
2301     for (User *U : A.users())
2302       if (Checker.isDefinitionAcrossSuspend(A, U))
2303         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2304 
2305   for (Instruction &I : instructions(F)) {
2306     // Values returned from coroutine structure intrinsics should not be part
2307     // of the Coroutine Frame.
2308     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2309       continue;
2310 
    // The coroutine promise is always included in the coroutine frame, so
    // there is no need to check for suspend crossing.
2313     if (Shape.ABI == coro::ABI::Switch &&
2314         Shape.SwitchLowering.PromiseAlloca == &I)
2315       continue;
2316 
2317     // Handle alloca.alloc specially here.
2318     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2319       // Check whether the alloca's lifetime is bounded by suspend points.
2320       if (isLocalAlloca(AI)) {
2321         LocalAllocas.push_back(AI);
2322         continue;
2323       }
2324 
2325       // If not, do a quick rewrite of the alloca and then add spills of
2326       // the rewritten value.  The rewrite doesn't invalidate anything in
2327       // Spills because the other alloca intrinsics have no other operands
2328       // besides AI, and it doesn't invalidate the iteration because we delay
2329       // erasing AI.
2330       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2331 
2332       for (User *U : Alloc->users()) {
2333         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2334           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2335       }
2336       continue;
2337     }
2338 
2339     // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2340     if (isa<CoroAllocaGetInst>(I))
2341       continue;
2342 
2343     if (isa<AllocaInst>(I))
2344       continue;
2345 
2346     for (User *U : I.users())
2347       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2348         // We cannot spill a token.
2349         if (I.getType()->isTokenTy())
2350           report_fatal_error(
2351               "token definition is separated from the use by a suspend point");
2352         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2353       }
2354   }
2355   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2356   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2357       Shape.ABI == coro::ABI::Async)
2358     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2359   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2360   Shape.FramePtr = insertSpills(FrameData, Shape);
2361   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2362 
2363   for (auto I : DeadInstructions)
2364     I->eraseFromParent();
2365 }
2366