1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This file contains classes used to discover if for a particular value
9 // there from sue to definition that crosses a suspend block.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //
16 // TODO: pack values tightly using liveness info.
17 //===----------------------------------------------------------------------===//
18 
19 #include "CoroInternal.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/SmallString.h"
22 #include "llvm/Analysis/PtrUseVisitor.h"
23 #include "llvm/Analysis/StackLifetime.h"
24 #include "llvm/Config/llvm-config.h"
25 #include "llvm/IR/CFG.h"
26 #include "llvm/IR/DIBuilder.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstIterator.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/OptimizedStructLayout.h"
34 #include "llvm/Support/circular_raw_ostream.h"
35 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
38 #include <algorithm>
39 
40 using namespace llvm;
41 
42 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
43 // "coro-frame", which results in leaner debug spew.
44 #define DEBUG_TYPE "coro-suspend-crossing"
45 
46 static cl::opt<bool> EnableReuseStorageInFrame(
47     "reuse-storage-in-coroutine-frame", cl::Hidden,
48     cl::desc(
49         "Enable the optimization which would reuse the storage in the coroutine \
50          frame for allocas whose liferanges are not overlapped, for testing purposes"),
51     llvm::cl::init(false));
52 
53 enum { SmallVectorThreshold = 32 };
54 
55 // Provides two way mapping between the blocks and numbers.
56 namespace {
57 class BlockToIndexMapping {
58   SmallVector<BasicBlock *, SmallVectorThreshold> V;
59 
60 public:
61   size_t size() const { return V.size(); }
62 
63   BlockToIndexMapping(Function &F) {
64     for (BasicBlock &BB : F)
65       V.push_back(&BB);
66     llvm::sort(V);
67   }
68 
69   size_t blockToIndex(BasicBlock *BB) const {
70     auto *I = llvm::lower_bound(V, BB);
71     assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block");
72     return I - V.begin();
73   }
74 
75   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
76 };
77 } // end anonymous namespace
78 
79 // The SuspendCrossingInfo maintains data that allows to answer a question
80 // whether given two BasicBlocks A and B there is a path from A to B that
81 // passes through a suspend point.
82 //
83 // For every basic block 'i' it maintains a BlockData that consists of:
84 //   Consumes:  a bit vector which contains a set of indices of blocks that can
85 //              reach block 'i'
86 //   Kills: a bit vector which contains a set of indices of blocks that can
87 //          reach block 'i', but one of the path will cross a suspend point
88 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
89 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
90 //
91 namespace {
92 struct SuspendCrossingInfo {
93   BlockToIndexMapping Mapping;
94 
95   struct BlockData {
96     BitVector Consumes;
97     BitVector Kills;
98     bool Suspend = false;
99     bool End = false;
100   };
101   SmallVector<BlockData, SmallVectorThreshold> Block;
102 
103   iterator_range<succ_iterator> successors(BlockData const &BD) const {
104     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
105     return llvm::successors(BB);
106   }
107 
108   BlockData &getBlockData(BasicBlock *BB) {
109     return Block[Mapping.blockToIndex(BB)];
110   }
111 
112   void dump() const;
113   void dump(StringRef Label, BitVector const &BV) const;
114 
115   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
116 
117   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
118     size_t const DefIndex = Mapping.blockToIndex(DefBB);
119     size_t const UseIndex = Mapping.blockToIndex(UseBB);
120 
121     bool const Result = Block[UseIndex].Kills[DefIndex];
122     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
123                       << " answer is " << Result << "\n");
124     return Result;
125   }
126 
127   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
128     auto *I = cast<Instruction>(U);
129 
130     // We rewrote PHINodes, so that only the ones with exactly one incoming
131     // value need to be analyzed.
132     if (auto *PN = dyn_cast<PHINode>(I))
133       if (PN->getNumIncomingValues() > 1)
134         return false;
135 
136     BasicBlock *UseBB = I->getParent();
137 
138     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
139     // llvm.coro.suspend.async as if they were uses in the suspend's single
140     // predecessor: the uses conceptually occur before the suspend.
141     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
142       UseBB = UseBB->getSinglePredecessor();
143       assert(UseBB && "should have split coro.suspend into its own block");
144     }
145 
146     return hasPathCrossingSuspendPoint(DefBB, UseBB);
147   }
148 
149   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
150     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
151   }
152 
153   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
154     auto *DefBB = I.getParent();
155 
156     // As a special case, treat values produced by an llvm.coro.suspend.*
157     // as if they were defined in the single successor: the uses
158     // conceptually occur after the suspend.
159     if (isa<AnyCoroSuspendInst>(I)) {
160       DefBB = DefBB->getSingleSuccessor();
161       assert(DefBB && "should have split coro.suspend into its own block");
162     }
163 
164     return isDefinitionAcrossSuspend(DefBB, U);
165   }
166 };
167 } // end anonymous namespace
168 
169 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
170 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
171                                                 BitVector const &BV) const {
172   dbgs() << Label << ":";
173   for (size_t I = 0, N = BV.size(); I < N; ++I)
174     if (BV[I])
175       dbgs() << " " << Mapping.indexToBlock(I)->getName();
176   dbgs() << "\n";
177 }
178 
179 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
180   for (size_t I = 0, N = Block.size(); I < N; ++I) {
181     BasicBlock *const B = Mapping.indexToBlock(I);
182     dbgs() << B->getName() << ":\n";
183     dump("   Consumes", Block[I].Consumes);
184     dump("      Kills", Block[I].Kills);
185   }
186   dbgs() << "\n";
187 }
188 #endif
189 
190 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
191     : Mapping(F) {
192   const size_t N = Mapping.size();
193   Block.resize(N);
194 
195   // Initialize every block so that it consumes itself
196   for (size_t I = 0; I < N; ++I) {
197     auto &B = Block[I];
198     B.Consumes.resize(N);
199     B.Kills.resize(N);
200     B.Consumes.set(I);
201   }
202 
203   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
204   // the code beyond coro.end is reachable during initial invocation of the
205   // coroutine.
206   for (auto *CE : Shape.CoroEnds)
207     getBlockData(CE->getParent()).End = true;
208 
209   // Mark all suspend blocks and indicate that they kill everything they
210   // consume. Note, that crossing coro.save also requires a spill, as any code
211   // between coro.save and coro.suspend may resume the coroutine and all of the
212   // state needs to be saved by that time.
213   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
214     BasicBlock *SuspendBlock = BarrierInst->getParent();
215     auto &B = getBlockData(SuspendBlock);
216     B.Suspend = true;
217     B.Kills |= B.Consumes;
218   };
219   for (auto *CSI : Shape.CoroSuspends) {
220     markSuspendBlock(CSI);
221     if (auto *Save = CSI->getCoroSave())
222       markSuspendBlock(Save);
223   }
224 
225   // Iterate propagating consumes and kills until they stop changing.
226   int Iteration = 0;
227   (void)Iteration;
228 
229   bool Changed;
230   do {
231     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
232     LLVM_DEBUG(dbgs() << "==============\n");
233 
234     Changed = false;
235     for (size_t I = 0; I < N; ++I) {
236       auto &B = Block[I];
237       for (BasicBlock *SI : successors(B)) {
238 
239         auto SuccNo = Mapping.blockToIndex(SI);
240 
241         // Saved Consumes and Kills bitsets so that it is easy to see
242         // if anything changed after propagation.
243         auto &S = Block[SuccNo];
244         auto SavedConsumes = S.Consumes;
245         auto SavedKills = S.Kills;
246 
247         // Propagate Kills and Consumes from block B into its successor S.
248         S.Consumes |= B.Consumes;
249         S.Kills |= B.Kills;
250 
251         // If block B is a suspend block, it should propagate kills into the
252         // its successor for every block B consumes.
253         if (B.Suspend) {
254           S.Kills |= B.Consumes;
255         }
256         if (S.Suspend) {
257           // If block S is a suspend block, it should kill all of the blocks it
258           // consumes.
259           S.Kills |= S.Consumes;
260         } else if (S.End) {
261           // If block S is an end block, it should not propagate kills as the
262           // blocks following coro.end() are reached during initial invocation
263           // of the coroutine while all the data are still available on the
264           // stack or in the registers.
265           S.Kills.reset();
266         } else {
267           // This is reached when S block it not Suspend nor coro.end and it
268           // need to make sure that it is not in the kill set.
269           S.Kills.reset(SuccNo);
270         }
271 
272         // See if anything changed.
273         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
274 
275         if (S.Kills != SavedKills) {
276           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
277                             << "\n");
278           LLVM_DEBUG(dump("S.Kills", S.Kills));
279           LLVM_DEBUG(dump("SavedKills", SavedKills));
280         }
281         if (S.Consumes != SavedConsumes) {
282           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
283           LLVM_DEBUG(dump("S.Consume", S.Consumes));
284           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
285         }
286       }
287     }
288   } while (Changed);
289   LLVM_DEBUG(dump());
290 }
291 
292 #undef DEBUG_TYPE // "coro-suspend-crossing"
293 #define DEBUG_TYPE "coro-frame"
294 
295 namespace {
296 class FrameTypeBuilder;
297 // Mapping from the to-be-spilled value to all the users that need reload.
298 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
299 struct AllocaInfo {
300   AllocaInst *Alloca;
301   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
302   bool MayWriteBeforeCoroBegin;
303   AllocaInfo(AllocaInst *Alloca,
304              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
305              bool MayWriteBeforeCoroBegin)
306       : Alloca(Alloca), Aliases(std::move(Aliases)),
307         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
308 };
309 struct FrameDataInfo {
310   // All the values (that are not allocas) that needs to be spilled to the
311   // frame.
312   SpillInfo Spills;
313   // Allocas contains all values defined as allocas that need to live in the
314   // frame.
315   SmallVector<AllocaInfo, 8> Allocas;
316 
317   SmallVector<Value *, 8> getAllDefs() const {
318     SmallVector<Value *, 8> Defs;
319     for (const auto &P : Spills)
320       Defs.push_back(P.first);
321     for (const auto &A : Allocas)
322       Defs.push_back(A.Alloca);
323     return Defs;
324   }
325 
326   uint32_t getFieldIndex(Value *V) const {
327     auto Itr = FieldIndexMap.find(V);
328     assert(Itr != FieldIndexMap.end() &&
329            "Value does not have a frame field index");
330     return Itr->second;
331   }
332 
333   void setFieldIndex(Value *V, uint32_t Index) {
334     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
335            "Cannot set the index for the same field twice.");
336     FieldIndexMap[V] = Index;
337   }
338 
339   // Remap the index of every field in the frame, using the final layout index.
340   void updateLayoutIndex(FrameTypeBuilder &B);
341 
342 private:
343   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
344   // twice by mistake.
345   bool LayoutIndexUpdateStarted = false;
346   // Map from values to their slot indexes on the frame. They will be first set
347   // with their original insertion field index. After the frame is built, their
348   // indexes will be updated into the final layout index.
349   DenseMap<Value *, uint32_t> FieldIndexMap;
350 };
351 } // namespace
352 
353 #ifndef NDEBUG
354 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
355   dbgs() << "------------- " << Title << "--------------\n";
356   for (const auto &E : Spills) {
357     E.first->dump();
358     dbgs() << "   user: ";
359     for (auto *I : E.second)
360       I->dump();
361   }
362 }
363 
364 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
365   dbgs() << "------------- Allocas --------------\n";
366   for (const auto &A : Allocas) {
367     A.Alloca->dump();
368   }
369 }
370 #endif
371 
372 namespace {
373 using FieldIDType = size_t;
374 // We cannot rely solely on natural alignment of a type when building a
375 // coroutine frame and if the alignment specified on the Alloca instruction
376 // differs from the natural alignment of the alloca type we will need to insert
377 // padding.
378 class FrameTypeBuilder {
379 private:
380   struct Field {
381     uint64_t Size;
382     uint64_t Offset;
383     Type *Ty;
384     FieldIDType LayoutFieldIndex;
385     Align Alignment;
386     Align TyAlignment;
387   };
388 
389   const DataLayout &DL;
390   LLVMContext &Context;
391   uint64_t StructSize = 0;
392   Align StructAlign;
393   bool IsFinished = false;
394 
395   SmallVector<Field, 8> Fields;
396   DenseMap<Value*, unsigned> FieldIndexByKey;
397 
398 public:
399   FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
400       : DL(DL), Context(Context) {}
401 
402   /// Add a field to this structure for the storage of an `alloca`
403   /// instruction.
404   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
405                                                bool IsHeader = false) {
406     Type *Ty = AI->getAllocatedType();
407 
408     // Make an array type if this is a static array allocation.
409     if (AI->isArrayAllocation()) {
410       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
411         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
412       else
413         report_fatal_error("Coroutines cannot handle non static allocas yet");
414     }
415 
416     return addField(Ty, AI->getAlign(), IsHeader);
417   }
418 
419   /// We want to put the allocas whose lifetime-ranges are not overlapped
420   /// into one slot of coroutine frame.
421   /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
422   ///
423   ///     cppcoro::task<void> alternative_paths(bool cond) {
424   ///         if (cond) {
425   ///             big_structure a;
426   ///             process(a);
427   ///             co_await something();
428   ///         } else {
429   ///             big_structure b;
430   ///             process2(b);
431   ///             co_await something();
432   ///         }
433   ///     }
434   ///
435   /// We want to put variable a and variable b in the same slot to
436   /// reduce the size of coroutine frame.
437   ///
438   /// This function use StackLifetime algorithm to partition the AllocaInsts in
439   /// Spills to non-overlapped sets in order to put Alloca in the same
440   /// non-overlapped set into the same slot in the Coroutine Frame. Then add
441   /// field for the allocas in the same non-overlapped set by using the largest
442   /// type as the field type.
443   ///
444   /// Side Effects: Because We sort the allocas, the order of allocas in the
445   /// frame may be different with the order in the source code.
446   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
447                           coro::Shape &Shape);
448 
449   /// Add a field to this structure.
450   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
451                                       bool IsHeader = false) {
452     assert(!IsFinished && "adding fields to a finished builder");
453     assert(Ty && "must provide a type for a field");
454 
455     // The field size is always the alloc size of the type.
456     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
457 
458     // The field alignment might not be the type alignment, but we need
459     // to remember the type alignment anyway to build the type.
460     Align TyAlignment = DL.getABITypeAlign(Ty);
461     if (!FieldAlignment) FieldAlignment = TyAlignment;
462 
463     // Lay out header fields immediately.
464     uint64_t Offset;
465     if (IsHeader) {
466       Offset = alignTo(StructSize, FieldAlignment);
467       StructSize = Offset + FieldSize;
468 
469     // Everything else has a flexible offset.
470     } else {
471       Offset = OptimizedStructLayoutField::FlexibleOffset;
472     }
473 
474     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
475     return Fields.size() - 1;
476   }
477 
478   /// Finish the layout and set the body on the given type.
479   void finish(StructType *Ty);
480 
481   uint64_t getStructSize() const {
482     assert(IsFinished && "not yet finished!");
483     return StructSize;
484   }
485 
486   Align getStructAlign() const {
487     assert(IsFinished && "not yet finished!");
488     return StructAlign;
489   }
490 
491   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
492     assert(IsFinished && "not yet finished!");
493     return Fields[Id].LayoutFieldIndex;
494   }
495 };
496 } // namespace
497 
498 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
499   auto Updater = [&](Value *I) {
500     setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
501   };
502   LayoutIndexUpdateStarted = true;
503   for (auto &S : Spills)
504     Updater(S.first);
505   for (const auto &A : Allocas)
506     Updater(A.Alloca);
507   LayoutIndexUpdateStarted = false;
508 }
509 
510 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
511                                           FrameDataInfo &FrameData,
512                                           coro::Shape &Shape) {
513   DenseMap<AllocaInst *, unsigned int> AllocaIndex;
514   using AllocaSetType = SmallVector<AllocaInst *, 4>;
515   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
516 
517   // We need to add field for allocas at the end of this function. However, this
518   // function has multiple exits, so we use this helper to avoid redundant code.
519   struct RTTIHelper {
520     std::function<void()> func;
521     RTTIHelper(std::function<void()> &&func) : func(func) {}
522     ~RTTIHelper() { func(); }
523   } Helper([&]() {
524     for (auto AllocaList : NonOverlapedAllocas) {
525       auto *LargestAI = *AllocaList.begin();
526       FieldIDType Id = addFieldForAlloca(LargestAI);
527       for (auto *Alloca : AllocaList)
528         FrameData.setFieldIndex(Alloca, Id);
529     }
530   });
531 
532   if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
533     for (const auto &A : FrameData.Allocas) {
534       AllocaInst *Alloca = A.Alloca;
535       AllocaIndex[Alloca] = NonOverlapedAllocas.size();
536       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
537     }
538     return;
539   }
540 
541   // Because there are pathes from the lifetime.start to coro.end
542   // for each alloca, the liferanges for every alloca is overlaped
543   // in the blocks who contain coro.end and the successor blocks.
544   // So we choose to skip there blocks when we calculates the liferange
545   // for each alloca. It should be reasonable since there shouldn't be uses
546   // in these blocks and the coroutine frame shouldn't be used outside the
547   // coroutine body.
548   //
549   // Note that the user of coro.suspend may not be SwitchInst. However, this
550   // case seems too complex to handle. And it is harmless to skip these
551   // patterns since it just prevend putting the allocas to live in the same
552   // slot.
553   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
554   for (auto CoroSuspendInst : Shape.CoroSuspends) {
555     for (auto U : CoroSuspendInst->users()) {
556       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
557         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
558         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
559         SWI->setDefaultDest(SWI->getSuccessor(1));
560       }
561     }
562   }
563 
564   auto ExtractAllocas = [&]() {
565     AllocaSetType Allocas;
566     Allocas.reserve(FrameData.Allocas.size());
567     for (const auto &A : FrameData.Allocas)
568       Allocas.push_back(A.Alloca);
569     return Allocas;
570   };
571   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
572                                       StackLifetime::LivenessType::May);
573   StackLifetimeAnalyzer.run();
574   auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
575     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
576         StackLifetimeAnalyzer.getLiveRange(AI2));
577   };
578   auto GetAllocaSize = [&](const AllocaInfo &A) {
579     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
580     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
581     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
582     return RetSize->getFixedSize();
583   };
584   // Put larger allocas in the front. So the larger allocas have higher
585   // priority to merge, which can save more space potentially. Also each
586   // AllocaSet would be ordered. So we can get the largest Alloca in one
587   // AllocaSet easily.
588   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
589     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
590   });
591   for (const auto &A : FrameData.Allocas) {
592     AllocaInst *Alloca = A.Alloca;
593     bool Merged = false;
594     // Try to find if the Alloca is not inferenced with any existing
595     // NonOverlappedAllocaSet. If it is true, insert the alloca to that
596     // NonOverlappedAllocaSet.
597     for (auto &AllocaSet : NonOverlapedAllocas) {
598       assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
599       bool CouldMerge = none_of(AllocaSet, [&](auto Iter) {
600         return IsAllocaInferenre(Alloca, Iter);
601       });
602       if (!CouldMerge)
603         continue;
604       AllocaIndex[Alloca] = AllocaIndex[*AllocaSet.begin()];
605       AllocaSet.push_back(Alloca);
606       Merged = true;
607       break;
608     }
609     if (!Merged) {
610       AllocaIndex[Alloca] = NonOverlapedAllocas.size();
611       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
612     }
613   }
614   // Recover the default target destination for each Switch statement
615   // reserved.
616   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
617     SwitchInst *SWI = SwitchAndDefaultDest.first;
618     BasicBlock *DestBB = SwitchAndDefaultDest.second;
619     SWI->setDefaultDest(DestBB);
620   }
621   // This Debug Info could tell us which allocas are merged into one slot.
622   LLVM_DEBUG(for (auto &AllocaSet
623                   : NonOverlapedAllocas) {
624     if (AllocaSet.size() > 1) {
625       dbgs() << "In Function:" << F.getName() << "\n";
626       dbgs() << "Find Union Set "
627              << "\n";
628       dbgs() << "\tAllocas are \n";
629       for (auto Alloca : AllocaSet)
630         dbgs() << "\t\t" << *Alloca << "\n";
631     }
632   });
633 }
634 
635 void FrameTypeBuilder::finish(StructType *Ty) {
636   assert(!IsFinished && "already finished!");
637 
638   // Prepare the optimal-layout field array.
639   // The Id in the layout field is a pointer to our Field for it.
640   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
641   LayoutFields.reserve(Fields.size());
642   for (auto &Field : Fields) {
643     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
644                               Field.Offset);
645   }
646 
647   // Perform layout.
648   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
649   StructSize = SizeAndAlign.first;
650   StructAlign = SizeAndAlign.second;
651 
652   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
653     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
654   };
655 
656   // We need to produce a packed struct type if there's a field whose
657   // assigned offset isn't a multiple of its natural type alignment.
658   bool Packed = [&] {
659     for (auto &LayoutField : LayoutFields) {
660       auto &F = getField(LayoutField);
661       if (!isAligned(F.TyAlignment, LayoutField.Offset))
662         return true;
663     }
664     return false;
665   }();
666 
667   // Build the struct body.
668   SmallVector<Type*, 16> FieldTypes;
669   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
670   uint64_t LastOffset = 0;
671   for (auto &LayoutField : LayoutFields) {
672     auto &F = getField(LayoutField);
673 
674     auto Offset = LayoutField.Offset;
675 
676     // Add a padding field if there's a padding gap and we're either
677     // building a packed struct or the padding gap is more than we'd
678     // get from aligning to the field type's natural alignment.
679     assert(Offset >= LastOffset);
680     if (Offset != LastOffset) {
681       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
682         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
683                                             Offset - LastOffset));
684     }
685 
686     F.Offset = Offset;
687     F.LayoutFieldIndex = FieldTypes.size();
688 
689     FieldTypes.push_back(F.Ty);
690     LastOffset = Offset + F.Size;
691   }
692 
693   Ty->setBody(FieldTypes, Packed);
694 
695 #ifndef NDEBUG
696   // Check that the IR layout matches the offsets we expect.
697   auto Layout = DL.getStructLayout(Ty);
698   for (auto &F : Fields) {
699     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
700     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
701   }
702 #endif
703 
704   IsFinished = true;
705 }
706 
707 // Build a struct that will keep state for an active coroutine.
708 //   struct f.frame {
709 //     ResumeFnTy ResumeFnAddr;
710 //     ResumeFnTy DestroyFnAddr;
711 //     int ResumeIndex;
712 //     ... promise (if present) ...
713 //     ... spills ...
714 //   };
715 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
716                                   FrameDataInfo &FrameData) {
717   LLVMContext &C = F.getContext();
718   const DataLayout &DL = F.getParent()->getDataLayout();
719   StructType *FrameTy = [&] {
720     SmallString<32> Name(F.getName());
721     Name.append(".Frame");
722     return StructType::create(C, Name);
723   }();
724 
725   FrameTypeBuilder B(C, DL);
726 
727   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
728   Optional<FieldIDType> SwitchIndexFieldId;
729 
730   if (Shape.ABI == coro::ABI::Switch) {
731     auto *FramePtrTy = FrameTy->getPointerTo();
732     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
733                                    /*IsVarArg=*/false);
734     auto *FnPtrTy = FnTy->getPointerTo();
735 
736     // Add header fields for the resume and destroy functions.
737     // We can rely on these being perfectly packed.
738     (void)B.addField(FnPtrTy, None, /*header*/ true);
739     (void)B.addField(FnPtrTy, None, /*header*/ true);
740 
741     // PromiseAlloca field needs to be explicitly added here because it's
742     // a header field with a fixed offset based on its alignment. Hence it
743     // needs special handling and cannot be added to FrameData.Allocas.
744     if (PromiseAlloca)
745       FrameData.setFieldIndex(
746           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
747 
748     // Add a field to store the suspend index.  This doesn't need to
749     // be in the header.
750     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
751     Type *IndexType = Type::getIntNTy(C, IndexBits);
752 
753     SwitchIndexFieldId = B.addField(IndexType, None);
754   } else {
755     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
756   }
757 
758   // Because multiple allocas may own the same field slot,
759   // we add allocas to field here.
760   B.addFieldForAllocas(F, FrameData, Shape);
761   // Create an entry for every spilled value.
762   for (auto &S : FrameData.Spills) {
763     FieldIDType Id = B.addField(S.first->getType(), None);
764     FrameData.setFieldIndex(S.first, Id);
765   }
766 
767   B.finish(FrameTy);
768   FrameData.updateLayoutIndex(B);
769   Shape.FrameAlign = B.getStructAlign();
770   Shape.FrameSize = B.getStructSize();
771 
772   switch (Shape.ABI) {
773   case coro::ABI::Switch:
774     // In the switch ABI, remember the switch-index field.
775     Shape.SwitchLowering.IndexField =
776         B.getLayoutFieldIndex(*SwitchIndexFieldId);
777 
778     // Also round the frame size up to a multiple of its alignment, as is
779     // generally expected in C/C++.
780     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
781     break;
782 
783   // In the retcon ABI, remember whether the frame is inline in the storage.
784   case coro::ABI::Retcon:
785   case coro::ABI::RetconOnce: {
786     auto Id = Shape.getRetconCoroId();
787     Shape.RetconLowering.IsFrameInlineInStorage
788       = (B.getStructSize() <= Id->getStorageSize() &&
789          B.getStructAlign() <= Id->getStorageAlignment());
790     break;
791   }
792   case coro::ABI::Async: {
793     Shape.AsyncLowering.FrameOffset =
794         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
795     // Also make the final context size a multiple of the context alignment to
796     // make allocation easier for allocators.
797     Shape.AsyncLowering.ContextSize =
798         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
799                 Shape.AsyncLowering.getContextAlignment());
800     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
801       report_fatal_error(
802           "The alignment requirment of frame variables cannot be higher than "
803           "the alignment of the async function context");
804     }
805     break;
806   }
807   }
808 
809   return FrameTy;
810 }
811 
812 // We use a pointer use visitor to track how an alloca is being used.
813 // The goal is to be able to answer the following three questions:
814 // 1. Should this alloca be allocated on the frame instead.
815 // 2. Could the content of the alloca be modified prior to CoroBegn, which would
816 // require copying the data from alloca to the frame after CoroBegin.
817 // 3. Is there any alias created for this alloca prior to CoroBegin, but used
818 // after CoroBegin. In that case, we will need to recreate the alias after
819 // CoroBegin based off the frame. To answer question 1, we track two things:
820 //   a. List of all BasicBlocks that use this alloca or any of the aliases of
821 //   the alloca. In the end, we check if there exists any two basic blocks that
822 //   cross suspension points. If so, this alloca must be put on the frame. b.
823 //   Whether the alloca or any alias of the alloca is escaped at some point,
824 //   either by storing the address somewhere, or the address is used in a
825 //   function call that might capture. If it's ever escaped, this alloca must be
826 //   put on the frame conservatively.
827 // To answer quetion 2, we track through the variable MayWriteBeforeCoroBegin.
828 // Whenever a potential write happens, either through a store instruction, a
829 // function call or any of the memory intrinsics, we check whether this
830 // instruction is prior to CoroBegin. To answer question 3, we track the offsets
831 // of all aliases created for the alloca prior to CoroBegin but used after
832 // CoroBegin. llvm::Optional is used to be able to represent the case when the
833 // offset is unknown (e.g. when you have a PHINode that takes in different
834 // offset values). We cannot handle unknown offsets and will assert. This is the
835 // potential issue left out. An ideal solution would likely require a
836 // significant redesign.
837 namespace {
838 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
839   using Base = PtrUseVisitor<AllocaUseVisitor>;
840   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
841                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
842       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}
843 
844   void visit(Instruction &I) {
845     UserBBs.insert(I.getParent());
846     Base::visit(I);
847     // If the pointer is escaped prior to CoroBegin, we have to assume it would
848     // be written into before CoroBegin as well.
849     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
850       MayWriteBeforeCoroBegin = true;
851     }
852   }
853   // We need to provide this overload as PtrUseVisitor uses a pointer based
854   // visiting function.
855   void visit(Instruction *I) { return visit(*I); }
856 
857   void visitPHINode(PHINode &I) {
858     enqueueUsers(I);
859     handleAlias(I);
860   }
861 
862   void visitSelectInst(SelectInst &I) {
863     enqueueUsers(I);
864     handleAlias(I);
865   }
866 
867   void visitStoreInst(StoreInst &SI) {
868     // Regardless whether the alias of the alloca is the value operand or the
869     // pointer operand, we need to assume the alloca is been written.
870     handleMayWrite(SI);
871 
872     if (SI.getValueOperand() != U->get())
873       return;
874 
875     // We are storing the pointer into a memory location, potentially escaping.
876     // As an optimization, we try to detect simple cases where it doesn't
877     // actually escape, for example:
878     //   %ptr = alloca ..
879     //   %addr = alloca ..
880     //   store %ptr, %addr
881     //   %x = load %addr
882     //   ..
883     // If %addr is only used by loading from it, we could simply treat %x as
884     // another alias of %ptr, and not considering %ptr being escaped.
885     auto IsSimpleStoreThenLoad = [&]() {
886       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
887       // If the memory location we are storing to is not an alloca, it
888       // could be an alias of some other memory locations, which is difficult
889       // to analyze.
890       if (!AI)
891         return false;
892       // StoreAliases contains aliases of the memory location stored into.
893       SmallVector<Instruction *, 4> StoreAliases = {AI};
894       while (!StoreAliases.empty()) {
895         Instruction *I = StoreAliases.back();
896         StoreAliases.pop_back();
897         for (User *U : I->users()) {
898           // If we are loading from the memory location, we are creating an
899           // alias of the original pointer.
900           if (auto *LI = dyn_cast<LoadInst>(U)) {
901             enqueueUsers(*LI);
902             handleAlias(*LI);
903             continue;
904           }
905           // If we are overriding the memory location, the pointer certainly
906           // won't escape.
907           if (auto *S = dyn_cast<StoreInst>(U))
908             if (S->getPointerOperand() == I)
909               continue;
910           if (auto *II = dyn_cast<IntrinsicInst>(U))
911             if (II->isLifetimeStartOrEnd())
912               continue;
913           // BitCastInst creats aliases of the memory location being stored
914           // into.
915           if (auto *BI = dyn_cast<BitCastInst>(U)) {
916             StoreAliases.push_back(BI);
917             continue;
918           }
919           return false;
920         }
921       }
922 
923       return true;
924     };
925 
926     if (!IsSimpleStoreThenLoad())
927       PI.setEscaped(&SI);
928   }
929 
930   // All mem intrinsics modify the data.
931   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
932 
933   void visitBitCastInst(BitCastInst &BC) {
934     Base::visitBitCastInst(BC);
935     handleAlias(BC);
936   }
937 
938   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
939     Base::visitAddrSpaceCastInst(ASC);
940     handleAlias(ASC);
941   }
942 
943   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
944     // The base visitor will adjust Offset accordingly.
945     Base::visitGetElementPtrInst(GEPI);
946     handleAlias(GEPI);
947   }
948 
949   void visitCallBase(CallBase &CB) {
950     for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
951       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
952         PI.setEscaped(&CB);
953     handleMayWrite(CB);
954   }
955 
956   bool getShouldLiveOnFrame() const {
957     if (!ShouldLiveOnFrame)
958       ShouldLiveOnFrame = computeShouldLiveOnFrame();
959     return ShouldLiveOnFrame.getValue();
960   }
961 
962   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
963 
964   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
965     assert(getShouldLiveOnFrame() && "This method should only be called if the "
966                                      "alloca needs to live on the frame.");
967     for (const auto &P : AliasOffetMap)
968       if (!P.second)
969         report_fatal_error("Unable to handle an alias with unknown offset "
970                            "created before CoroBegin.");
971     return AliasOffetMap;
972   }
973 
974 private:
975   const DominatorTree &DT;
976   const CoroBeginInst &CoroBegin;
977   const SuspendCrossingInfo &Checker;
978   // All alias to the original AllocaInst, created before CoroBegin and used
979   // after CoroBegin. Each entry contains the instruction and the offset in the
980   // original Alloca. They need to be recreated after CoroBegin off the frame.
981   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
982   SmallPtrSet<BasicBlock *, 2> UserBBs{};
983   bool MayWriteBeforeCoroBegin{false};
984 
985   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
986 
987   bool computeShouldLiveOnFrame() const {
988     if (PI.isEscaped())
989       return true;
990 
991     for (auto *BB1 : UserBBs)
992       for (auto *BB2 : UserBBs)
993         if (Checker.hasPathCrossingSuspendPoint(BB1, BB2))
994           return true;
995 
996     return false;
997   }
998 
999   void handleMayWrite(const Instruction &I) {
1000     if (!DT.dominates(&CoroBegin, &I))
1001       MayWriteBeforeCoroBegin = true;
1002   }
1003 
1004   bool usedAfterCoroBegin(Instruction &I) {
1005     for (auto &U : I.uses())
1006       if (DT.dominates(&CoroBegin, U))
1007         return true;
1008     return false;
1009   }
1010 
1011   void handleAlias(Instruction &I) {
1012     // We track all aliases created prior to CoroBegin but used after.
1013     // These aliases may need to be recreated after CoroBegin if the alloca
1014     // need to live on the frame.
1015     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1016       return;
1017 
1018     if (!IsOffsetKnown) {
1019       AliasOffetMap[&I].reset();
1020     } else {
1021       auto Itr = AliasOffetMap.find(&I);
1022       if (Itr == AliasOffetMap.end()) {
1023         AliasOffetMap[&I] = Offset;
1024       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1025         // If we have seen two different possible values for this alias, we set
1026         // it to empty.
1027         AliasOffetMap[&I].reset();
1028       }
1029     }
1030   }
1031 };
1032 } // namespace
1033 
1034 // We need to make room to insert a spill after initial PHIs, but before
1035 // catchswitch instruction. Placing it before violates the requirement that
1036 // catchswitch, like all other EHPads must be the first nonPHI in a block.
1037 //
1038 // Split away catchswitch into a separate block and insert in its place:
1039 //
1040 //   cleanuppad <InsertPt> cleanupret.
1041 //
1042 // cleanupret instruction will act as an insert point for the spill.
1043 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1044   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1045   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1046   CurrentBlock->getTerminator()->eraseFromParent();
1047 
1048   auto *CleanupPad =
1049       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1050   auto *CleanupRet =
1051       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1052   return CleanupRet;
1053 }
1054 
1055 // Replace all alloca and SSA values that are accessed across suspend points
1056 // with GetElementPointer from coroutine frame + loads and stores. Create an
1057 // AllocaSpillBB that will become the new entry block for the resume parts of
1058 // the coroutine:
1059 //
1060 //    %hdl = coro.begin(...)
1061 //    whatever
1062 //
1063 // becomes:
1064 //
1065 //    %hdl = coro.begin(...)
1066 //    %FramePtr = bitcast i8* hdl to %f.frame*
1067 //    br label %AllocaSpillBB
1068 //
1069 //  AllocaSpillBB:
1070 //    ; geps corresponding to allocas that were moved to coroutine frame
1071 //    br label PostSpill
1072 //
1073 //  PostSpill:
1074 //    whatever
1075 //
1076 //
1077 static Instruction *insertSpills(const FrameDataInfo &FrameData,
1078                                  coro::Shape &Shape) {
1079   auto *CB = Shape.CoroBegin;
1080   LLVMContext &C = CB->getContext();
1081   IRBuilder<> Builder(CB->getNextNode());
1082   StructType *FrameTy = Shape.FrameTy;
1083   PointerType *FramePtrTy = FrameTy->getPointerTo();
1084   auto *FramePtr =
1085       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1086   DominatorTree DT(*CB->getFunction());
1087 
1088   // Create a GEP with the given index into the coroutine frame for the original
1089   // value Orig. Appends an extra 0 index for array-allocas, preserving the
1090   // original type.
1091   auto GetFramePointer = [&](Value *Orig) -> Value * {
1092     FieldIDType Index = FrameData.getFieldIndex(Orig);
1093     SmallVector<Value *, 3> Indices = {
1094         ConstantInt::get(Type::getInt32Ty(C), 0),
1095         ConstantInt::get(Type::getInt32Ty(C), Index),
1096     };
1097 
1098     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1099       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1100         auto Count = CI->getValue().getZExtValue();
1101         if (Count > 1) {
1102           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1103         }
1104       } else {
1105         report_fatal_error("Coroutines cannot handle non static allocas yet");
1106       }
1107     }
1108 
1109     auto GEP = cast<GetElementPtrInst>(
1110         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1111     if (isa<AllocaInst>(Orig)) {
1112       // If the type of GEP is not equal to the type of AllocaInst, it implies
1113       // that the AllocaInst may be reused in the Frame slot of other
1114       // AllocaInst. So we cast the GEP to the type of AllocaInst.
1115       if (GEP->getResultElementType() != Orig->getType())
1116         return Builder.CreateBitCast(GEP, Orig->getType(),
1117                                      Orig->getName() + Twine(".cast"));
1118     }
1119     return GEP;
1120   };
1121 
1122   for (auto const &E : FrameData.Spills) {
1123     Value *Def = E.first;
1124     // Create a store instruction storing the value into the
1125     // coroutine frame.
1126     Instruction *InsertPt = nullptr;
1127     if (auto *Arg = dyn_cast<Argument>(Def)) {
1128       // For arguments, we will place the store instruction right after
1129       // the coroutine frame pointer instruction, i.e. bitcast of
1130       // coro.begin from i8* to %f.frame*.
1131       InsertPt = FramePtr->getNextNode();
1132 
1133       // If we're spilling an Argument, make sure we clear 'nocapture'
1134       // from the coroutine function.
1135       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1136 
1137     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1138       // Don't spill immediately after a suspend; splitting assumes
1139       // that the suspend will be followed by a branch.
1140       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1141     } else {
1142       auto *I = cast<Instruction>(Def);
1143       if (!DT.dominates(CB, I)) {
1144         // If it is not dominated by CoroBegin, then spill should be
1145         // inserted immediately after CoroFrame is computed.
1146         InsertPt = FramePtr->getNextNode();
1147       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1148         // If we are spilling the result of the invoke instruction, split
1149         // the normal edge and insert the spill in the new block.
1150         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1151         InsertPt = NewBB->getTerminator();
1152       } else if (isa<PHINode>(I)) {
1153         // Skip the PHINodes and EH pads instructions.
1154         BasicBlock *DefBlock = I->getParent();
1155         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1156           InsertPt = splitBeforeCatchSwitch(CSI);
1157         else
1158           InsertPt = &*DefBlock->getFirstInsertionPt();
1159       } else {
1160         assert(!I->isTerminator() && "unexpected terminator");
1161         // For all other values, the spill is placed immediately after
1162         // the definition.
1163         InsertPt = I->getNextNode();
1164       }
1165     }
1166 
1167     auto Index = FrameData.getFieldIndex(Def);
1168     Builder.SetInsertPoint(InsertPt);
1169     auto *G = Builder.CreateConstInBoundsGEP2_32(
1170         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1171     Builder.CreateStore(Def, G);
1172 
1173     BasicBlock *CurrentBlock = nullptr;
1174     Value *CurrentReload = nullptr;
1175     for (auto *U : E.second) {
1176       // If we have not seen the use block, create a load instruction to reload
1177       // the spilled value from the coroutine frame. Populates the Value pointer
1178       // reference provided with the frame GEP.
1179       if (CurrentBlock != U->getParent()) {
1180         CurrentBlock = U->getParent();
1181         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1182 
1183         auto *GEP = GetFramePointer(E.first);
1184         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1185         CurrentReload = Builder.CreateLoad(
1186             FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1187             E.first->getName() + Twine(".reload"));
1188       }
1189 
1190       // If we have a single edge PHINode, remove it and replace it with a
1191       // reload from the coroutine frame. (We already took care of multi edge
1192       // PHINodes by rewriting them in the rewritePHIs function).
1193       if (auto *PN = dyn_cast<PHINode>(U)) {
1194         assert(PN->getNumIncomingValues() == 1 &&
1195                "unexpected number of incoming "
1196                "values in the PHINode");
1197         PN->replaceAllUsesWith(CurrentReload);
1198         PN->eraseFromParent();
1199         continue;
1200       }
1201 
1202       // Replace all uses of CurrentValue in the current instruction with
1203       // reload.
1204       U->replaceUsesOfWith(Def, CurrentReload);
1205     }
1206   }
1207 
1208   BasicBlock *FramePtrBB = FramePtr->getParent();
1209 
1210   auto SpillBlock =
1211       FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1212   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1213   Shape.AllocaSpillBlock = SpillBlock;
1214 
1215   // retcon and retcon.once lowering assumes all uses have been sunk.
1216   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1217       Shape.ABI == coro::ABI::Async) {
1218     // If we found any allocas, replace all of their remaining uses with Geps.
1219     Builder.SetInsertPoint(&SpillBlock->front());
1220     for (const auto &P : FrameData.Allocas) {
1221       AllocaInst *Alloca = P.Alloca;
1222       auto *G = GetFramePointer(Alloca);
1223 
1224       // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1225       // here, as we are changing location of the instruction.
1226       G->takeName(Alloca);
1227       Alloca->replaceAllUsesWith(G);
1228       Alloca->eraseFromParent();
1229     }
1230     return FramePtr;
1231   }
1232 
1233   // If we found any alloca, replace all of their remaining uses with GEP
1234   // instructions. Because new dbg.declare have been created for these alloca,
1235   // we also delete the original dbg.declare and replace other uses with undef.
1236   // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1237   // as some of the uses may not be dominated by CoroBegin.
1238   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1239   SmallVector<Instruction *, 4> UsersToUpdate;
1240   for (const auto &A : FrameData.Allocas) {
1241     AllocaInst *Alloca = A.Alloca;
1242     UsersToUpdate.clear();
1243     for (User *U : Alloca->users()) {
1244       auto *I = cast<Instruction>(U);
1245       if (DT.dominates(CB, I))
1246         UsersToUpdate.push_back(I);
1247     }
1248     if (UsersToUpdate.empty())
1249       continue;
1250     auto *G = GetFramePointer(Alloca);
1251     G->setName(Alloca->getName() + Twine(".reload.addr"));
1252 
1253     SmallPtrSet<BasicBlock *, 4> SeenDbgBBs;
1254     TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
1255     DIBuilder DIB(*Alloca->getModule(), /*AllowUnresolved*/ false);
1256     Instruction *FirstDbgDecl = nullptr;
1257 
1258     if (!DIs.empty()) {
1259       FirstDbgDecl = DIB.insertDeclare(G, DIs.front()->getVariable(),
1260                                        DIs.front()->getExpression(),
1261                                        DIs.front()->getDebugLoc(), DIs.front());
1262       SeenDbgBBs.insert(DIs.front()->getParent());
1263     }
1264     for (auto *DI : FindDbgDeclareUses(Alloca))
1265       DI->eraseFromParent();
1266     replaceDbgUsesWithUndef(Alloca);
1267 
1268     for (Instruction *I : UsersToUpdate) {
1269       I->replaceUsesOfWith(Alloca, G);
1270 
1271       // After cloning, transformations might not guarantee that all uses
1272       // of this alloca are dominated by the already existing dbg.declare's,
1273       // compromising the debug quality. Instead of writing another
1274       // transformation to patch each clone, go ahead and early populate
1275       // basic blocks that use such allocas with more debug info.
1276       if (SeenDbgBBs.count(I->getParent()))
1277         continue;
1278 
1279       // If there isn't a prior dbg.declare for this alloca, it probably
1280       // means the state hasn't changed prior to one of the relevant suspend
1281       // point for this frame access.
1282       if (!FirstDbgDecl)
1283         continue;
1284 
1285       // These instructions are all dominated by the alloca, insert the
1286       // dbg.value in the beginning of the BB to enhance debugging
1287       // experience and allow values to be inspected as early as possible.
1288       // Prefer dbg.value over dbg.declare since it better sets expectations
1289       // that control flow can be later changed by other passes.
1290       auto *DI = cast<DbgDeclareInst>(FirstDbgDecl);
1291       BasicBlock *CurrentBlock = I->getParent();
1292       auto *DerefExpr =
1293           DIExpression::append(DI->getExpression(), dwarf::DW_OP_deref);
1294       DIB.insertDbgValueIntrinsic(G, DI->getVariable(), DerefExpr,
1295                                   DI->getDebugLoc(),
1296                                   &*CurrentBlock->getFirstInsertionPt());
1297       SeenDbgBBs.insert(CurrentBlock);
1298     }
1299   }
1300   Builder.SetInsertPoint(FramePtr->getNextNode());
1301   for (const auto &A : FrameData.Allocas) {
1302     AllocaInst *Alloca = A.Alloca;
1303     if (A.MayWriteBeforeCoroBegin) {
1304       // isEscaped really means potentially modified before CoroBegin.
1305       if (Alloca->isArrayAllocation())
1306         report_fatal_error(
1307             "Coroutines cannot handle copying of array allocas yet");
1308 
1309       auto *G = GetFramePointer(Alloca);
1310       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1311       Builder.CreateStore(Value, G);
1312     }
1313     // For each alias to Alloca created before CoroBegin but used after
1314     // CoroBegin, we recreate them after CoroBegin by appplying the offset
1315     // to the pointer in the frame.
1316     for (const auto &Alias : A.Aliases) {
1317       auto *FramePtr = GetFramePointer(Alloca);
1318       auto *FramePtrRaw =
1319           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1320       auto *AliasPtr = Builder.CreateGEP(
1321           FramePtrRaw,
1322           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1323       auto *AliasPtrTyped =
1324           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1325       Alias.first->replaceUsesWithIf(
1326           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1327     }
1328   }
1329   return FramePtr;
1330 }
1331 
1332 // Sets the unwind edge of an instruction to a particular successor.
1333 static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
1334   if (auto *II = dyn_cast<InvokeInst>(TI))
1335     II->setUnwindDest(Succ);
1336   else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
1337     CS->setUnwindDest(Succ);
1338   else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
1339     CR->setUnwindDest(Succ);
1340   else
1341     llvm_unreachable("unexpected terminator instruction");
1342 }
1343 
1344 // Replaces all uses of OldPred with the NewPred block in all PHINodes in a
1345 // block.
1346 static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
1347                            BasicBlock *NewPred, PHINode *Until = nullptr) {
1348   unsigned BBIdx = 0;
1349   for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
1350     PHINode *PN = cast<PHINode>(I);
1351 
1352     // We manually update the LandingPadReplacement PHINode and it is the last
1353     // PHI Node. So, if we find it, we are done.
1354     if (Until == PN)
1355       break;
1356 
1357     // Reuse the previous value of BBIdx if it lines up.  In cases where we
1358     // have multiple phi nodes with *lots* of predecessors, this is a speed
1359     // win because we don't have to scan the PHI looking for TIBB.  This
1360     // happens because the BB list of PHI nodes are usually in the same
1361     // order.
1362     if (PN->getIncomingBlock(BBIdx) != OldPred)
1363       BBIdx = PN->getBasicBlockIndex(OldPred);
1364 
1365     assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
1366     PN->setIncomingBlock(BBIdx, NewPred);
1367   }
1368 }
1369 
1370 // Uses SplitEdge unless the successor block is an EHPad, in which case do EH
1371 // specific handling.
1372 static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
1373                                     LandingPadInst *OriginalPad,
1374                                     PHINode *LandingPadReplacement) {
1375   auto *PadInst = Succ->getFirstNonPHI();
1376   if (!LandingPadReplacement && !PadInst->isEHPad())
1377     return SplitEdge(BB, Succ);
1378 
1379   auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
1380   setUnwindEdgeTo(BB->getTerminator(), NewBB);
1381   updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);
1382 
1383   if (LandingPadReplacement) {
1384     auto *NewLP = OriginalPad->clone();
1385     auto *Terminator = BranchInst::Create(Succ, NewBB);
1386     NewLP->insertBefore(Terminator);
1387     LandingPadReplacement->addIncoming(NewLP, NewBB);
1388     return NewBB;
1389   }
1390   Value *ParentPad = nullptr;
1391   if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
1392     ParentPad = FuncletPad->getParentPad();
1393   else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
1394     ParentPad = CatchSwitch->getParentPad();
1395   else
1396     llvm_unreachable("handling for other EHPads not implemented yet");
1397 
1398   auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
1399   CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
1400   return NewBB;
1401 }
1402 
1403 // Moves the values in the PHIs in SuccBB that correspong to PredBB into a new
1404 // PHI in InsertedBB.
1405 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1406                                          BasicBlock *InsertedBB,
1407                                          BasicBlock *PredBB,
1408                                          PHINode *UntilPHI = nullptr) {
1409   auto *PN = cast<PHINode>(&SuccBB->front());
1410   do {
1411     int Index = PN->getBasicBlockIndex(InsertedBB);
1412     Value *V = PN->getIncomingValue(Index);
1413     PHINode *InputV = PHINode::Create(
1414         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1415         &InsertedBB->front());
1416     InputV->addIncoming(V, PredBB);
1417     PN->setIncomingValue(Index, InputV);
1418     PN = dyn_cast<PHINode>(PN->getNextNode());
1419   } while (PN != UntilPHI);
1420 }
1421 
1422 // Rewrites the PHI Nodes in a cleanuppad.
1423 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1424                                      CleanupPadInst *CleanupPad) {
1425   // For every incoming edge to a CleanupPad we will create a new block holding
1426   // all incoming values in single-value PHI nodes. We will then create another
1427   // block to act as a dispather (as all unwind edges for related EH blocks
1428   // must be the same).
1429   //
1430   // cleanuppad:
1431   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1432   //    %3 = cleanuppad within none []
1433   //
1434   // It will create:
1435   //
1436   // cleanuppad.corodispatch
1437   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1438   //    %3 = cleanuppad within none []
1439   //    switch i8 % 2, label %unreachable
1440   //            [i8 0, label %cleanuppad.from.catchswitch
1441   //             i8 1, label %cleanuppad.from.catch.1]
1442   // cleanuppad.from.catchswitch:
1443   //    %4 = phi i32 [%0, %catchswitch]
1444   //    br %label cleanuppad
1445   // cleanuppad.from.catch.1:
1446   //    %6 = phi i32 [%1, %catch.1]
1447   //    br %label cleanuppad
1448   // cleanuppad:
1449   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1450   //                 [%6, %cleanuppad.from.catch.1]
1451 
1452   // Unreachable BB, in case switching on an invalid value in the dispatcher.
1453   auto *UnreachBB = BasicBlock::Create(
1454       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1455   IRBuilder<> Builder(UnreachBB);
1456   Builder.CreateUnreachable();
1457 
1458   // Create a new cleanuppad which will be the dispatcher.
1459   auto *NewCleanupPadBB =
1460       BasicBlock::Create(CleanupPadBB->getContext(),
1461                          CleanupPadBB->getName() + Twine(".corodispatch"),
1462                          CleanupPadBB->getParent(), CleanupPadBB);
1463   Builder.SetInsertPoint(NewCleanupPadBB);
1464   auto *SwitchType = Builder.getInt8Ty();
1465   auto *SetDispatchValuePN =
1466       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1467   CleanupPad->removeFromParent();
1468   CleanupPad->insertAfter(SetDispatchValuePN);
1469   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1470                                                 pred_size(CleanupPadBB));
1471 
1472   int SwitchIndex = 0;
1473   SmallVector<BasicBlock *, 8> Preds(pred_begin(CleanupPadBB),
1474                                      pred_end(CleanupPadBB));
1475   for (BasicBlock *Pred : Preds) {
1476     // Create a new cleanuppad and move the PHI values to there.
1477     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1478                                       CleanupPadBB->getName() +
1479                                           Twine(".from.") + Pred->getName(),
1480                                       CleanupPadBB->getParent(), CleanupPadBB);
1481     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1482     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1483                     Pred->getName());
1484     Builder.SetInsertPoint(CaseBB);
1485     Builder.CreateBr(CleanupPadBB);
1486     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1487 
1488     // Update this Pred to the new unwind point.
1489     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1490 
1491     // Setup the switch in the dispatcher.
1492     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1493     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1494     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1495     SwitchIndex++;
1496   }
1497 }
1498 
1499 static void rewritePHIs(BasicBlock &BB) {
1500   // For every incoming edge we will create a block holding all
1501   // incoming values in a single PHI nodes.
1502   //
1503   // loop:
1504   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1505   //
1506   // It will create:
1507   //
1508   // loop.from.entry:
1509   //    %n.loop.pre = phi i32 [%n, %entry]
1510   //    br %label loop
1511   // loop.from.loop:
1512   //    %inc.loop.pre = phi i32 [%inc, %loop]
1513   //    br %label loop
1514   //
1515   // After this rewrite, further analysis will ignore any phi nodes with more
1516   // than one incoming edge.
1517 
1518   // TODO: Simplify PHINodes in the basic block to remove duplicate
1519   // predecessors.
1520 
1521   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1522   // so we need to create an additional "dispatcher" block.
1523   if (auto *CleanupPad =
1524           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1525     SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
1526     for (BasicBlock *Pred : Preds) {
1527       if (CatchSwitchInst *CS =
1528               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1529         // CleanupPad with a CatchSwitch predecessor: therefore this is an
1530         // unwind destination that needs to be handle specially.
1531         assert(CS->getUnwindDest() == &BB);
1532         rewritePHIsForCleanupPad(&BB, CleanupPad);
1533         return;
1534       }
1535     }
1536   }
1537 
1538   LandingPadInst *LandingPad = nullptr;
1539   PHINode *ReplPHI = nullptr;
1540   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1541     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1542     // We replace the original landing pad with a PHINode that will collect the
1543     // results from all of them.
1544     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1545     ReplPHI->takeName(LandingPad);
1546     LandingPad->replaceAllUsesWith(ReplPHI);
1547     // We will erase the original landing pad at the end of this function after
1548     // ehAwareSplitEdge cloned it in the transition blocks.
1549   }
1550 
1551   SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
1552   for (BasicBlock *Pred : Preds) {
1553     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1554     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1555 
1556     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1557     // that replaced the landing pad.
1558     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1559   }
1560 
1561   if (LandingPad) {
1562     // Calls to ehAwareSplitEdge function cloned the original lading pad.
1563     // No longer need it.
1564     LandingPad->eraseFromParent();
1565   }
1566 }
1567 
1568 static void rewritePHIs(Function &F) {
1569   SmallVector<BasicBlock *, 8> WorkList;
1570 
1571   for (BasicBlock &BB : F)
1572     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1573       if (PN->getNumIncomingValues() > 1)
1574         WorkList.push_back(&BB);
1575 
1576   for (BasicBlock *BB : WorkList)
1577     rewritePHIs(*BB);
1578 }
1579 
1580 // Check for instructions that we can recreate on resume as opposed to spill
1581 // the result into a coroutine frame.
1582 static bool materializable(Instruction &V) {
1583   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1584          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1585 }
1586 
1587 // Check for structural coroutine intrinsics that should not be spilled into
1588 // the coroutine frame.
1589 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1590   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1591          isa<CoroSuspendInst>(&I);
1592 }
1593 
1594 // For every use of the value that is across suspend point, recreate that value
1595 // after a suspend point.
1596 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1597                                               const SpillInfo &Spills) {
1598   for (const auto &E : Spills) {
1599     Value *Def = E.first;
1600     BasicBlock *CurrentBlock = nullptr;
1601     Instruction *CurrentMaterialization = nullptr;
1602     for (Instruction *U : E.second) {
1603       // If we have not seen this block, materialize the value.
1604       if (CurrentBlock != U->getParent()) {
1605         CurrentBlock = U->getParent();
1606         CurrentMaterialization = cast<Instruction>(Def)->clone();
1607         CurrentMaterialization->setName(Def->getName());
1608         CurrentMaterialization->insertBefore(
1609             &*CurrentBlock->getFirstInsertionPt());
1610       }
1611       if (auto *PN = dyn_cast<PHINode>(U)) {
1612         assert(PN->getNumIncomingValues() == 1 &&
1613                "unexpected number of incoming "
1614                "values in the PHINode");
1615         PN->replaceAllUsesWith(CurrentMaterialization);
1616         PN->eraseFromParent();
1617         continue;
1618       }
1619       // Replace all uses of Def in the current instruction with the
1620       // CurrentMaterialization for the block.
1621       U->replaceUsesOfWith(Def, CurrentMaterialization);
1622     }
1623   }
1624 }
1625 
1626 // Splits the block at a particular instruction unless it is the first
1627 // instruction in the block with a single predecessor.
1628 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1629   auto *BB = I->getParent();
1630   if (&BB->front() == I) {
1631     if (BB->getSinglePredecessor()) {
1632       BB->setName(Name);
1633       return BB;
1634     }
1635   }
1636   return BB->splitBasicBlock(I, Name);
1637 }
1638 
1639 // Split above and below a particular instruction so that it
1640 // will be all alone by itself in a block.
1641 static void splitAround(Instruction *I, const Twine &Name) {
1642   splitBlockIfNotFirst(I, Name);
1643   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1644 }
1645 
1646 static bool isSuspendBlock(BasicBlock *BB) {
1647   return isa<AnyCoroSuspendInst>(BB->front());
1648 }
1649 
1650 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
1651 
1652 /// Does control flow starting at the given block ever reach a suspend
1653 /// instruction before reaching a block in VisitedOrFreeBBs?
1654 static bool isSuspendReachableFrom(BasicBlock *From,
1655                                    VisitedBlocksSet &VisitedOrFreeBBs) {
1656   // Eagerly try to add this block to the visited set.  If it's already
1657   // there, stop recursing; this path doesn't reach a suspend before
1658   // either looping or reaching a freeing block.
1659   if (!VisitedOrFreeBBs.insert(From).second)
1660     return false;
1661 
1662   // We assume that we'll already have split suspends into their own blocks.
1663   if (isSuspendBlock(From))
1664     return true;
1665 
1666   // Recurse on the successors.
1667   for (auto Succ : successors(From)) {
1668     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
1669       return true;
1670   }
1671 
1672   return false;
1673 }
1674 
1675 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
1676 /// suspend point?
1677 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
1678   // Seed the visited set with all the basic blocks containing a free
1679   // so that we won't pass them up.
1680   VisitedBlocksSet VisitedOrFreeBBs;
1681   for (auto User : AI->users()) {
1682     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
1683       VisitedOrFreeBBs.insert(FI->getParent());
1684   }
1685 
1686   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
1687 }
1688 
1689 /// After we split the coroutine, will the given basic block be along
1690 /// an obvious exit path for the resumption function?
1691 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
1692                                               unsigned depth = 3) {
1693   // If we've bottomed out our depth count, stop searching and assume
1694   // that the path might loop back.
1695   if (depth == 0) return false;
1696 
1697   // If this is a suspend block, we're about to exit the resumption function.
1698   if (isSuspendBlock(BB)) return true;
1699 
1700   // Recurse into the successors.
1701   for (auto Succ : successors(BB)) {
1702     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
1703       return false;
1704   }
1705 
1706   // If none of the successors leads back in a loop, we're on an exit/abort.
1707   return true;
1708 }
1709 
1710 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
1711   // Look for a free that isn't sufficiently obviously followed by
1712   // either a suspend or a termination, i.e. something that will leave
1713   // the coro resumption frame.
1714   for (auto U : AI->users()) {
1715     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
1716     if (!FI) continue;
1717 
1718     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
1719       return true;
1720   }
1721 
1722   // If we never found one, we don't need a stack save.
1723   return false;
1724 }
1725 
1726 /// Turn each of the given local allocas into a normal (dynamic) alloca
1727 /// instruction.
1728 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
1729                               SmallVectorImpl<Instruction*> &DeadInsts) {
1730   for (auto AI : LocalAllocas) {
1731     auto M = AI->getModule();
1732     IRBuilder<> Builder(AI);
1733 
1734     // Save the stack depth.  Try to avoid doing this if the stackrestore
1735     // is going to immediately precede a return or something.
1736     Value *StackSave = nullptr;
1737     if (localAllocaNeedsStackSave(AI))
1738       StackSave = Builder.CreateCall(
1739                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
1740 
1741     // Allocate memory.
1742     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
1743     Alloca->setAlignment(Align(AI->getAlignment()));
1744 
1745     for (auto U : AI->users()) {
1746       // Replace gets with the allocation.
1747       if (isa<CoroAllocaGetInst>(U)) {
1748         U->replaceAllUsesWith(Alloca);
1749 
1750       // Replace frees with stackrestores.  This is safe because
1751       // alloca.alloc is required to obey a stack discipline, although we
1752       // don't enforce that structurally.
1753       } else {
1754         auto FI = cast<CoroAllocaFreeInst>(U);
1755         if (StackSave) {
1756           Builder.SetInsertPoint(FI);
1757           Builder.CreateCall(
1758                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
1759                              StackSave);
1760         }
1761       }
1762       DeadInsts.push_back(cast<Instruction>(U));
1763     }
1764 
1765     DeadInsts.push_back(AI);
1766   }
1767 }
1768 
1769 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
1770 /// This happens during the all-instructions iteration, so it must not
1771 /// delete the call.
1772 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
1773                                         coro::Shape &Shape,
1774                                    SmallVectorImpl<Instruction*> &DeadInsts) {
1775   IRBuilder<> Builder(AI);
1776   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
1777 
1778   for (User *U : AI->users()) {
1779     if (isa<CoroAllocaGetInst>(U)) {
1780       U->replaceAllUsesWith(Alloc);
1781     } else {
1782       auto FI = cast<CoroAllocaFreeInst>(U);
1783       Builder.SetInsertPoint(FI);
1784       Shape.emitDealloc(Builder, Alloc, nullptr);
1785     }
1786     DeadInsts.push_back(cast<Instruction>(U));
1787   }
1788 
1789   // Push this on last so that it gets deleted after all the others.
1790   DeadInsts.push_back(AI);
1791 
1792   // Return the new allocation value so that we can check for needed spills.
1793   return cast<Instruction>(Alloc);
1794 }
1795 
1796 /// Get the current swifterror value.
1797 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
1798                                      coro::Shape &Shape) {
1799   // Make a fake function pointer as a sort of intrinsic.
1800   auto FnTy = FunctionType::get(ValueTy, {}, false);
1801   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1802 
1803   auto Call = Builder.CreateCall(FnTy, Fn, {});
1804   Shape.SwiftErrorOps.push_back(Call);
1805 
1806   return Call;
1807 }
1808 
1809 /// Set the given value as the current swifterror value.
1810 ///
1811 /// Returns a slot that can be used as a swifterror slot.
1812 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
1813                                      coro::Shape &Shape) {
1814   // Make a fake function pointer as a sort of intrinsic.
1815   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
1816                                 {V->getType()}, false);
1817   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1818 
1819   auto Call = Builder.CreateCall(FnTy, Fn, { V });
1820   Shape.SwiftErrorOps.push_back(Call);
1821 
1822   return Call;
1823 }
1824 
1825 /// Set the swifterror value from the given alloca before a call,
1826 /// then put in back in the alloca afterwards.
1827 ///
1828 /// Returns an address that will stand in for the swifterror slot
1829 /// until splitting.
1830 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
1831                                                  AllocaInst *Alloca,
1832                                                  coro::Shape &Shape) {
1833   auto ValueTy = Alloca->getAllocatedType();
1834   IRBuilder<> Builder(Call);
1835 
1836   // Load the current value from the alloca and set it as the
1837   // swifterror value.
1838   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
1839   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
1840 
1841   // Move to after the call.  Since swifterror only has a guaranteed
1842   // value on normal exits, we can ignore implicit and explicit unwind
1843   // edges.
1844   if (isa<CallInst>(Call)) {
1845     Builder.SetInsertPoint(Call->getNextNode());
1846   } else {
1847     auto Invoke = cast<InvokeInst>(Call);
1848     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1849   }
1850 
1851   // Get the current swifterror value and store it to the alloca.
1852   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
1853   Builder.CreateStore(ValueAfterCall, Alloca);
1854 
1855   return Addr;
1856 }
1857 
1858 /// Eliminate a formerly-swifterror alloca by inserting the get/set
1859 /// intrinsics and attempting to MemToReg the alloca away.
1860 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
1861                                       coro::Shape &Shape) {
1862   for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
1863     // We're likely changing the use list, so use a mutation-safe
1864     // iteration pattern.
1865     auto &Use = *UI;
1866     ++UI;
1867 
1868     // swifterror values can only be used in very specific ways.
1869     // We take advantage of that here.
1870     auto User = Use.getUser();
1871     if (isa<LoadInst>(User) || isa<StoreInst>(User))
1872       continue;
1873 
1874     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
1875     auto Call = cast<Instruction>(User);
1876 
1877     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
1878 
1879     // Use the returned slot address as the call argument.
1880     Use.set(Addr);
1881   }
1882 
1883   // All the uses should be loads and stores now.
1884   assert(isAllocaPromotable(Alloca));
1885 }
1886 
1887 /// "Eliminate" a swifterror argument by reducing it to the alloca case
1888 /// and then loading and storing in the prologue and epilog.
1889 ///
1890 /// The argument keeps the swifterror flag.
1891 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
1892                                         coro::Shape &Shape,
1893                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1894   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
1895 
1896   auto ArgTy = cast<PointerType>(Arg.getType());
1897   auto ValueTy = ArgTy->getElementType();
1898 
1899   // Reduce to the alloca case:
1900 
1901   // Create an alloca and replace all uses of the arg with it.
1902   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
1903   Arg.replaceAllUsesWith(Alloca);
1904 
1905   // Set an initial value in the alloca.  swifterror is always null on entry.
1906   auto InitialValue = Constant::getNullValue(ValueTy);
1907   Builder.CreateStore(InitialValue, Alloca);
1908 
1909   // Find all the suspends in the function and save and restore around them.
1910   for (auto Suspend : Shape.CoroSuspends) {
1911     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
1912   }
1913 
1914   // Find all the coro.ends in the function and restore the error value.
1915   for (auto End : Shape.CoroEnds) {
1916     Builder.SetInsertPoint(End);
1917     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
1918     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
1919   }
1920 
1921   // Now we can use the alloca logic.
1922   AllocasToPromote.push_back(Alloca);
1923   eliminateSwiftErrorAlloca(F, Alloca, Shape);
1924 }
1925 
1926 /// Eliminate all problematic uses of swifterror arguments and allocas
1927 /// from the function.  We'll fix them up later when splitting the function.
1928 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
1929   SmallVector<AllocaInst*, 4> AllocasToPromote;
1930 
1931   // Look for a swifterror argument.
1932   for (auto &Arg : F.args()) {
1933     if (!Arg.hasSwiftErrorAttr()) continue;
1934 
1935     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
1936     break;
1937   }
1938 
1939   // Look for swifterror allocas.
1940   for (auto &Inst : F.getEntryBlock()) {
1941     auto Alloca = dyn_cast<AllocaInst>(&Inst);
1942     if (!Alloca || !Alloca->isSwiftError()) continue;
1943 
1944     // Clear the swifterror flag.
1945     Alloca->setSwiftError(false);
1946 
1947     AllocasToPromote.push_back(Alloca);
1948     eliminateSwiftErrorAlloca(F, Alloca, Shape);
1949   }
1950 
1951   // If we have any allocas to promote, compute a dominator tree and
1952   // promote them en masse.
1953   if (!AllocasToPromote.empty()) {
1954     DominatorTree DT(F);
1955     PromoteMemToReg(AllocasToPromote, DT);
1956   }
1957 }
1958 
1959 /// retcon and retcon.once conventions assume that all spill uses can be sunk
1960 /// after the coro.begin intrinsic.
1961 static void sinkSpillUsesAfterCoroBegin(Function &F,
1962                                         const FrameDataInfo &FrameData,
1963                                         CoroBeginInst *CoroBegin) {
1964   DominatorTree Dom(F);
1965 
1966   SmallSetVector<Instruction *, 32> ToMove;
1967   SmallVector<Instruction *, 32> Worklist;
1968 
1969   // Collect all users that precede coro.begin.
1970   for (auto *Def : FrameData.getAllDefs()) {
1971     for (User *U : Def->users()) {
1972       auto Inst = cast<Instruction>(U);
1973       if (Inst->getParent() != CoroBegin->getParent() ||
1974           Dom.dominates(CoroBegin, Inst))
1975         continue;
1976       if (ToMove.insert(Inst))
1977         Worklist.push_back(Inst);
1978     }
1979   }
1980   // Recursively collect users before coro.begin.
1981   while (!Worklist.empty()) {
1982     auto *Def = Worklist.back();
1983     Worklist.pop_back();
1984     for (User *U : Def->users()) {
1985       auto Inst = cast<Instruction>(U);
1986       if (Dom.dominates(CoroBegin, Inst))
1987         continue;
1988       if (ToMove.insert(Inst))
1989         Worklist.push_back(Inst);
1990     }
1991   }
1992 
1993   // Sort by dominance.
1994   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
1995   std::sort(InsertionList.begin(), InsertionList.end(),
1996             [&Dom](Instruction *A, Instruction *B) -> bool {
1997               // If a dominates b it should preceed (<) b.
1998               return Dom.dominates(A, B);
1999             });
2000 
2001   Instruction *InsertPt = CoroBegin->getNextNode();
2002   for (Instruction *Inst : InsertionList)
2003     Inst->moveBefore(InsertPt);
2004 
2005   return;
2006 }
2007 
2008 /// For each local variable that all of its user are only used inside one of
2009 /// suspended region, we sink their lifetime.start markers to the place where
2010 /// after the suspend block. Doing so minimizes the lifetime of each variable,
2011 /// hence minimizing the amount of data we end up putting on the frame.
2012 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2013                                      SuspendCrossingInfo &Checker) {
2014   DominatorTree DT(F);
2015 
2016   // Collect all possible basic blocks which may dominate all uses of allocas.
2017   SmallPtrSet<BasicBlock *, 4> DomSet;
2018   DomSet.insert(&F.getEntryBlock());
2019   for (auto *CSI : Shape.CoroSuspends) {
2020     BasicBlock *SuspendBlock = CSI->getParent();
2021     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2022            "should have split coro.suspend into its own block");
2023     DomSet.insert(SuspendBlock->getSingleSuccessor());
2024   }
2025 
2026   for (Instruction &I : instructions(F)) {
2027     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2028     if (!AI)
2029       continue;
2030 
2031     for (BasicBlock *DomBB : DomSet) {
2032       bool Valid = true;
2033       SmallVector<Instruction *, 1> Lifetimes;
2034 
2035       auto isLifetimeStart = [](Instruction* I) {
2036         if (auto* II = dyn_cast<IntrinsicInst>(I))
2037           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2038         return false;
2039       };
2040 
2041       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2042         if (isLifetimeStart(U)) {
2043           Lifetimes.push_back(U);
2044           return true;
2045         }
2046         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2047           return false;
2048         if (isLifetimeStart(U->user_back())) {
2049           Lifetimes.push_back(U->user_back());
2050           return true;
2051         }
2052         return false;
2053       };
2054 
2055       for (User *U : AI->users()) {
2056         Instruction *UI = cast<Instruction>(U);
2057         // For all users except lifetime.start markers, if they are all
2058         // dominated by one of the basic blocks and do not cross
2059         // suspend points as well, then there is no need to spill the
2060         // instruction.
2061         if (!DT.dominates(DomBB, UI->getParent()) ||
2062             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2063           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2064           // markers.
2065           if (collectLifetimeStart(UI, AI))
2066             continue;
2067           Valid = false;
2068           break;
2069         }
2070       }
2071       // Sink lifetime.start markers to dominate block when they are
2072       // only used outside the region.
2073       if (Valid && Lifetimes.size() != 0) {
2074         // May be AI itself, when the type of AI is i8*
2075         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2076           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2077             return AI;
2078           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2079           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2080                                   DomBB->getTerminator());
2081         }(AI);
2082 
2083         auto *NewLifetime = Lifetimes[0]->clone();
2084         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2085         NewLifetime->insertBefore(DomBB->getTerminator());
2086 
2087         // All the outsided lifetime.start markers are no longer necessary.
2088         for (Instruction *S : Lifetimes)
2089           S->eraseFromParent();
2090 
2091         break;
2092       }
2093     }
2094   }
2095 }
2096 
2097 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2098                                 const SuspendCrossingInfo &Checker,
2099                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2100   // Collect lifetime.start info for each alloca.
2101   using LifetimeStart = SmallPtrSet<Instruction *, 2>;
2102   llvm::DenseMap<AllocaInst *, std::unique_ptr<LifetimeStart>> LifetimeMap;
2103   for (Instruction &I : instructions(F)) {
2104     auto *II = dyn_cast<IntrinsicInst>(&I);
2105     if (!II || II->getIntrinsicID() != Intrinsic::lifetime_start)
2106       continue;
2107 
2108     if (auto *OpInst = dyn_cast<Instruction>(II->getOperand(1))) {
2109       if (auto *AI = dyn_cast<AllocaInst>(OpInst->stripPointerCasts())) {
2110 
2111         if (LifetimeMap.find(AI) == LifetimeMap.end())
2112           LifetimeMap[AI] = std::make_unique<LifetimeStart>();
2113         LifetimeMap[AI]->insert(isa<AllocaInst>(OpInst) ? II : OpInst);
2114       }
2115     }
2116   }
2117 
2118   for (Instruction &I : instructions(F)) {
2119     auto *AI = dyn_cast<AllocaInst>(&I);
2120     if (!AI)
2121       continue;
2122     // The PromiseAlloca will be specially handled since it needs to be in a
2123     // fixed position in the frame.
2124     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2125       continue;
2126     }
2127     bool ShouldLiveOnFrame = false;
2128     auto Iter = LifetimeMap.find(AI);
2129     if (Iter != LifetimeMap.end()) {
2130       // Check against lifetime.start if the instruction has the info.
2131       for (User *U : I.users()) {
2132         for (auto *S : *Iter->second)
2133           if ((ShouldLiveOnFrame = Checker.isDefinitionAcrossSuspend(*S, U)))
2134             break;
2135         if (ShouldLiveOnFrame)
2136           break;
2137       }
2138       if (!ShouldLiveOnFrame)
2139         continue;
2140     }
2141     // At this point, either ShouldLiveOnFrame is true or we didn't have
2142     // lifetime information. We will need to rely on more precise pointer
2143     // tracking.
2144     DominatorTree DT(F);
2145     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2146                              *Shape.CoroBegin, Checker};
2147     Visitor.visitPtr(*AI);
2148     if (!Visitor.getShouldLiveOnFrame())
2149       continue;
2150     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2151                          Visitor.getMayWriteBeforeCoroBegin());
2152   }
2153 }
2154 
2155 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2156   eliminateSwiftError(F, Shape);
2157 
2158   if (Shape.ABI == coro::ABI::Switch &&
2159       Shape.SwitchLowering.PromiseAlloca) {
2160     Shape.getSwitchCoroId()->clearPromise();
2161   }
2162 
2163   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2164   // intrinsics are in their own blocks to simplify the logic of building up
2165   // SuspendCrossing data.
2166   for (auto *CSI : Shape.CoroSuspends) {
2167     if (auto *Save = CSI->getCoroSave())
2168       splitAround(Save, "CoroSave");
2169     splitAround(CSI, "CoroSuspend");
2170   }
2171 
2172   // Put CoroEnds into their own blocks.
2173   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2174     splitAround(CE, "CoroEnd");
2175 
2176     // Emit the musttail call function in a new block before the CoroEnd.
2177     // We do this here so that the right suspend crossing info is computed for
2178     // the uses of the musttail call function call. (Arguments to the coro.end
2179     // instructions would be ignored)
2180     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2181       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2182       if (!MustTailCallFn)
2183         continue;
2184       IRBuilder<> Builder(AsyncEnd);
2185       SmallVector<Value *, 8> Args(AsyncEnd->args());
2186       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2187       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2188                                       Arguments, Builder);
2189       splitAround(Call, "MustTailCall.Before.CoroEnd");
2190     }
2191   }
2192 
2193   // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2194   // never has its definition separated from the PHI by the suspend point.
2195   rewritePHIs(F);
2196 
2197   // Build suspend crossing info.
2198   SuspendCrossingInfo Checker(F, Shape);
2199 
2200   IRBuilder<> Builder(F.getContext());
2201   FrameDataInfo FrameData;
2202   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2203   SmallVector<Instruction*, 4> DeadInstructions;
2204 
2205   {
2206     SpillInfo Spills;
2207     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2208       // See if there are materializable instructions across suspend points.
2209       for (Instruction &I : instructions(F))
2210         if (materializable(I))
2211           for (User *U : I.users())
2212             if (Checker.isDefinitionAcrossSuspend(I, U))
2213               Spills[&I].push_back(cast<Instruction>(U));
2214 
2215       if (Spills.empty())
2216         break;
2217 
2218       // Rewrite materializable instructions to be materialized at the use
2219       // point.
2220       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2221       rewriteMaterializableInstructions(Builder, Spills);
2222       Spills.clear();
2223     }
2224   }
2225 
2226   sinkLifetimeStartMarkers(F, Shape, Checker);
2227   collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2228   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2229 
2230   // Collect the spills for arguments and other not-materializable values.
2231   for (Argument &A : F.args())
2232     for (User *U : A.users())
2233       if (Checker.isDefinitionAcrossSuspend(A, U))
2234         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2235 
2236   for (Instruction &I : instructions(F)) {
2237     // Values returned from coroutine structure intrinsics should not be part
2238     // of the Coroutine Frame.
2239     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2240       continue;
2241 
2242     // The Coroutine Promise always included into coroutine frame, no need to
2243     // check for suspend crossing.
2244     if (Shape.ABI == coro::ABI::Switch &&
2245         Shape.SwitchLowering.PromiseAlloca == &I)
2246       continue;
2247 
2248     // Handle alloca.alloc specially here.
2249     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2250       // Check whether the alloca's lifetime is bounded by suspend points.
2251       if (isLocalAlloca(AI)) {
2252         LocalAllocas.push_back(AI);
2253         continue;
2254       }
2255 
2256       // If not, do a quick rewrite of the alloca and then add spills of
2257       // the rewritten value.  The rewrite doesn't invalidate anything in
2258       // Spills because the other alloca intrinsics have no other operands
2259       // besides AI, and it doesn't invalidate the iteration because we delay
2260       // erasing AI.
2261       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2262 
2263       for (User *U : Alloc->users()) {
2264         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2265           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2266       }
2267       continue;
2268     }
2269 
2270     // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2271     if (isa<CoroAllocaGetInst>(I))
2272       continue;
2273 
2274     if (isa<AllocaInst>(I))
2275       continue;
2276 
2277     for (User *U : I.users())
2278       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2279         // We cannot spill a token.
2280         if (I.getType()->isTokenTy())
2281           report_fatal_error(
2282               "token definition is separated from the use by a suspend point");
2283         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2284       }
2285   }
2286   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2287   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2288       Shape.ABI == coro::ABI::Async)
2289     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2290   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2291   // Add PromiseAlloca to Allocas list so that it is processed in insertSpills.
2292   if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca)
2293     // We assume that the promise alloca won't be modified before
2294     // CoroBegin and no alias will be create before CoroBegin.
2295     FrameData.Allocas.emplace_back(
2296         Shape.SwitchLowering.PromiseAlloca,
2297         DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
2298   Shape.FramePtr = insertSpills(FrameData, Shape);
2299   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2300 
2301   for (auto I : DeadInstructions)
2302     I->eraseFromParent();
2303 }
2304