1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover if, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/SmallString.h"
20 #include "llvm/Analysis/PtrUseVisitor.h"
21 #include "llvm/Analysis/StackLifetime.h"
22 #include "llvm/Config/llvm-config.h"
23 #include "llvm/IR/CFG.h"
24 #include "llvm/IR/DIBuilder.h"
25 #include "llvm/IR/Dominators.h"
26 #include "llvm/IR/IRBuilder.h"
27 #include "llvm/IR/InstIterator.h"
28 #include "llvm/Support/CommandLine.h"
29 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/MathExtras.h"
31 #include "llvm/Support/OptimizedStructLayout.h"
32 #include "llvm/Support/circular_raw_ostream.h"
33 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
36 #include <algorithm>
37 
38 using namespace llvm;
39 
40 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
41 // "coro-frame", which results in leaner debug spew.
42 #define DEBUG_TYPE "coro-suspend-crossing"
43 
44 static cl::opt<bool> EnableReuseStorageInFrame(
45     "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc("Enable the optimization which reuses storage in the coroutine "
             "frame for allocas whose live ranges do not overlap, for testing "
             "purposes"),
49     llvm::cl::init(false));
50 
51 enum { SmallVectorThreshold = 32 };
52 
// Provides a two-way mapping between blocks and their indices.
54 namespace {
55 class BlockToIndexMapping {
56   SmallVector<BasicBlock *, SmallVectorThreshold> V;
57 
58 public:
59   size_t size() const { return V.size(); }
60 
61   BlockToIndexMapping(Function &F) {
62     for (BasicBlock &BB : F)
63       V.push_back(&BB);
64     llvm::sort(V);
65   }
66 
67   size_t blockToIndex(BasicBlock *BB) const {
68     auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
70     return I - V.begin();
71   }
72 
73   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
74 };
75 } // end anonymous namespace
76 
// The SuspendCrossingInfo maintains data that allows answering the question
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'.
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point.
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
89 namespace {
90 struct SuspendCrossingInfo {
91   BlockToIndexMapping Mapping;
92 
93   struct BlockData {
94     BitVector Consumes;
95     BitVector Kills;
96     bool Suspend = false;
97     bool End = false;
98   };
99   SmallVector<BlockData, SmallVectorThreshold> Block;
100 
101   iterator_range<succ_iterator> successors(BlockData const &BD) const {
102     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
103     return llvm::successors(BB);
104   }
105 
106   BlockData &getBlockData(BasicBlock *BB) {
107     return Block[Mapping.blockToIndex(BB)];
108   }
109 
110   void dump() const;
111   void dump(StringRef Label, BitVector const &BV) const;
112 
113   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
114 
115   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
116     size_t const DefIndex = Mapping.blockToIndex(DefBB);
117     size_t const UseIndex = Mapping.blockToIndex(UseBB);
118 
119     bool const Result = Block[UseIndex].Kills[DefIndex];
120     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
121                       << " answer is " << Result << "\n");
122     return Result;
123   }
124 
125   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
126     auto *I = cast<Instruction>(U);
127 
    // PHINodes have already been rewritten so that only the ones with exactly
    // one incoming value need to be analyzed.
130     if (auto *PN = dyn_cast<PHINode>(I))
131       if (PN->getNumIncomingValues() > 1)
132         return false;
133 
134     BasicBlock *UseBB = I->getParent();
135 
136     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
137     // llvm.coro.suspend.async as if they were uses in the suspend's single
138     // predecessor: the uses conceptually occur before the suspend.
139     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
140       UseBB = UseBB->getSinglePredecessor();
141       assert(UseBB && "should have split coro.suspend into its own block");
142     }
143 
144     return hasPathCrossingSuspendPoint(DefBB, UseBB);
145   }
146 
147   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
148     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
149   }
150 
151   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
152     auto *DefBB = I.getParent();
153 
154     // As a special case, treat values produced by an llvm.coro.suspend.*
155     // as if they were defined in the single successor: the uses
156     // conceptually occur after the suspend.
157     if (isa<AnyCoroSuspendInst>(I)) {
158       DefBB = DefBB->getSingleSuccessor();
159       assert(DefBB && "should have split coro.suspend into its own block");
160     }
161 
162     return isDefinitionAcrossSuspend(DefBB, U);
163   }
164 };
165 } // end anonymous namespace
166 
167 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
168 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
169                                                 BitVector const &BV) const {
170   dbgs() << Label << ":";
171   for (size_t I = 0, N = BV.size(); I < N; ++I)
172     if (BV[I])
173       dbgs() << " " << Mapping.indexToBlock(I)->getName();
174   dbgs() << "\n";
175 }
176 
177 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
178   for (size_t I = 0, N = Block.size(); I < N; ++I) {
179     BasicBlock *const B = Mapping.indexToBlock(I);
180     dbgs() << B->getName() << ":\n";
181     dump("   Consumes", Block[I].Consumes);
182     dump("      Kills", Block[I].Kills);
183   }
184   dbgs() << "\n";
185 }
186 #endif
187 
188 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
189     : Mapping(F) {
190   const size_t N = Mapping.size();
191   Block.resize(N);
192 
193   // Initialize every block so that it consumes itself
194   for (size_t I = 0; I < N; ++I) {
195     auto &B = Block[I];
196     B.Consumes.resize(N);
197     B.Kills.resize(N);
198     B.Consumes.set(I);
199   }
200 
201   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
202   // the code beyond coro.end is reachable during initial invocation of the
203   // coroutine.
204   for (auto *CE : Shape.CoroEnds)
205     getBlockData(CE->getParent()).End = true;
206 
207   // Mark all suspend blocks and indicate that they kill everything they
208   // consume. Note, that crossing coro.save also requires a spill, as any code
209   // between coro.save and coro.suspend may resume the coroutine and all of the
210   // state needs to be saved by that time.
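  //
  // For example (illustrative; @may_resume is a made-up callee), in
  //
  //   %save = call token @llvm.coro.save(i8* %hdl)
  //   call void @may_resume() ; arbitrary code that may resume the coroutine
  //   %sp = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // any value live across %save must already be spilled to the frame before
  // the call, which is why the block containing coro.save is treated as a
  // suspend block as well.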
211   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
212     BasicBlock *SuspendBlock = BarrierInst->getParent();
213     auto &B = getBlockData(SuspendBlock);
214     B.Suspend = true;
215     B.Kills |= B.Consumes;
216   };
217   for (auto *CSI : Shape.CoroSuspends) {
218     markSuspendBlock(CSI);
219     if (auto *Save = CSI->getCoroSave())
220       markSuspendBlock(Save);
221   }
222 
223   // Iterate propagating consumes and kills until they stop changing.
224   int Iteration = 0;
225   (void)Iteration;
226 
227   bool Changed;
228   do {
229     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
230     LLVM_DEBUG(dbgs() << "==============\n");
231 
232     Changed = false;
233     for (size_t I = 0; I < N; ++I) {
234       auto &B = Block[I];
235       for (BasicBlock *SI : successors(B)) {
236 
237         auto SuccNo = Mapping.blockToIndex(SI);
238 
        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
241         auto &S = Block[SuccNo];
242         auto SavedConsumes = S.Consumes;
243         auto SavedKills = S.Kills;
244 
245         // Propagate Kills and Consumes from block B into its successor S.
246         S.Consumes |= B.Consumes;
247         S.Kills |= B.Kills;
248 
        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
251         if (B.Suspend) {
252           S.Kills |= B.Consumes;
253         }
254         if (S.Suspend) {
255           // If block S is a suspend block, it should kill all of the blocks it
256           // consumes.
257           S.Kills |= S.Consumes;
258         } else if (S.End) {
259           // If block S is an end block, it should not propagate kills as the
260           // blocks following coro.end() are reached during initial invocation
261           // of the coroutine while all the data are still available on the
262           // stack or in the registers.
263           S.Kills.reset();
264         } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure S is not in its own kill set.
267           S.Kills.reset(SuccNo);
268         }
269 
270         // See if anything changed.
271         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
272 
273         if (S.Kills != SavedKills) {
274           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
275                             << "\n");
276           LLVM_DEBUG(dump("S.Kills", S.Kills));
277           LLVM_DEBUG(dump("SavedKills", SavedKills));
278         }
279         if (S.Consumes != SavedConsumes) {
280           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
281           LLVM_DEBUG(dump("S.Consume", S.Consumes));
282           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
283         }
284       }
285     }
286   } while (Changed);
287   LLVM_DEBUG(dump());
288 }
289 
290 #undef DEBUG_TYPE // "coro-suspend-crossing"
291 #define DEBUG_TYPE "coro-frame"
292 
293 namespace {
294 class FrameTypeBuilder;
295 // Mapping from the to-be-spilled value to all the users that need reload.
296 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
297 struct AllocaInfo {
298   AllocaInst *Alloca;
299   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
300   bool MayWriteBeforeCoroBegin;
301   AllocaInfo(AllocaInst *Alloca,
302              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
303              bool MayWriteBeforeCoroBegin)
304       : Alloca(Alloca), Aliases(std::move(Aliases)),
305         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
306 };
307 struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
310   SpillInfo Spills;
311   // Allocas contains all values defined as allocas that need to live in the
312   // frame.
313   SmallVector<AllocaInfo, 8> Allocas;
314 
315   SmallVector<Value *, 8> getAllDefs() const {
316     SmallVector<Value *, 8> Defs;
317     for (const auto &P : Spills)
318       Defs.push_back(P.first);
319     for (const auto &A : Allocas)
320       Defs.push_back(A.Alloca);
321     return Defs;
322   }
323 
324   uint32_t getFieldIndex(Value *V) const {
325     auto Itr = FieldIndexMap.find(V);
326     assert(Itr != FieldIndexMap.end() &&
327            "Value does not have a frame field index");
328     return Itr->second;
329   }
330 
331   void setFieldIndex(Value *V, uint32_t Index) {
332     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
333            "Cannot set the index for the same field twice.");
334     FieldIndexMap[V] = Index;
335   }
336 
337   // Remap the index of every field in the frame, using the final layout index.
338   void updateLayoutIndex(FrameTypeBuilder &B);
339 
340 private:
341   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
342   // twice by mistake.
343   bool LayoutIndexUpdateStarted = false;
344   // Map from values to their slot indexes on the frame. They will be first set
345   // with their original insertion field index. After the frame is built, their
346   // indexes will be updated into the final layout index.
347   DenseMap<Value *, uint32_t> FieldIndexMap;
348 };
349 } // namespace
350 
351 #ifndef NDEBUG
352 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
353   dbgs() << "------------- " << Title << "--------------\n";
354   for (const auto &E : Spills) {
355     E.first->dump();
356     dbgs() << "   user: ";
357     for (auto *I : E.second)
358       I->dump();
359   }
360 }
361 
362 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
363   dbgs() << "------------- Allocas --------------\n";
364   for (const auto &A : Allocas) {
365     A.Alloca->dump();
366   }
367 }
368 #endif
369 
370 namespace {
371 using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
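//
// For example (illustrative), for `%x = alloca i32, align 8` the frame field
// has type i32 with an ABI alignment of 4, but it must be placed at an 8-byte
// boundary, which may require an explicit padding field in front of it.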
376 class FrameTypeBuilder {
377 private:
378   struct Field {
379     uint64_t Size;
380     uint64_t Offset;
381     Type *Ty;
382     FieldIDType LayoutFieldIndex;
383     Align Alignment;
384     Align TyAlignment;
385   };
386 
387   const DataLayout &DL;
388   LLVMContext &Context;
389   uint64_t StructSize = 0;
390   Align StructAlign;
391   bool IsFinished = false;
392 
393   SmallVector<Field, 8> Fields;
394   DenseMap<Value*, unsigned> FieldIndexByKey;
395 
396 public:
397   FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
398       : DL(DL), Context(Context) {}
399 
400   /// Add a field to this structure for the storage of an `alloca`
401   /// instruction.
402   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
403                                                bool IsHeader = false) {
404     Type *Ty = AI->getAllocatedType();
405 
406     // Make an array type if this is a static array allocation.
407     if (AI->isArrayAllocation()) {
408       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
409         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
410       else
411         report_fatal_error("Coroutines cannot handle non static allocas yet");
412     }
413 
414     return addField(Ty, AI->getAlign(), IsHeader);
415   }
416 
  /// We want to put the allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
420   ///
421   ///     cppcoro::task<void> alternative_paths(bool cond) {
422   ///         if (cond) {
423   ///             big_structure a;
424   ///             process(a);
425   ///             co_await something();
426   ///         } else {
427   ///             big_structure b;
428   ///             process2(b);
429   ///             co_await something();
430   ///         }
431   ///     }
432   ///
433   /// We want to put variable a and variable b in the same slot to
434   /// reduce the size of coroutine frame.
435   ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, so that allocas in the
  /// same non-overlapping set can share the same slot in the coroutine frame.
  /// It then adds a field for each non-overlapping set, using the largest
  /// member's type as the field type.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may differ from the order in the source code.
444   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
445                           coro::Shape &Shape);
446 
447   /// Add a field to this structure.
448   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
449                                       bool IsHeader = false) {
450     assert(!IsFinished && "adding fields to a finished builder");
451     assert(Ty && "must provide a type for a field");
452 
453     // The field size is always the alloc size of the type.
454     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
455 
456     // The field alignment might not be the type alignment, but we need
457     // to remember the type alignment anyway to build the type.
458     Align TyAlignment = DL.getABITypeAlign(Ty);
459     if (!FieldAlignment) FieldAlignment = TyAlignment;
460 
461     // Lay out header fields immediately.
462     uint64_t Offset;
463     if (IsHeader) {
464       Offset = alignTo(StructSize, FieldAlignment);
465       StructSize = Offset + FieldSize;
466 
467     // Everything else has a flexible offset.
468     } else {
469       Offset = OptimizedStructLayoutField::FlexibleOffset;
470     }
471 
472     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
473     return Fields.size() - 1;
474   }
475 
476   /// Finish the layout and set the body on the given type.
477   void finish(StructType *Ty);
478 
479   uint64_t getStructSize() const {
480     assert(IsFinished && "not yet finished!");
481     return StructSize;
482   }
483 
484   Align getStructAlign() const {
485     assert(IsFinished && "not yet finished!");
486     return StructAlign;
487   }
488 
489   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
490     assert(IsFinished && "not yet finished!");
491     return Fields[Id].LayoutFieldIndex;
492   }
493 };
494 } // namespace
495 
496 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
497   auto Updater = [&](Value *I) {
498     setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
499   };
500   LayoutIndexUpdateStarted = true;
501   for (auto &S : Spills)
502     Updater(S.first);
503   for (const auto &A : Allocas)
504     Updater(A.Alloca);
505   LayoutIndexUpdateStarted = false;
506 }
507 
508 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
509                                           FrameDataInfo &FrameData,
510                                           coro::Shape &Shape) {
511   using AllocaSetType = SmallVector<AllocaInst *, 4>;
512   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
513 
  // We need to add fields for allocas at the end of this function. However,
  // this function has multiple exits, so we use this RAII helper to avoid
  // redundant code.
  struct RAIIHelper {
    std::function<void()> func;
    RAIIHelper(std::function<void()> &&func) : func(func) {}
    ~RAIIHelper() { func(); }
520   } Helper([&]() {
521     for (auto AllocaList : NonOverlapedAllocas) {
522       auto *LargestAI = *AllocaList.begin();
523       FieldIDType Id = addFieldForAlloca(LargestAI);
524       for (auto *Alloca : AllocaList)
525         FrameData.setFieldIndex(Alloca, Id);
526     }
527   });
528 
529   if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
530     for (const auto &A : FrameData.Allocas) {
531       AllocaInst *Alloca = A.Alloca;
532       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
533     }
534     return;
535   }
536 
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap
  // in the blocks that contain coro.end and in their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas into the same
  // slot.
549   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
550   for (auto CoroSuspendInst : Shape.CoroSuspends) {
551     for (auto U : CoroSuspendInst->users()) {
552       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
553         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
554         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
555         SWI->setDefaultDest(SWI->getSuccessor(1));
556       }
557     }
558   }
559 
560   auto ExtractAllocas = [&]() {
561     AllocaSetType Allocas;
562     Allocas.reserve(FrameData.Allocas.size());
563     for (const auto &A : FrameData.Allocas)
564       Allocas.push_back(A.Alloca);
565     return Allocas;
566   };
567   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
568                                       StackLifetime::LivenessType::May);
569   StackLifetimeAnalyzer.run();
570   auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
571     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
572         StackLifetimeAnalyzer.getLiveRange(AI2));
573   };
574   auto GetAllocaSize = [&](const AllocaInfo &A) {
575     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
576     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
577     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
578     return RetSize->getFixedSize();
579   };
  // Put larger allocas in the front, so that larger allocas get a higher
  // priority to merge, which can potentially save more space. Also, each
  // AllocaSet stays ordered, so we can easily get the largest Alloca in an
  // AllocaSet.
584   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
585     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
586   });
587   for (const auto &A : FrameData.Allocas) {
588     AllocaInst *Alloca = A.Alloca;
589     bool Merged = false;
    // Try to find an existing non-overlapping alloca set whose members do not
    // interfere with this Alloca. If one is found, insert the alloca into
    // that set.
593     for (auto &AllocaSet : NonOverlapedAllocas) {
594       assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
595       bool NoInference = none_of(AllocaSet, [&](auto Iter) {
596         return IsAllocaInferenre(Alloca, Iter);
597       });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle alignment
      // information during the merging process, but they seem hard to
      // implement for little benefit.
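      //
      // For example, an alloca set whose largest member has `align 16` can
      // absorb an alloca requiring `align 8` (16 % 8 == 0), but not one
      // requiring `align 32`.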
604       bool Alignable = [&]() -> bool {
605         auto *LargestAlloca = *AllocaSet.begin();
606         return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
607                0;
608       }();
609       bool CouldMerge = NoInference && Alignable;
610       if (!CouldMerge)
611         continue;
612       AllocaSet.push_back(Alloca);
613       Merged = true;
614       break;
615     }
616     if (!Merged) {
617       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
618     }
619   }
  // Restore the default destination of every switch instruction we modified
  // above.
622   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
623     SwitchInst *SWI = SwitchAndDefaultDest.first;
624     BasicBlock *DestBB = SwitchAndDefaultDest.second;
625     SWI->setDefaultDest(DestBB);
626   }
  // This debug output shows which allocas are merged into the same slot.
628   LLVM_DEBUG(for (auto &AllocaSet
629                   : NonOverlapedAllocas) {
630     if (AllocaSet.size() > 1) {
631       dbgs() << "In Function:" << F.getName() << "\n";
632       dbgs() << "Find Union Set "
633              << "\n";
634       dbgs() << "\tAllocas are \n";
635       for (auto Alloca : AllocaSet)
636         dbgs() << "\t\t" << *Alloca << "\n";
637     }
638   });
639 }
640 
641 void FrameTypeBuilder::finish(StructType *Ty) {
642   assert(!IsFinished && "already finished!");
643 
644   // Prepare the optimal-layout field array.
645   // The Id in the layout field is a pointer to our Field for it.
646   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
647   LayoutFields.reserve(Fields.size());
648   for (auto &Field : Fields) {
649     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
650                               Field.Offset);
651   }
652 
653   // Perform layout.
654   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
655   StructSize = SizeAndAlign.first;
656   StructAlign = SizeAndAlign.second;
657 
658   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
659     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
660   };
661 
662   // We need to produce a packed struct type if there's a field whose
663   // assigned offset isn't a multiple of its natural type alignment.
664   bool Packed = [&] {
665     for (auto &LayoutField : LayoutFields) {
666       auto &F = getField(LayoutField);
667       if (!isAligned(F.TyAlignment, LayoutField.Offset))
668         return true;
669     }
670     return false;
671   }();
672 
673   // Build the struct body.
674   SmallVector<Type*, 16> FieldTypes;
675   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
676   uint64_t LastOffset = 0;
677   for (auto &LayoutField : LayoutFields) {
678     auto &F = getField(LayoutField);
679 
680     auto Offset = LayoutField.Offset;
681 
682     // Add a padding field if there's a padding gap and we're either
683     // building a packed struct or the padding gap is more than we'd
684     // get from aligning to the field type's natural alignment.
685     assert(Offset >= LastOffset);
686     if (Offset != LastOffset) {
687       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
688         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
689                                             Offset - LastOffset));
690     }
691 
692     F.Offset = Offset;
693     F.LayoutFieldIndex = FieldTypes.size();
694 
695     FieldTypes.push_back(F.Ty);
696     LastOffset = Offset + F.Size;
697   }
698 
699   Ty->setBody(FieldTypes, Packed);
700 
701 #ifndef NDEBUG
702   // Check that the IR layout matches the offsets we expect.
703   auto Layout = DL.getStructLayout(Ty);
704   for (auto &F : Fields) {
705     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
706     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
707   }
708 #endif
709 
710   IsFinished = true;
711 }
712 
713 // Build a struct that will keep state for an active coroutine.
714 //   struct f.frame {
715 //     ResumeFnTy ResumeFnAddr;
716 //     ResumeFnTy DestroyFnAddr;
717 //     int ResumeIndex;
718 //     ... promise (if present) ...
719 //     ... spills ...
720 //   };
721 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
722                                   FrameDataInfo &FrameData) {
723   LLVMContext &C = F.getContext();
724   const DataLayout &DL = F.getParent()->getDataLayout();
725   StructType *FrameTy = [&] {
726     SmallString<32> Name(F.getName());
727     Name.append(".Frame");
728     return StructType::create(C, Name);
729   }();
730 
731   FrameTypeBuilder B(C, DL);
732 
733   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
734   Optional<FieldIDType> SwitchIndexFieldId;
735 
736   if (Shape.ABI == coro::ABI::Switch) {
737     auto *FramePtrTy = FrameTy->getPointerTo();
738     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
739                                    /*IsVarArg=*/false);
740     auto *FnPtrTy = FnTy->getPointerTo();
741 
742     // Add header fields for the resume and destroy functions.
743     // We can rely on these being perfectly packed.
744     (void)B.addField(FnPtrTy, None, /*header*/ true);
745     (void)B.addField(FnPtrTy, None, /*header*/ true);
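    // For example, with 8-byte pointers these two header fields are laid out
    // back to back at offsets 0 and 8, with no padding between them.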
746 
747     // PromiseAlloca field needs to be explicitly added here because it's
748     // a header field with a fixed offset based on its alignment. Hence it
749     // needs special handling and cannot be added to FrameData.Allocas.
750     if (PromiseAlloca)
751       FrameData.setFieldIndex(
752           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
753 
754     // Add a field to store the suspend index.  This doesn't need to
755     // be in the header.
756     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
757     Type *IndexType = Type::getIntNTy(C, IndexBits);
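    // For example, a coroutine with 3 suspend points needs
    // max(1, ceil(log2(3))) == 2 index bits, while a coroutine with a single
    // suspend point still gets 1 bit.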
758 
759     SwitchIndexFieldId = B.addField(IndexType, None);
760   } else {
761     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
762   }
763 
  // Because multiple allocas may share the same field slot,
  // we add fields for all allocas here.
766   B.addFieldForAllocas(F, FrameData, Shape);
767   // Add PromiseAlloca to Allocas list so that
768   // 1. updateLayoutIndex could update its index after
769   // `performOptimizedStructLayout`
770   // 2. it is processed in insertSpills.
771   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
774     FrameData.Allocas.emplace_back(
775         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
776   // Create an entry for every spilled value.
777   for (auto &S : FrameData.Spills) {
778     FieldIDType Id = B.addField(S.first->getType(), None);
779     FrameData.setFieldIndex(S.first, Id);
780   }
781 
782   B.finish(FrameTy);
783   FrameData.updateLayoutIndex(B);
784   Shape.FrameAlign = B.getStructAlign();
785   Shape.FrameSize = B.getStructSize();
786 
787   switch (Shape.ABI) {
788   case coro::ABI::Switch:
789     // In the switch ABI, remember the switch-index field.
790     Shape.SwitchLowering.IndexField =
791         B.getLayoutFieldIndex(*SwitchIndexFieldId);
792 
793     // Also round the frame size up to a multiple of its alignment, as is
794     // generally expected in C/C++.
795     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
796     break;
797 
798   // In the retcon ABI, remember whether the frame is inline in the storage.
799   case coro::ABI::Retcon:
800   case coro::ABI::RetconOnce: {
801     auto Id = Shape.getRetconCoroId();
802     Shape.RetconLowering.IsFrameInlineInStorage
803       = (B.getStructSize() <= Id->getStorageSize() &&
804          B.getStructAlign() <= Id->getStorageAlignment());
805     break;
806   }
807   case coro::ABI::Async: {
808     Shape.AsyncLowering.FrameOffset =
809         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
810     // Also make the final context size a multiple of the context alignment to
811     // make allocation easier for allocators.
812     Shape.AsyncLowering.ContextSize =
813         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
814                 Shape.AsyncLowering.getContextAlignment());
815     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
816       report_fatal_error(
817           "The alignment requirment of frame variables cannot be higher than "
818           "the alignment of the async function context");
819     }
820     break;
821   }
822   }
823 
824   return FrameTy;
825 }
826 
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin, but
//      used after CoroBegin? In that case, we will need to recreate the
//      aliases after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. List of all BasicBlocks that use this alloca or any of the aliases of
//      the alloca. In the end, we check if there exist any two basic blocks
//      that cross suspension points. If so, this alloca must be put on the
//      frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some
//      point, either by storing the address somewhere, or by the address
//      being used in a function call that might capture. If it's ever
//      escaped, this alloca must be put on the frame conservatively.
//
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
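//
// For example (illustrative; names are made up):
//
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8], [16 x i8]* %a, i64 0, i64 8
//   %hdl = call i8* @llvm.coro.begin(...)
//   ...
//   store i8 0, i8* %p       ; alias of %a used after coro.begin
//
// Here %p is an alias of %a created before CoroBegin with a known offset of
// 8, so after CoroBegin it is recreated by adding 8 bytes to the address of
// %a's slot in the frame.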
852 namespace {
853 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
854   using Base = PtrUseVisitor<AllocaUseVisitor>;
855   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
856                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
857       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}
858 
859   void visit(Instruction &I) {
860     Users.insert(&I);
861     Base::visit(I);
862     // If the pointer is escaped prior to CoroBegin, we have to assume it would
863     // be written into before CoroBegin as well.
864     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
865       MayWriteBeforeCoroBegin = true;
866     }
867   }
868   // We need to provide this overload as PtrUseVisitor uses a pointer based
869   // visiting function.
870   void visit(Instruction *I) { return visit(*I); }
871 
872   void visitPHINode(PHINode &I) {
873     enqueueUsers(I);
874     handleAlias(I);
875   }
876 
877   void visitSelectInst(SelectInst &I) {
878     enqueueUsers(I);
879     handleAlias(I);
880   }
881 
882   void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
885     handleMayWrite(SI);
886 
887     if (SI.getValueOperand() != U->get())
888       return;
889 
890     // We are storing the pointer into a memory location, potentially escaping.
891     // As an optimization, we try to detect simple cases where it doesn't
892     // actually escape, for example:
893     //   %ptr = alloca ..
894     //   %addr = alloca ..
895     //   store %ptr, %addr
896     //   %x = load %addr
897     //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
900     auto IsSimpleStoreThenLoad = [&]() {
901       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
902       // If the memory location we are storing to is not an alloca, it
903       // could be an alias of some other memory locations, which is difficult
904       // to analyze.
905       if (!AI)
906         return false;
907       // StoreAliases contains aliases of the memory location stored into.
908       SmallVector<Instruction *, 4> StoreAliases = {AI};
909       while (!StoreAliases.empty()) {
910         Instruction *I = StoreAliases.pop_back_val();
911         for (User *U : I->users()) {
912           // If we are loading from the memory location, we are creating an
913           // alias of the original pointer.
914           if (auto *LI = dyn_cast<LoadInst>(U)) {
915             enqueueUsers(*LI);
916             handleAlias(*LI);
917             continue;
918           }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
921           if (auto *S = dyn_cast<StoreInst>(U))
922             if (S->getPointerOperand() == I)
923               continue;
924           if (auto *II = dyn_cast<IntrinsicInst>(U))
925             if (II->isLifetimeStartOrEnd())
926               continue;
          // BitCastInst creates aliases of the memory location being stored
928           // into.
929           if (auto *BI = dyn_cast<BitCastInst>(U)) {
930             StoreAliases.push_back(BI);
931             continue;
932           }
933           return false;
934         }
935       }
936 
937       return true;
938     };
939 
940     if (!IsSimpleStoreThenLoad())
941       PI.setEscaped(&SI);
942   }
943 
944   // All mem intrinsics modify the data.
945   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
946 
947   void visitBitCastInst(BitCastInst &BC) {
948     Base::visitBitCastInst(BC);
949     handleAlias(BC);
950   }
951 
952   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
953     Base::visitAddrSpaceCastInst(ASC);
954     handleAlias(ASC);
955   }
956 
957   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
958     // The base visitor will adjust Offset accordingly.
959     Base::visitGetElementPtrInst(GEPI);
960     handleAlias(GEPI);
961   }
962 
963   void visitIntrinsicInst(IntrinsicInst &II) {
964     if (II.getIntrinsicID() != Intrinsic::lifetime_start)
965       return Base::visitIntrinsicInst(II);
966     LifetimeStarts.insert(&II);
967   }
968 
969   void visitCallBase(CallBase &CB) {
970     for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
971       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
972         PI.setEscaped(&CB);
973     handleMayWrite(CB);
974   }
975 
976   bool getShouldLiveOnFrame() const {
977     if (!ShouldLiveOnFrame)
978       ShouldLiveOnFrame = computeShouldLiveOnFrame();
979     return ShouldLiveOnFrame.getValue();
980   }
981 
982   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
983 
984   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
985     assert(getShouldLiveOnFrame() && "This method should only be called if the "
986                                      "alloca needs to live on the frame.");
987     for (const auto &P : AliasOffetMap)
988       if (!P.second)
989         report_fatal_error("Unable to handle an alias with unknown offset "
990                            "created before CoroBegin.");
991     return AliasOffetMap;
992   }
993 
994 private:
995   const DominatorTree &DT;
996   const CoroBeginInst &CoroBegin;
997   const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset in the original Alloca. They need to be recreated after CoroBegin
  // off the frame.
1001   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1002   SmallPtrSet<Instruction *, 4> Users{};
1003   SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1004   bool MayWriteBeforeCoroBegin{false};
1005 
1006   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1007 
1008   bool computeShouldLiveOnFrame() const {
1009     // If lifetime information is available, we check it first since it's
1010     // more precise. We look at every pair of lifetime.start intrinsic and
1011     // every basic block that uses the pointer to see if they cross suspension
1012     // points. The uses cover both direct uses as well as indirect uses.
1013     if (!LifetimeStarts.empty()) {
1014       for (auto *I : Users)
1015         for (auto *S : LifetimeStarts)
1016           if (Checker.isDefinitionAcrossSuspend(*S, I))
1017             return true;
1018       return false;
1019     }
1020     // FIXME: Ideally the isEscaped check should come at the beginning.
1021     // However there are a few loose ends that need to be fixed first before
1022     // we can do that. We need to make sure we are not over-conservative, so
1023     // that the data accessed in-between await_suspend and symmetric transfer
1024     // is always put on the stack, and also data accessed after coro.end is
1025     // always put on the stack (esp the return object). To fix that, we need
1026     // to:
1027     //  1) Potentially treat sret as nocapture in calls
1028     //  2) Special handle the return object and put it on the stack
1029     //  3) Utilize lifetime.end intrinsic
1030     if (PI.isEscaped())
1031       return true;
1032 
1033     for (auto *U1 : Users)
1034       for (auto *U2 : Users)
1035         if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1036           return true;
1037 
1038     return false;
1039   }
1040 
1041   void handleMayWrite(const Instruction &I) {
1042     if (!DT.dominates(&CoroBegin, &I))
1043       MayWriteBeforeCoroBegin = true;
1044   }
1045 
1046   bool usedAfterCoroBegin(Instruction &I) {
1047     for (auto &U : I.uses())
1048       if (DT.dominates(&CoroBegin, U))
1049         return true;
1050     return false;
1051   }
1052 
1053   void handleAlias(Instruction &I) {
1054     // We track all aliases created prior to CoroBegin but used after.
1055     // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
1057     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1058       return;
1059 
1060     if (!IsOffsetKnown) {
1061       AliasOffetMap[&I].reset();
1062     } else {
1063       auto Itr = AliasOffetMap.find(&I);
1064       if (Itr == AliasOffetMap.end()) {
1065         AliasOffetMap[&I] = Offset;
1066       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1067         // If we have seen two different possible values for this alias, we set
1068         // it to empty.
1069         AliasOffetMap[&I].reset();
1070       }
1071     }
1072   }
1073 };
1074 } // namespace
1075 
// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in a block.
1079 //
1080 // Split away catchswitch into a separate block and insert in its place:
1081 //
1082 //   cleanuppad <InsertPt> cleanupret.
1083 //
// The cleanupret instruction will act as an insert point for the spill.
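//
// For example (illustrative; block and value names are made up), a block
//
//   bb:
//     %val = phi i32 ...
//     %cs = catchswitch within none [label %handler] unwind to caller
//
// becomes
//
//   bb:
//     %val = phi i32 ...
//     %pad = cleanuppad within none []
//     cleanupret from %pad unwind label %bb.split ; spills go before this
//   bb.split:
//     %cs = catchswitch within none [label %handler] unwind to caller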
1085 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1086   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1087   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1088   CurrentBlock->getTerminator()->eraseFromParent();
1089 
1090   auto *CleanupPad =
1091       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1092   auto *CleanupRet =
1093       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1094   return CleanupRet;
1095 }
1096 
1097 // Replace all alloca and SSA values that are accessed across suspend points
1098 // with GetElementPointer from coroutine frame + loads and stores. Create an
1099 // AllocaSpillBB that will become the new entry block for the resume parts of
1100 // the coroutine:
1101 //
1102 //    %hdl = coro.begin(...)
1103 //    whatever
1104 //
1105 // becomes:
1106 //
1107 //    %hdl = coro.begin(...)
1108 //    %FramePtr = bitcast i8* hdl to %f.frame*
1109 //    br label %AllocaSpillBB
1110 //
1111 //  AllocaSpillBB:
1112 //    ; geps corresponding to allocas that were moved to coroutine frame
1113 //    br label PostSpill
1114 //
1115 //  PostSpill:
1116 //    whatever
1117 //
1118 //
1119 static Instruction *insertSpills(const FrameDataInfo &FrameData,
1120                                  coro::Shape &Shape) {
1121   auto *CB = Shape.CoroBegin;
1122   LLVMContext &C = CB->getContext();
1123   IRBuilder<> Builder(CB->getNextNode());
1124   StructType *FrameTy = Shape.FrameTy;
1125   PointerType *FramePtrTy = FrameTy->getPointerTo();
1126   auto *FramePtr =
1127       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1128   DominatorTree DT(*CB->getFunction());
1129   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1130 
1131   // Create a GEP with the given index into the coroutine frame for the original
1132   // value Orig. Appends an extra 0 index for array-allocas, preserving the
1133   // original type.
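  //
  // For example (illustrative), for `%a = alloca i32, i32 4` the frame field
  // has type [4 x i32]; the extra 0 index makes the GEP produce an i32* again,
  // matching the type of the original alloca.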
1134   auto GetFramePointer = [&](Value *Orig) -> Value * {
1135     FieldIDType Index = FrameData.getFieldIndex(Orig);
1136     SmallVector<Value *, 3> Indices = {
1137         ConstantInt::get(Type::getInt32Ty(C), 0),
1138         ConstantInt::get(Type::getInt32Ty(C), Index),
1139     };
1140 
1141     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1142       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1143         auto Count = CI->getValue().getZExtValue();
1144         if (Count > 1) {
1145           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1146         }
1147       } else {
1148         report_fatal_error("Coroutines cannot handle non static allocas yet");
1149       }
1150     }
1151 
1152     auto GEP = cast<GetElementPtrInst>(
1153         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1154     if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst, it
      // implies that the AllocaInst may share its frame slot with another
      // AllocaInst. So we cast the GEP to the AllocaInst's type here to reuse
      // the frame storage.
      //
      // Note: If we change the strategy for dealing with alignment, we need
      // to refine this cast.
1162       if (GEP->getResultElementType() != Orig->getType())
1163         return Builder.CreateBitCast(GEP, Orig->getType(),
1164                                      Orig->getName() + Twine(".cast"));
1165     }
1166     return GEP;
1167   };
1168 
1169   for (auto const &E : FrameData.Spills) {
1170     Value *Def = E.first;
1171     // Create a store instruction storing the value into the
1172     // coroutine frame.
1173     Instruction *InsertPt = nullptr;
1174     if (auto *Arg = dyn_cast<Argument>(Def)) {
1175       // For arguments, we will place the store instruction right after
1176       // the coroutine frame pointer instruction, i.e. bitcast of
1177       // coro.begin from i8* to %f.frame*.
1178       InsertPt = FramePtr->getNextNode();
1179 
1180       // If we're spilling an Argument, make sure we clear 'nocapture'
1181       // from the coroutine function.
1182       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1183 
1184     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1185       // Don't spill immediately after a suspend; splitting assumes
1186       // that the suspend will be followed by a branch.
1187       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1188     } else {
1189       auto *I = cast<Instruction>(Def);
1190       if (!DT.dominates(CB, I)) {
1191         // If it is not dominated by CoroBegin, then spill should be
1192         // inserted immediately after CoroFrame is computed.
1193         InsertPt = FramePtr->getNextNode();
1194       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1195         // If we are spilling the result of the invoke instruction, split
1196         // the normal edge and insert the spill in the new block.
1197         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1198         InsertPt = NewBB->getTerminator();
1199       } else if (isa<PHINode>(I)) {
        // Skip PHINodes and EH pad instructions.
1201         BasicBlock *DefBlock = I->getParent();
1202         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1203           InsertPt = splitBeforeCatchSwitch(CSI);
1204         else
1205           InsertPt = &*DefBlock->getFirstInsertionPt();
1206       } else {
1207         assert(!I->isTerminator() && "unexpected terminator");
1208         // For all other values, the spill is placed immediately after
1209         // the definition.
1210         InsertPt = I->getNextNode();
1211       }
1212     }
1213 
1214     auto Index = FrameData.getFieldIndex(Def);
1215     Builder.SetInsertPoint(InsertPt);
1216     auto *G = Builder.CreateConstInBoundsGEP2_32(
1217         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1218     Builder.CreateStore(Def, G);
1219 
1220     BasicBlock *CurrentBlock = nullptr;
1221     Value *CurrentReload = nullptr;
1222     for (auto *U : E.second) {
1223       // If we have not seen the use block, create a load instruction to reload
1224       // the spilled value from the coroutine frame. Populates the Value pointer
1225       // reference provided with the frame GEP.
1226       if (CurrentBlock != U->getParent()) {
1227         CurrentBlock = U->getParent();
1228         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1229 
1230         auto *GEP = GetFramePointer(E.first);
1231         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1232         CurrentReload = Builder.CreateLoad(
1233             FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1234             E.first->getName() + Twine(".reload"));
1235 
1236         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1237         for (DbgDeclareInst *DDI : DIs) {
1238           bool AllowUnresolved = false;
1239           // This dbg.declare is preserved for all coro-split function
1240           // fragments. It will be unreachable in the main function, and
1241           // processed by coro::salvageDebugInfo() by CoroCloner.
1242           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1243               .insertDeclare(CurrentReload, DDI->getVariable(),
1244                              DDI->getExpression(), DDI->getDebugLoc(),
1245                              &*Builder.GetInsertPoint());
1246           // This dbg.declare is for the main function entry point.  It
1247           // will be deleted in all coro-split functions.
1248           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
1249         }
1250       }
1251 
1252       // If we have a single edge PHINode, remove it and replace it with a
1253       // reload from the coroutine frame. (We already took care of multi edge
1254       // PHINodes by rewriting them in the rewritePHIs function).
1255       if (auto *PN = dyn_cast<PHINode>(U)) {
1256         assert(PN->getNumIncomingValues() == 1 &&
1257                "unexpected number of incoming "
1258                "values in the PHINode");
1259         PN->replaceAllUsesWith(CurrentReload);
1260         PN->eraseFromParent();
1261         continue;
1262       }
1263 
1264       // Replace all uses of CurrentValue in the current instruction with
1265       // reload.
1266       U->replaceUsesOfWith(Def, CurrentReload);
1267     }
1268   }
1269 
1270   BasicBlock *FramePtrBB = FramePtr->getParent();
1271 
1272   auto SpillBlock =
1273       FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1274   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1275   Shape.AllocaSpillBlock = SpillBlock;
1276 
1277   // retcon and retcon.once lowering assumes all uses have been sunk.
1278   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1279       Shape.ABI == coro::ABI::Async) {
1280     // If we found any allocas, replace all of their remaining uses with Geps.
1281     Builder.SetInsertPoint(&SpillBlock->front());
1282     for (const auto &P : FrameData.Allocas) {
1283       AllocaInst *Alloca = P.Alloca;
1284       auto *G = GetFramePointer(Alloca);
1285 
1286       // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1287       // here, as we are changing location of the instruction.
1288       G->takeName(Alloca);
1289       Alloca->replaceAllUsesWith(G);
1290       Alloca->eraseFromParent();
1291     }
1292     return FramePtr;
1293   }
1294 
  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. Because new dbg.declares have been created for these
  // allocas, we also delete the original dbg.declares and replace other uses
  // with undef.
1298   // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1299   // as some of the uses may not be dominated by CoroBegin.
1300   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1301   SmallVector<Instruction *, 4> UsersToUpdate;
1302   for (const auto &A : FrameData.Allocas) {
1303     AllocaInst *Alloca = A.Alloca;
1304     UsersToUpdate.clear();
1305     for (User *U : Alloca->users()) {
1306       auto *I = cast<Instruction>(U);
1307       if (DT.dominates(CB, I))
1308         UsersToUpdate.push_back(I);
1309     }
1310     if (UsersToUpdate.empty())
1311       continue;
1312     auto *G = GetFramePointer(Alloca);
1313     G->setName(Alloca->getName() + Twine(".reload.addr"));
1314 
1315     TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
1316     if (!DIs.empty())
1317       DIBuilder(*Alloca->getModule(),
1318                 /*AllowUnresolved*/ false)
1319           .insertDeclare(G, DIs.front()->getVariable(),
1320                          DIs.front()->getExpression(),
1321                          DIs.front()->getDebugLoc(), DIs.front());
1322     for (auto *DI : FindDbgDeclareUses(Alloca))
1323       DI->eraseFromParent();
1324     replaceDbgUsesWithUndef(Alloca);
1325 
1326     for (Instruction *I : UsersToUpdate)
1327       I->replaceUsesOfWith(Alloca, G);
1328   }
1329   Builder.SetInsertPoint(FramePtr->getNextNode());
1330   for (const auto &A : FrameData.Allocas) {
1331     AllocaInst *Alloca = A.Alloca;
1332     if (A.MayWriteBeforeCoroBegin) {
1333       // isEscaped really means potentially modified before CoroBegin.
1334       if (Alloca->isArrayAllocation())
1335         report_fatal_error(
1336             "Coroutines cannot handle copying of array allocas yet");
1337 
1338       auto *G = GetFramePointer(Alloca);
1339       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1340       Builder.CreateStore(Value, G);
1341     }
1342     // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
1344     // to the pointer in the frame.
1345     for (const auto &Alias : A.Aliases) {
1346       auto *FramePtr = GetFramePointer(Alloca);
1347       auto *FramePtrRaw =
1348           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1349       auto *AliasPtr = Builder.CreateGEP(
1350           FramePtrRaw,
1351           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1352       auto *AliasPtrTyped =
1353           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1354       Alias.first->replaceUsesWithIf(
1355           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1356     }
1357   }
1358   return FramePtr;
1359 }
1360 
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
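//
// For example (illustrative; names are made up), if SuccBB starts with
//
//   %v = phi i32 [ %x, %InsertedBB ], [ %y, %Other ]
//
// then a new single-entry PHI
//
//   %x.SuccBB = phi i32 [ %x, %PredBB ]
//
// is created in InsertedBB, and SuccBB's PHI becomes
//
//   %v = phi i32 [ %x.SuccBB, %InsertedBB ], [ %y, %Other ]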
1363 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1364                                          BasicBlock *InsertedBB,
1365                                          BasicBlock *PredBB,
1366                                          PHINode *UntilPHI = nullptr) {
1367   auto *PN = cast<PHINode>(&SuccBB->front());
1368   do {
1369     int Index = PN->getBasicBlockIndex(InsertedBB);
1370     Value *V = PN->getIncomingValue(Index);
1371     PHINode *InputV = PHINode::Create(
1372         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1373         &InsertedBB->front());
1374     InputV->addIncoming(V, PredBB);
1375     PN->setIncomingValue(Index, InputV);
1376     PN = dyn_cast<PHINode>(PN->getNextNode());
1377   } while (PN != UntilPHI);
1378 }
1379 
1380 // Rewrites the PHI Nodes in a cleanuppad.
1381 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1382                                      CleanupPadInst *CleanupPad) {
1383   // For every incoming edge to a CleanupPad we will create a new block holding
1384   // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
1386   // must be the same).
1387   //
1388   // cleanuppad:
1389   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1390   //    %3 = cleanuppad within none []
1391   //
1392   // It will create:
1393   //
1394   // cleanuppad.corodispatch
1395   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1396   //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
1398   //            [i8 0, label %cleanuppad.from.catchswitch
1399   //             i8 1, label %cleanuppad.from.catch.1]
1400   // cleanuppad.from.catchswitch:
1401   //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
1403   // cleanuppad.from.catch.1:
1404   //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
1406   // cleanuppad:
1407   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1408   //                 [%6, %cleanuppad.from.catch.1]
1409 
  // Unreachable BB, in case the dispatcher switches on an invalid value.
1411   auto *UnreachBB = BasicBlock::Create(
1412       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1413   IRBuilder<> Builder(UnreachBB);
1414   Builder.CreateUnreachable();
1415 
1416   // Create a new cleanuppad which will be the dispatcher.
1417   auto *NewCleanupPadBB =
1418       BasicBlock::Create(CleanupPadBB->getContext(),
1419                          CleanupPadBB->getName() + Twine(".corodispatch"),
1420                          CleanupPadBB->getParent(), CleanupPadBB);
1421   Builder.SetInsertPoint(NewCleanupPadBB);
1422   auto *SwitchType = Builder.getInt8Ty();
1423   auto *SetDispatchValuePN =
1424       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1425   CleanupPad->removeFromParent();
1426   CleanupPad->insertAfter(SetDispatchValuePN);
1427   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1428                                                 pred_size(CleanupPadBB));
1429 
1430   int SwitchIndex = 0;
1431   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1432   for (BasicBlock *Pred : Preds) {
1433     // Create a new cleanuppad and move the PHI values to there.
1434     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1435                                       CleanupPadBB->getName() +
1436                                           Twine(".from.") + Pred->getName(),
1437                                       CleanupPadBB->getParent(), CleanupPadBB);
1438     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1439     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1440                     Pred->getName());
1441     Builder.SetInsertPoint(CaseBB);
1442     Builder.CreateBr(CleanupPadBB);
1443     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1444 
1445     // Update this Pred to the new unwind point.
1446     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1447 
    // Set up the switch in the dispatcher.
1449     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1450     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1451     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1452     SwitchIndex++;
1453   }
1454 }
1455 
1456 static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
1459   //
1460   // loop:
1461   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1462   //
1463   // It will create:
1464   //
1465   // loop.from.entry:
1466   //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
1468   // loop.from.loop:
1469   //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
1471   //
1472   // After this rewrite, further analysis will ignore any phi nodes with more
1473   // than one incoming edge.
1474 
1475   // TODO: Simplify PHINodes in the basic block to remove duplicate
1476   // predecessors.
1477 
1478   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1479   // so we need to create an additional "dispatcher" block.
1480   if (auto *CleanupPad =
1481           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1482     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1483     for (BasicBlock *Pred : Preds) {
1484       if (CatchSwitchInst *CS =
1485               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
1488         assert(CS->getUnwindDest() == &BB);
1489         (void)CS;
1490         rewritePHIsForCleanupPad(&BB, CleanupPad);
1491         return;
1492       }
1493     }
1494   }
1495 
1496   LandingPadInst *LandingPad = nullptr;
1497   PHINode *ReplPHI = nullptr;
1498   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1499     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1500     // We replace the original landing pad with a PHINode that will collect the
1501     // results from all of them.
1502     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1503     ReplPHI->takeName(LandingPad);
1504     LandingPad->replaceAllUsesWith(ReplPHI);
1505     // We will erase the original landing pad at the end of this function after
1506     // ehAwareSplitEdge cloned it in the transition blocks.
1507   }
1508 
1509   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1510   for (BasicBlock *Pred : Preds) {
1511     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1512     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1513 
1514     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1515     // that replaced the landing pad.
1516     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1517   }
1518 
1519   if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad;
    // we no longer need it.
1522     LandingPad->eraseFromParent();
1523   }
1524 }
1525 
1526 static void rewritePHIs(Function &F) {
1527   SmallVector<BasicBlock *, 8> WorkList;
1528 
1529   for (BasicBlock &BB : F)
1530     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1531       if (PN->getNumIncomingValues() > 1)
1532         WorkList.push_back(&BB);
1533 
1534   for (BasicBlock *BB : WorkList)
1535     rewritePHIs(*BB);
1536 }
1537 
// Check for instructions that we can recreate on resume as opposed to spilling
// the result into the coroutine frame.
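// For example, a getelementptr or a compare whose operands are available after
// the suspend can be recomputed in the resume function instead of occupying a
// slot in the coroutine frame.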
1540 static bool materializable(Instruction &V) {
1541   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1542          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1543 }
1544 
1545 // Check for structural coroutine intrinsics that should not be spilled into
1546 // the coroutine frame.
1547 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1548   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1549          isa<CoroSuspendInst>(&I);
1550 }
1551 
// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
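//
// A minimal sketch of the effect (value names are illustrative only):
//
//   %addr = getelementptr i32, i32* %base, i64 1   ; defined before a suspend
//   ; ... suspend point ...
//   %v = load i32, i32* %addr                      ; used after the suspend
//
// becomes, with the GEP cloned at the top of the using block:
//
//   ; ... suspend point ...
//   %addr1 = getelementptr i32, i32* %base, i64 1
//   %v = load i32, i32* %addr1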
1554 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1555                                               const SpillInfo &Spills) {
1556   for (const auto &E : Spills) {
1557     Value *Def = E.first;
1558     BasicBlock *CurrentBlock = nullptr;
1559     Instruction *CurrentMaterialization = nullptr;
1560     for (Instruction *U : E.second) {
1561       // If we have not seen this block, materialize the value.
1562       if (CurrentBlock != U->getParent()) {
1563         CurrentBlock = U->getParent();
1564         CurrentMaterialization = cast<Instruction>(Def)->clone();
1565         CurrentMaterialization->setName(Def->getName());
1566         CurrentMaterialization->insertBefore(
1567             &*CurrentBlock->getFirstInsertionPt());
1568       }
1569       if (auto *PN = dyn_cast<PHINode>(U)) {
1570         assert(PN->getNumIncomingValues() == 1 &&
1571                "unexpected number of incoming "
1572                "values in the PHINode");
1573         PN->replaceAllUsesWith(CurrentMaterialization);
1574         PN->eraseFromParent();
1575         continue;
1576       }
1577       // Replace all uses of Def in the current instruction with the
1578       // CurrentMaterialization for the block.
1579       U->replaceUsesOfWith(Def, CurrentMaterialization);
1580     }
1581   }
1582 }
1583 
1584 // Splits the block at a particular instruction unless it is the first
1585 // instruction in the block with a single predecessor.
1586 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
1587   auto *BB = I->getParent();
1588   if (&BB->front() == I) {
1589     if (BB->getSinglePredecessor()) {
1590       BB->setName(Name);
1591       return BB;
1592     }
1593   }
1594   return BB->splitBasicBlock(I, Name);
1595 }
1596 
// Split above and below a particular instruction so that it ends up alone
// in its own block.
1599 static void splitAround(Instruction *I, const Twine &Name) {
1600   splitBlockIfNotFirst(I, Name);
1601   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
1602 }
1603 
1604 static bool isSuspendBlock(BasicBlock *BB) {
1605   return isa<AnyCoroSuspendInst>(BB->front());
1606 }
1607 
1608 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
1609 
1610 /// Does control flow starting at the given block ever reach a suspend
1611 /// instruction before reaching a block in VisitedOrFreeBBs?
1612 static bool isSuspendReachableFrom(BasicBlock *From,
1613                                    VisitedBlocksSet &VisitedOrFreeBBs) {
1614   // Eagerly try to add this block to the visited set.  If it's already
1615   // there, stop recursing; this path doesn't reach a suspend before
1616   // either looping or reaching a freeing block.
1617   if (!VisitedOrFreeBBs.insert(From).second)
1618     return false;
1619 
1620   // We assume that we'll already have split suspends into their own blocks.
1621   if (isSuspendBlock(From))
1622     return true;
1623 
1624   // Recurse on the successors.
1625   for (auto Succ : successors(From)) {
1626     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
1627       return true;
1628   }
1629 
1630   return false;
1631 }
1632 
1633 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
1634 /// suspend point?
1635 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't search past them.
1638   VisitedBlocksSet VisitedOrFreeBBs;
1639   for (auto User : AI->users()) {
1640     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
1641       VisitedOrFreeBBs.insert(FI->getParent());
1642   }
1643 
1644   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
1645 }
1646 
1647 /// After we split the coroutine, will the given basic block be along
1648 /// an obvious exit path for the resumption function?
1649 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
1650                                               unsigned depth = 3) {
1651   // If we've bottomed out our depth count, stop searching and assume
1652   // that the path might loop back.
1653   if (depth == 0) return false;
1654 
1655   // If this is a suspend block, we're about to exit the resumption function.
1656   if (isSuspendBlock(BB)) return true;
1657 
1658   // Recurse into the successors.
1659   for (auto Succ : successors(BB)) {
1660     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
1661       return false;
1662   }
1663 
1664   // If none of the successors leads back in a loop, we're on an exit/abort.
1665   return true;
1666 }
1667 
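/// Do we need a stacksave/stackrestore pair around this local coro.alloca?
/// We can skip the pair when every free is obviously followed by a suspend or
/// a function exit, since the dynamic stack allocation dies there anyway.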
1668 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
1669   // Look for a free that isn't sufficiently obviously followed by
1670   // either a suspend or a termination, i.e. something that will leave
1671   // the coro resumption frame.
1672   for (auto U : AI->users()) {
1673     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
1674     if (!FI) continue;
1675 
1676     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
1677       return true;
1678   }
1679 
1680   // If we never found one, we don't need a stack save.
1681   return false;
1682 }
1683 
1684 /// Turn each of the given local allocas into a normal (dynamic) alloca
1685 /// instruction.
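///
/// A rough sketch of the lowering (IR names are illustrative):
///
///   %h = call token @llvm.coro.alloca.alloc(i64 %size, i32 8)
///   %p = call i8* @llvm.coro.alloca.get(token %h)
///   ...
///   call void @llvm.coro.alloca.free(token %h)
///
/// becomes a plain dynamic alloca, wrapped in a stacksave/stackrestore pair
/// when the free is not already followed by a suspend or a function exit:
///
///   %sp = call i8* @llvm.stacksave()
///   %p = alloca i8, i64 %size, align 8
///   ...
///   call void @llvm.stackrestore(i8* %sp)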
1686 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
1687                               SmallVectorImpl<Instruction*> &DeadInsts) {
1688   for (auto AI : LocalAllocas) {
1689     auto M = AI->getModule();
1690     IRBuilder<> Builder(AI);
1691 
1692     // Save the stack depth.  Try to avoid doing this if the stackrestore
1693     // is going to immediately precede a return or something.
1694     Value *StackSave = nullptr;
1695     if (localAllocaNeedsStackSave(AI))
1696       StackSave = Builder.CreateCall(
1697                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
1698 
1699     // Allocate memory.
1700     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
1701     Alloca->setAlignment(Align(AI->getAlignment()));
1702 
1703     for (auto U : AI->users()) {
1704       // Replace gets with the allocation.
1705       if (isa<CoroAllocaGetInst>(U)) {
1706         U->replaceAllUsesWith(Alloca);
1707 
1708       // Replace frees with stackrestores.  This is safe because
1709       // alloca.alloc is required to obey a stack discipline, although we
1710       // don't enforce that structurally.
1711       } else {
1712         auto FI = cast<CoroAllocaFreeInst>(U);
1713         if (StackSave) {
1714           Builder.SetInsertPoint(FI);
1715           Builder.CreateCall(
1716                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
1717                              StackSave);
1718         }
1719       }
1720       DeadInsts.push_back(cast<Instruction>(U));
1721     }
1722 
1723     DeadInsts.push_back(AI);
1724   }
1725 }
1726 
1727 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
1728 /// This happens during the all-instructions iteration, so it must not
1729 /// delete the call.
1730 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
1731                                         coro::Shape &Shape,
1732                                    SmallVectorImpl<Instruction*> &DeadInsts) {
1733   IRBuilder<> Builder(AI);
1734   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
1735 
1736   for (User *U : AI->users()) {
1737     if (isa<CoroAllocaGetInst>(U)) {
1738       U->replaceAllUsesWith(Alloc);
1739     } else {
1740       auto FI = cast<CoroAllocaFreeInst>(U);
1741       Builder.SetInsertPoint(FI);
1742       Shape.emitDealloc(Builder, Alloc, nullptr);
1743     }
1744     DeadInsts.push_back(cast<Instruction>(U));
1745   }
1746 
1747   // Push this on last so that it gets deleted after all the others.
1748   DeadInsts.push_back(AI);
1749 
1750   // Return the new allocation value so that we can check for needed spills.
1751   return cast<Instruction>(Alloc);
1752 }
1753 
1754 /// Get the current swifterror value.
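///
/// Both this and the setter below emit a call through a null function pointer
/// as a placeholder "intrinsic"; the calls are recorded in Shape.SwiftErrorOps
/// so that coroutine splitting can later replace them with real swifterror
/// accesses.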
1755 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
1756                                      coro::Shape &Shape) {
1757   // Make a fake function pointer as a sort of intrinsic.
1758   auto FnTy = FunctionType::get(ValueTy, {}, false);
1759   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1760 
1761   auto Call = Builder.CreateCall(FnTy, Fn, {});
1762   Shape.SwiftErrorOps.push_back(Call);
1763 
1764   return Call;
1765 }
1766 
1767 /// Set the given value as the current swifterror value.
1768 ///
1769 /// Returns a slot that can be used as a swifterror slot.
1770 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
1771                                      coro::Shape &Shape) {
1772   // Make a fake function pointer as a sort of intrinsic.
1773   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
1774                                 {V->getType()}, false);
1775   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
1776 
1777   auto Call = Builder.CreateCall(FnTy, Fn, { V });
1778   Shape.SwiftErrorOps.push_back(Call);
1779 
1780   return Call;
1781 }
1782 
/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
1785 ///
1786 /// Returns an address that will stand in for the swifterror slot
1787 /// until splitting.
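///
/// Roughly, for a call that used the swifterror alloca, this emits
/// (illustrative sequence, placeholder calls as described above):
///   %v0   = load from %alloca
///   %slot = placeholder "set swifterror to %v0"   ; used as the call argument
///   ... the original call ...
///   %v1   = placeholder "get swifterror"
///   store %v1 into %alloca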
1788 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
1789                                                  AllocaInst *Alloca,
1790                                                  coro::Shape &Shape) {
1791   auto ValueTy = Alloca->getAllocatedType();
1792   IRBuilder<> Builder(Call);
1793 
1794   // Load the current value from the alloca and set it as the
1795   // swifterror value.
1796   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
1797   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
1798 
1799   // Move to after the call.  Since swifterror only has a guaranteed
1800   // value on normal exits, we can ignore implicit and explicit unwind
1801   // edges.
1802   if (isa<CallInst>(Call)) {
1803     Builder.SetInsertPoint(Call->getNextNode());
1804   } else {
1805     auto Invoke = cast<InvokeInst>(Call);
1806     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
1807   }
1808 
1809   // Get the current swifterror value and store it to the alloca.
1810   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
1811   Builder.CreateStore(ValueAfterCall, Alloca);
1812 
1813   return Addr;
1814 }
1815 
1816 /// Eliminate a formerly-swifterror alloca by inserting the get/set
1817 /// intrinsics and attempting to MemToReg the alloca away.
1818 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
1819                                       coro::Shape &Shape) {
1820   for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
1821     // We're likely changing the use list, so use a mutation-safe
1822     // iteration pattern.
1823     auto &Use = *UI;
1824     ++UI;
1825 
1826     // swifterror values can only be used in very specific ways.
1827     // We take advantage of that here.
1828     auto User = Use.getUser();
1829     if (isa<LoadInst>(User) || isa<StoreInst>(User))
1830       continue;
1831 
1832     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
1833     auto Call = cast<Instruction>(User);
1834 
1835     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
1836 
1837     // Use the returned slot address as the call argument.
1838     Use.set(Addr);
1839   }
1840 
1841   // All the uses should be loads and stores now.
1842   assert(isAllocaPromotable(Alloca));
1843 }
1844 
1845 /// "Eliminate" a swifterror argument by reducing it to the alloca case
1846 /// and then loading and storing in the prologue and epilog.
1847 ///
1848 /// The argument keeps the swifterror flag.
1849 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
1850                                         coro::Shape &Shape,
1851                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
1852   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
1853 
1854   auto ArgTy = cast<PointerType>(Arg.getType());
1855   auto ValueTy = ArgTy->getElementType();
1856 
1857   // Reduce to the alloca case:
1858 
1859   // Create an alloca and replace all uses of the arg with it.
1860   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
1861   Arg.replaceAllUsesWith(Alloca);
1862 
1863   // Set an initial value in the alloca.  swifterror is always null on entry.
1864   auto InitialValue = Constant::getNullValue(ValueTy);
1865   Builder.CreateStore(InitialValue, Alloca);
1866 
1867   // Find all the suspends in the function and save and restore around them.
1868   for (auto Suspend : Shape.CoroSuspends) {
1869     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
1870   }
1871 
1872   // Find all the coro.ends in the function and restore the error value.
1873   for (auto End : Shape.CoroEnds) {
1874     Builder.SetInsertPoint(End);
1875     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
1876     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
1877   }
1878 
1879   // Now we can use the alloca logic.
1880   AllocasToPromote.push_back(Alloca);
1881   eliminateSwiftErrorAlloca(F, Alloca, Shape);
1882 }
1883 
1884 /// Eliminate all problematic uses of swifterror arguments and allocas
1885 /// from the function.  We'll fix them up later when splitting the function.
1886 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
1887   SmallVector<AllocaInst*, 4> AllocasToPromote;
1888 
1889   // Look for a swifterror argument.
1890   for (auto &Arg : F.args()) {
1891     if (!Arg.hasSwiftErrorAttr()) continue;
1892 
1893     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
1894     break;
1895   }
1896 
1897   // Look for swifterror allocas.
1898   for (auto &Inst : F.getEntryBlock()) {
1899     auto Alloca = dyn_cast<AllocaInst>(&Inst);
1900     if (!Alloca || !Alloca->isSwiftError()) continue;
1901 
1902     // Clear the swifterror flag.
1903     Alloca->setSwiftError(false);
1904 
1905     AllocasToPromote.push_back(Alloca);
1906     eliminateSwiftErrorAlloca(F, Alloca, Shape);
1907   }
1908 
1909   // If we have any allocas to promote, compute a dominator tree and
1910   // promote them en masse.
1911   if (!AllocasToPromote.empty()) {
1912     DominatorTree DT(F);
1913     PromoteMemToReg(AllocasToPromote, DT);
1914   }
1915 }
1916 
1917 /// retcon and retcon.once conventions assume that all spill uses can be sunk
1918 /// after the coro.begin intrinsic.
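/// This is needed because for those ABIs the frame is only allocated by the
/// coro.begin itself, so any instruction that will be rewritten to address the
/// frame must execute after it.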
1919 static void sinkSpillUsesAfterCoroBegin(Function &F,
1920                                         const FrameDataInfo &FrameData,
1921                                         CoroBeginInst *CoroBegin) {
1922   DominatorTree Dom(F);
1923 
1924   SmallSetVector<Instruction *, 32> ToMove;
1925   SmallVector<Instruction *, 32> Worklist;
1926 
1927   // Collect all users that precede coro.begin.
1928   for (auto *Def : FrameData.getAllDefs()) {
1929     for (User *U : Def->users()) {
1930       auto Inst = cast<Instruction>(U);
1931       if (Inst->getParent() != CoroBegin->getParent() ||
1932           Dom.dominates(CoroBegin, Inst))
1933         continue;
1934       if (ToMove.insert(Inst))
1935         Worklist.push_back(Inst);
1936     }
1937   }
1938   // Recursively collect users before coro.begin.
1939   while (!Worklist.empty()) {
1940     auto *Def = Worklist.pop_back_val();
1941     for (User *U : Def->users()) {
1942       auto Inst = cast<Instruction>(U);
1943       if (Dom.dominates(CoroBegin, Inst))
1944         continue;
1945       if (ToMove.insert(Inst))
1946         Worklist.push_back(Inst);
1947     }
1948   }
1949 
1950   // Sort by dominance.
1951   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
1952   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B it should precede (<) B.
1954     return Dom.dominates(A, B);
1955   });
1956 
1957   Instruction *InsertPt = CoroBegin->getNextNode();
1958   for (Instruction *Inst : InsertionList)
1959     Inst->moveBefore(InsertPt);
1960 }
1961 
/// For each local variable whose uses are all dominated by a single candidate
/// block (the entry block or the block right after a suspend) without crossing
/// another suspend point, sink its lifetime.start marker into that block.
/// Doing so minimizes the lifetime of each variable, hence minimizing the
/// amount of data we end up putting on the frame.
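///
/// Illustrative example: if an alloca's only non-lifetime use sits after a
/// suspend,
///
///   entry:
///     %x = alloca i32
///     call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.i8)
///     ...
///   await.ready:                      ; the block right after the suspend
///     store i32 0, i32* %x
///
/// the lifetime.start is re-emitted in %await.ready and the original marker is
/// erased, so %x need not be kept in the coroutine frame.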
1966 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
1967                                      SuspendCrossingInfo &Checker) {
1968   DominatorTree DT(F);
1969 
1970   // Collect all possible basic blocks which may dominate all uses of allocas.
1971   SmallPtrSet<BasicBlock *, 4> DomSet;
1972   DomSet.insert(&F.getEntryBlock());
1973   for (auto *CSI : Shape.CoroSuspends) {
1974     BasicBlock *SuspendBlock = CSI->getParent();
1975     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
1976            "should have split coro.suspend into its own block");
1977     DomSet.insert(SuspendBlock->getSingleSuccessor());
1978   }
1979 
1980   for (Instruction &I : instructions(F)) {
1981     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
1982     if (!AI)
1983       continue;
1984 
1985     for (BasicBlock *DomBB : DomSet) {
1986       bool Valid = true;
1987       SmallVector<Instruction *, 1> Lifetimes;
1988 
1989       auto isLifetimeStart = [](Instruction* I) {
1990         if (auto* II = dyn_cast<IntrinsicInst>(I))
1991           return II->getIntrinsicID() == Intrinsic::lifetime_start;
1992         return false;
1993       };
1994 
1995       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
1996         if (isLifetimeStart(U)) {
1997           Lifetimes.push_back(U);
1998           return true;
1999         }
2000         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2001           return false;
2002         if (isLifetimeStart(U->user_back())) {
2003           Lifetimes.push_back(U->user_back());
2004           return true;
2005         }
2006         return false;
2007       };
2008 
2009       for (User *U : AI->users()) {
2010         Instruction *UI = cast<Instruction>(U);
        // Except for lifetime.start markers (and the GEPs/bitcasts feeding
        // them), every user must be dominated by DomBB and must not be
        // across a suspend point from it; otherwise we cannot sink the
        // lifetime.start marker into DomBB.
2015         if (!DT.dominates(DomBB, UI->getParent()) ||
2016             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start markers and the GEPs/bitcasts used only by
          // such markers.
2019           if (collectLifetimeStart(UI, AI))
2020             continue;
2021           Valid = false;
2022           break;
2023         }
2024       }
      // Sink the lifetime.start markers into DomBB now that we know all other
      // uses of the alloca are dominated by DomBB and do not cross a suspend
      // point from it.
2027       if (Valid && Lifetimes.size() != 0) {
        // The pointer operand may be AI itself when AI already has type i8*.
2029         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2030           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2031             return AI;
2032           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2033           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2034                                   DomBB->getTerminator());
2035         }(AI);
2036 
2037         auto *NewLifetime = Lifetimes[0]->clone();
2038         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2039         NewLifetime->insertBefore(DomBB->getTerminator());
2040 
        // The original lifetime.start markers are no longer necessary.
2042         for (Instruction *S : Lifetimes)
2043           S->eraseFromParent();
2044 
2045         break;
2046       }
2047     }
2048   }
2049 }
2050 
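// Collect the allocas that must live on the coroutine frame: those whose uses
// (directly or through aliasing pointers) may cross a suspend point, as
// decided by AllocaUseVisitor.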
2051 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2052                                 const SuspendCrossingInfo &Checker,
2053                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2054   for (Instruction &I : instructions(F)) {
2055     auto *AI = dyn_cast<AllocaInst>(&I);
2056     if (!AI)
2057       continue;
2058     // The PromiseAlloca will be specially handled since it needs to be in a
2059     // fixed position in the frame.
2060     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2061       continue;
2062     }
2063     DominatorTree DT(F);
2064     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2065                              *Shape.CoroBegin, Checker};
2066     Visitor.visitPtr(*AI);
2067     if (!Visitor.getShouldLiveOnFrame())
2068       continue;
2069     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2070                          Visitor.getMayWriteBeforeCoroBegin());
2071   }
2072 }
2073 
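/// Rewrite a dbg.declare so that it keeps describing its variable after the
/// frame rewrite: pointer arithmetic feeding the address is folded into the
/// DIExpression and, when frame-slot reuse is disabled and the address
/// resolves to a function argument, the argument is spilled to an entry-block
/// alloca so the location stays valid throughout the function.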
2074 void coro::salvageDebugInfo(
2075     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2076     DbgDeclareInst *DDI, bool ReuseFrameSlot) {
2077   Function *F = DDI->getFunction();
2078   IRBuilder<> Builder(F->getContext());
2079   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2080   while (isa<IntrinsicInst>(InsertPt))
2081     ++InsertPt;
2082   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2083   DIExpression *Expr = DDI->getExpression();
  // Follow the pointer arithmetic all the way back to the underlying storage
  // (typically the incoming function argument or an alloca) and fold it into
  // the DIExpression.
2086   bool OutermostLoad = true;
2087   Value *Storage = DDI->getAddress();
2088   Value *OriginalStorage = Storage;
2089   while (Storage) {
2090     if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
2091       Storage = LdInst->getOperand(0);
2092       // FIXME: This is a heuristic that works around the fact that
2093       // LLVM IR debug intrinsics cannot yet distinguish between
2094       // memory and value locations: Because a dbg.declare(alloca) is
2095       // implicitly a memory location no DW_OP_deref operation for the
2096       // last direct load from an alloca is necessary.  This condition
2097       // effectively drops the *last* DW_OP_deref in the expression.
2098       if (!OutermostLoad)
2099         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2100       OutermostLoad = false;
2101     } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
2102       Storage = StInst->getOperand(0);
2103     } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
2104       Expr = llvm::salvageDebugInfoImpl(*GEPInst, Expr,
2105                                         /*WithStackValue=*/false, 0);
2106       if (!Expr)
2107         return;
2108       Storage = GEPInst->getOperand(0);
2109     } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
2110       Storage = BCInst->getOperand(0);
2111     else
2112       break;
2113   }
2114   if (!Storage)
2115     return;
2116 
2117   // Store a pointer to the coroutine frame object in an alloca so it
2118   // is available throughout the function when producing unoptimized
2119   // code. Extending the lifetime this way is correct because the
2120   // variable has been declared by a dbg.declare intrinsic.
2121   //
  // Do not create the alloca when frame-slot reuse is enabled, since the
  // alloca would be eliminated by optimization passes and the corresponding
  // dbg.declares would become invalid.
2124   if (!ReuseFrameSlot && !EnableReuseStorageInFrame)
2125     if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2126       auto &Cached = DbgPtrAllocaCache[Storage];
2127       if (!Cached) {
2128         Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2129                                       Arg->getName() + ".debug");
2130         Builder.CreateStore(Storage, Cached);
2131       }
2132       Storage = Cached;
2133       // FIXME: LLVM lacks nuanced semantics to differentiate between
2134       // memory and direct locations at the IR level. The backend will
2135       // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2136       // location. Thus, if there are deref and offset operations in the
2137       // expression, we need to add a DW_OP_deref at the *start* of the
2138       // expression to first load the contents of the alloca before
2139       // adjusting it with the expression.
2140       if (Expr && Expr->isComplex())
2141         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2142     }
2143   DDI->replaceVariableLocationOp(OriginalStorage, Storage);
2144   DDI->setExpression(Expr);
2145   if (auto *InsertPt = dyn_cast<Instruction>(Storage))
2146     DDI->moveAfter(InsertPt);
2147   else if (isa<Argument>(Storage))
2148     DDI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2149 }
2150 
2151 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2152   // Don't eliminate swifterror in async functions that won't be split.
2153   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2154     eliminateSwiftError(F, Shape);
2155 
2156   if (Shape.ABI == coro::ABI::Switch &&
2157       Shape.SwitchLowering.PromiseAlloca) {
2158     Shape.getSwitchCoroId()->clearPromise();
2159   }
2160 
2161   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2162   // intrinsics are in their own blocks to simplify the logic of building up
2163   // SuspendCrossing data.
2164   for (auto *CSI : Shape.CoroSuspends) {
2165     if (auto *Save = CSI->getCoroSave())
2166       splitAround(Save, "CoroSave");
2167     splitAround(CSI, "CoroSuspend");
2168   }
2169 
2170   // Put CoroEnds into their own blocks.
2171   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2172     splitAround(CE, "CoroEnd");
2173 
    // Emit the musttail call in a new block before the CoroEnd. We do this
    // here so that the correct suspend-crossing info is computed for the
    // arguments of the musttail call. (Arguments of the coro.end instruction
    // itself would be ignored.)
2178     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2179       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2180       if (!MustTailCallFn)
2181         continue;
2182       IRBuilder<> Builder(AsyncEnd);
2183       SmallVector<Value *, 8> Args(AsyncEnd->args());
2184       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2185       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2186                                       Arguments, Builder);
2187       splitAround(Call, "MustTailCall.Before.CoroEnd");
2188     }
2189   }
2190 
  // Transform multi-edge PHI nodes so that any value feeding into a PHI never
  // has its definition separated from the PHI by a suspend point.
2193   rewritePHIs(F);
2194 
2195   // Build suspend crossing info.
2196   SuspendCrossingInfo Checker(F, Shape);
2197 
2198   IRBuilder<> Builder(F.getContext());
2199   FrameDataInfo FrameData;
2200   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2201   SmallVector<Instruction*, 4> DeadInstructions;
2202 
2203   {
2204     SpillInfo Spills;
2205     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2206       // See if there are materializable instructions across suspend points.
2207       for (Instruction &I : instructions(F))
2208         if (materializable(I))
2209           for (User *U : I.users())
2210             if (Checker.isDefinitionAcrossSuspend(I, U))
2211               Spills[&I].push_back(cast<Instruction>(U));
2212 
2213       if (Spills.empty())
2214         break;
2215 
2216       // Rewrite materializable instructions to be materialized at the use
2217       // point.
2218       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2219       rewriteMaterializableInstructions(Builder, Spills);
2220       Spills.clear();
2221     }
2222   }
2223 
2224   sinkLifetimeStartMarkers(F, Shape, Checker);
2225   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2226     collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2227   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2228 
2229   // Collect the spills for arguments and other not-materializable values.
2230   for (Argument &A : F.args())
2231     for (User *U : A.users())
2232       if (Checker.isDefinitionAcrossSuspend(A, U))
2233         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2234 
2235   for (Instruction &I : instructions(F)) {
2236     // Values returned from coroutine structure intrinsics should not be part
2237     // of the Coroutine Frame.
2238     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2239       continue;
2240 
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
2243     if (Shape.ABI == coro::ABI::Switch &&
2244         Shape.SwitchLowering.PromiseAlloca == &I)
2245       continue;
2246 
2247     // Handle alloca.alloc specially here.
2248     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2249       // Check whether the alloca's lifetime is bounded by suspend points.
2250       if (isLocalAlloca(AI)) {
2251         LocalAllocas.push_back(AI);
2252         continue;
2253       }
2254 
2255       // If not, do a quick rewrite of the alloca and then add spills of
2256       // the rewritten value.  The rewrite doesn't invalidate anything in
2257       // Spills because the other alloca intrinsics have no other operands
2258       // besides AI, and it doesn't invalidate the iteration because we delay
2259       // erasing AI.
2260       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2261 
2262       for (User *U : Alloc->users()) {
2263         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2264           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2265       }
2266       continue;
2267     }
2268 
2269     // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2270     if (isa<CoroAllocaGetInst>(I))
2271       continue;
2272 
2273     if (isa<AllocaInst>(I))
2274       continue;
2275 
2276     for (User *U : I.users())
2277       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2278         // We cannot spill a token.
2279         if (I.getType()->isTokenTy())
2280           report_fatal_error(
2281               "token definition is separated from the use by a suspend point");
2282         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2283       }
2284   }
2285   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2286   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2287       Shape.ABI == coro::ABI::Async)
2288     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2289   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2290   Shape.FramePtr = insertSpills(FrameData, Shape);
2291   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2292 
2293   for (auto I : DeadInstructions)
2294     I->eraseFromParent();
2295 }
2296