1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This file contains classes used to discover if for a particular value
9 // there from sue to definition that crosses a suspend block.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/ScopeExit.h"
20 #include "llvm/ADT/SmallString.h"
21 #include "llvm/Analysis/PtrUseVisitor.h"
22 #include "llvm/Analysis/StackLifetime.h"
23 #include "llvm/Config/llvm-config.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/DIBuilder.h"
26 #include "llvm/IR/DebugInfo.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstIterator.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Support/OptimizedStructLayout.h"
35 #include "llvm/Support/circular_raw_ostream.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
38 #include "llvm/Transforms/Utils/Local.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 #include <algorithm>
41 
42 using namespace llvm;
43 
44 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
45 // "coro-frame", which results in leaner debug spew.
46 #define DEBUG_TYPE "coro-suspend-crossing"
47 
48 static cl::opt<bool> EnableReuseStorageInFrame(
49     "reuse-storage-in-coroutine-frame", cl::Hidden,
50     cl::desc(
51         "Enable the optimization which would reuse the storage in the coroutine \
52          frame for allocas whose liferanges are not overlapped, for testing purposes"),
53     llvm::cl::init(false));
54 
55 enum { SmallVectorThreshold = 32 };
56 
57 // Provides two way mapping between the blocks and numbers.
58 namespace {
59 class BlockToIndexMapping {
60   SmallVector<BasicBlock *, SmallVectorThreshold> V;
61 
62 public:
63   size_t size() const { return V.size(); }
64 
65   BlockToIndexMapping(Function &F) {
66     for (BasicBlock &BB : F)
67       V.push_back(&BB);
68     llvm::sort(V);
69   }
70 
71   size_t blockToIndex(BasicBlock *BB) const {
72     auto *I = llvm::lower_bound(V, BB);
73     assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block");
74     return I - V.begin();
75   }
76 
77   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
78 };
79 } // end anonymous namespace
80 
81 // The SuspendCrossingInfo maintains data that allows to answer a question
82 // whether given two BasicBlocks A and B there is a path from A to B that
83 // passes through a suspend point.
84 //
85 // For every basic block 'i' it maintains a BlockData that consists of:
86 //   Consumes:  a bit vector which contains a set of indices of blocks that can
87 //              reach block 'i'
88 //   Kills: a bit vector which contains a set of indices of blocks that can
89 //          reach block 'i', but one of the path will cross a suspend point
90 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
91 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
92 //
93 namespace {
94 struct SuspendCrossingInfo {
95   BlockToIndexMapping Mapping;
96 
97   struct BlockData {
98     BitVector Consumes;
99     BitVector Kills;
100     bool Suspend = false;
101     bool End = false;
102   };
103   SmallVector<BlockData, SmallVectorThreshold> Block;
104 
105   iterator_range<succ_iterator> successors(BlockData const &BD) const {
106     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
107     return llvm::successors(BB);
108   }
109 
110   BlockData &getBlockData(BasicBlock *BB) {
111     return Block[Mapping.blockToIndex(BB)];
112   }
113 
114   void dump() const;
115   void dump(StringRef Label, BitVector const &BV) const;
116 
117   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
118 
119   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
120     size_t const DefIndex = Mapping.blockToIndex(DefBB);
121     size_t const UseIndex = Mapping.blockToIndex(UseBB);
122 
123     bool const Result = Block[UseIndex].Kills[DefIndex];
124     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
125                       << " answer is " << Result << "\n");
126     return Result;
127   }
128 
129   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
130     auto *I = cast<Instruction>(U);
131 
132     // We rewrote PHINodes, so that only the ones with exactly one incoming
133     // value need to be analyzed.
134     if (auto *PN = dyn_cast<PHINode>(I))
135       if (PN->getNumIncomingValues() > 1)
136         return false;
137 
138     BasicBlock *UseBB = I->getParent();
139 
140     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
141     // llvm.coro.suspend.async as if they were uses in the suspend's single
142     // predecessor: the uses conceptually occur before the suspend.
143     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
144       UseBB = UseBB->getSinglePredecessor();
145       assert(UseBB && "should have split coro.suspend into its own block");
146     }
147 
148     return hasPathCrossingSuspendPoint(DefBB, UseBB);
149   }
150 
151   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
152     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
153   }
154 
155   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
156     auto *DefBB = I.getParent();
157 
158     // As a special case, treat values produced by an llvm.coro.suspend.*
159     // as if they were defined in the single successor: the uses
160     // conceptually occur after the suspend.
161     if (isa<AnyCoroSuspendInst>(I)) {
162       DefBB = DefBB->getSingleSuccessor();
163       assert(DefBB && "should have split coro.suspend into its own block");
164     }
165 
166     return isDefinitionAcrossSuspend(DefBB, U);
167   }
168 
169   bool isDefinitionAcrossSuspend(Value &V, User *U) const {
170     if (auto *Arg = dyn_cast<Argument>(&V))
171       return isDefinitionAcrossSuspend(*Arg, U);
172     if (auto *Inst = dyn_cast<Instruction>(&V))
173       return isDefinitionAcrossSuspend(*Inst, U);
174 
175     llvm_unreachable(
176         "Coroutine could only collect Argument and Instruction now.");
177   }
178 };
179 } // end anonymous namespace
180 
181 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
182 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
183                                                 BitVector const &BV) const {
184   dbgs() << Label << ":";
185   for (size_t I = 0, N = BV.size(); I < N; ++I)
186     if (BV[I])
187       dbgs() << " " << Mapping.indexToBlock(I)->getName();
188   dbgs() << "\n";
189 }
190 
191 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
192   for (size_t I = 0, N = Block.size(); I < N; ++I) {
193     BasicBlock *const B = Mapping.indexToBlock(I);
194     dbgs() << B->getName() << ":\n";
195     dump("   Consumes", Block[I].Consumes);
196     dump("      Kills", Block[I].Kills);
197   }
198   dbgs() << "\n";
199 }
200 #endif
201 
202 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
203     : Mapping(F) {
204   const size_t N = Mapping.size();
205   Block.resize(N);
206 
207   // Initialize every block so that it consumes itself
208   for (size_t I = 0; I < N; ++I) {
209     auto &B = Block[I];
210     B.Consumes.resize(N);
211     B.Kills.resize(N);
212     B.Consumes.set(I);
213   }
214 
215   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
216   // the code beyond coro.end is reachable during initial invocation of the
217   // coroutine.
218   for (auto *CE : Shape.CoroEnds)
219     getBlockData(CE->getParent()).End = true;
220 
221   // Mark all suspend blocks and indicate that they kill everything they
222   // consume. Note, that crossing coro.save also requires a spill, as any code
223   // between coro.save and coro.suspend may resume the coroutine and all of the
224   // state needs to be saved by that time.
225   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
226     BasicBlock *SuspendBlock = BarrierInst->getParent();
227     auto &B = getBlockData(SuspendBlock);
228     B.Suspend = true;
229     B.Kills |= B.Consumes;
230   };
231   for (auto *CSI : Shape.CoroSuspends) {
232     markSuspendBlock(CSI);
233     if (auto *Save = CSI->getCoroSave())
234       markSuspendBlock(Save);
235   }
236 
237   // Iterate propagating consumes and kills until they stop changing.
238   int Iteration = 0;
239   (void)Iteration;
240 
241   bool Changed;
242   do {
243     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
244     LLVM_DEBUG(dbgs() << "==============\n");
245 
246     Changed = false;
247     for (size_t I = 0; I < N; ++I) {
248       auto &B = Block[I];
249       for (BasicBlock *SI : successors(B)) {
250 
251         auto SuccNo = Mapping.blockToIndex(SI);
252 
253         // Saved Consumes and Kills bitsets so that it is easy to see
254         // if anything changed after propagation.
255         auto &S = Block[SuccNo];
256         auto SavedConsumes = S.Consumes;
257         auto SavedKills = S.Kills;
258 
259         // Propagate Kills and Consumes from block B into its successor S.
260         S.Consumes |= B.Consumes;
261         S.Kills |= B.Kills;
262 
263         // If block B is a suspend block, it should propagate kills into the
264         // its successor for every block B consumes.
265         if (B.Suspend) {
266           S.Kills |= B.Consumes;
267         }
268         if (S.Suspend) {
269           // If block S is a suspend block, it should kill all of the blocks it
270           // consumes.
271           S.Kills |= S.Consumes;
272         } else if (S.End) {
273           // If block S is an end block, it should not propagate kills as the
274           // blocks following coro.end() are reached during initial invocation
275           // of the coroutine while all the data are still available on the
276           // stack or in the registers.
277           S.Kills.reset();
278         } else {
279           // This is reached when S block it not Suspend nor coro.end and it
280           // need to make sure that it is not in the kill set.
281           S.Kills.reset(SuccNo);
282         }
283 
284         // See if anything changed.
285         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
286 
287         if (S.Kills != SavedKills) {
288           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
289                             << "\n");
290           LLVM_DEBUG(dump("S.Kills", S.Kills));
291           LLVM_DEBUG(dump("SavedKills", SavedKills));
292         }
293         if (S.Consumes != SavedConsumes) {
294           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
295           LLVM_DEBUG(dump("S.Consume", S.Consumes));
296           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
297         }
298       }
299     }
300   } while (Changed);
301   LLVM_DEBUG(dump());
302 }
303 
304 #undef DEBUG_TYPE // "coro-suspend-crossing"
305 #define DEBUG_TYPE "coro-frame"
306 
307 namespace {
308 class FrameTypeBuilder;
309 // Mapping from the to-be-spilled value to all the users that need reload.
310 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
311 struct AllocaInfo {
312   AllocaInst *Alloca;
313   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
314   bool MayWriteBeforeCoroBegin;
315   AllocaInfo(AllocaInst *Alloca,
316              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
317              bool MayWriteBeforeCoroBegin)
318       : Alloca(Alloca), Aliases(std::move(Aliases)),
319         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
320 };
321 struct FrameDataInfo {
322   // All the values (that are not allocas) that needs to be spilled to the
323   // frame.
324   SpillInfo Spills;
325   // Allocas contains all values defined as allocas that need to live in the
326   // frame.
327   SmallVector<AllocaInfo, 8> Allocas;
328 
329   SmallVector<Value *, 8> getAllDefs() const {
330     SmallVector<Value *, 8> Defs;
331     for (const auto &P : Spills)
332       Defs.push_back(P.first);
333     for (const auto &A : Allocas)
334       Defs.push_back(A.Alloca);
335     return Defs;
336   }
337 
338   uint32_t getFieldIndex(Value *V) const {
339     auto Itr = FieldIndexMap.find(V);
340     assert(Itr != FieldIndexMap.end() &&
341            "Value does not have a frame field index");
342     return Itr->second;
343   }
344 
345   void setFieldIndex(Value *V, uint32_t Index) {
346     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
347            "Cannot set the index for the same field twice.");
348     FieldIndexMap[V] = Index;
349   }
350 
351   uint64_t getAlign(Value *V) const {
352     auto Iter = FieldAlignMap.find(V);
353     assert(Iter != FieldAlignMap.end());
354     return Iter->second;
355   }
356 
357   void setAlign(Value *V, uint64_t Align) {
358     assert(FieldAlignMap.count(V) == 0);
359     FieldAlignMap.insert({V, Align});
360   }
361 
362   uint64_t getOffset(Value *V) const {
363     auto Iter = FieldOffsetMap.find(V);
364     assert(Iter != FieldOffsetMap.end());
365     return Iter->second;
366   }
367 
368   void setOffset(Value *V, uint64_t Offset) {
369     assert(FieldOffsetMap.count(V) == 0);
370     FieldOffsetMap.insert({V, Offset});
371   }
372 
373   // Remap the index of every field in the frame, using the final layout index.
374   void updateLayoutIndex(FrameTypeBuilder &B);
375 
376 private:
377   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
378   // twice by mistake.
379   bool LayoutIndexUpdateStarted = false;
380   // Map from values to their slot indexes on the frame. They will be first set
381   // with their original insertion field index. After the frame is built, their
382   // indexes will be updated into the final layout index.
383   DenseMap<Value *, uint32_t> FieldIndexMap;
384   // Map from values to their alignment on the frame. They would be set after
385   // the frame is built.
386   DenseMap<Value *, uint64_t> FieldAlignMap;
387   // Map from values to their offset on the frame. They would be set after
388   // the frame is built.
389   DenseMap<Value *, uint64_t> FieldOffsetMap;
390 };
391 } // namespace
392 
393 #ifndef NDEBUG
394 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
395   dbgs() << "------------- " << Title << "--------------\n";
396   for (const auto &E : Spills) {
397     E.first->dump();
398     dbgs() << "   user: ";
399     for (auto *I : E.second)
400       I->dump();
401   }
402 }
403 
404 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
405   dbgs() << "------------- Allocas --------------\n";
406   for (const auto &A : Allocas) {
407     A.Alloca->dump();
408   }
409 }
410 #endif
411 
412 namespace {
413 using FieldIDType = size_t;
414 // We cannot rely solely on natural alignment of a type when building a
415 // coroutine frame and if the alignment specified on the Alloca instruction
416 // differs from the natural alignment of the alloca type we will need to insert
417 // padding.
418 class FrameTypeBuilder {
419 private:
420   struct Field {
421     uint64_t Size;
422     uint64_t Offset;
423     Type *Ty;
424     FieldIDType LayoutFieldIndex;
425     Align Alignment;
426     Align TyAlignment;
427   };
428 
429   const DataLayout &DL;
430   LLVMContext &Context;
431   uint64_t StructSize = 0;
432   Align StructAlign;
433   bool IsFinished = false;
434 
435   Optional<Align> MaxFrameAlignment;
436 
437   SmallVector<Field, 8> Fields;
438   DenseMap<Value*, unsigned> FieldIndexByKey;
439 
440 public:
441   FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
442                    Optional<Align> MaxFrameAlignment)
443       : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
444 
445   /// Add a field to this structure for the storage of an `alloca`
446   /// instruction.
447   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
448                                                bool IsHeader = false) {
449     Type *Ty = AI->getAllocatedType();
450 
451     // Make an array type if this is a static array allocation.
452     if (AI->isArrayAllocation()) {
453       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
454         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
455       else
456         report_fatal_error("Coroutines cannot handle non static allocas yet");
457     }
458 
459     return addField(Ty, AI->getAlign(), IsHeader);
460   }
461 
462   /// We want to put the allocas whose lifetime-ranges are not overlapped
463   /// into one slot of coroutine frame.
464   /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
465   ///
466   ///     cppcoro::task<void> alternative_paths(bool cond) {
467   ///         if (cond) {
468   ///             big_structure a;
469   ///             process(a);
470   ///             co_await something();
471   ///         } else {
472   ///             big_structure b;
473   ///             process2(b);
474   ///             co_await something();
475   ///         }
476   ///     }
477   ///
478   /// We want to put variable a and variable b in the same slot to
479   /// reduce the size of coroutine frame.
480   ///
481   /// This function use StackLifetime algorithm to partition the AllocaInsts in
482   /// Spills to non-overlapped sets in order to put Alloca in the same
483   /// non-overlapped set into the same slot in the Coroutine Frame. Then add
484   /// field for the allocas in the same non-overlapped set by using the largest
485   /// type as the field type.
486   ///
487   /// Side Effects: Because We sort the allocas, the order of allocas in the
488   /// frame may be different with the order in the source code.
489   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
490                           coro::Shape &Shape);
491 
492   /// Add a field to this structure.
493   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
494                                       bool IsHeader = false,
495                                       bool IsSpillOfValue = false) {
496     assert(!IsFinished && "adding fields to a finished builder");
497     assert(Ty && "must provide a type for a field");
498 
499     // The field size is always the alloc size of the type.
500     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
501 
502     // For an alloca with size=0, we don't need to add a field and they
503     // can just point to any index in the frame. Use index 0.
504     if (FieldSize == 0) {
505       return 0;
506     }
507 
508     // The field alignment might not be the type alignment, but we need
509     // to remember the type alignment anyway to build the type.
510     // If we are spilling values we don't need to worry about ABI alignment
511     // concerns.
512     auto ABIAlign = DL.getABITypeAlign(Ty);
513     Align TyAlignment =
514         (IsSpillOfValue && MaxFrameAlignment)
515             ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
516             : ABIAlign;
517     if (!FieldAlignment) {
518       FieldAlignment = TyAlignment;
519     }
520 
521     // Lay out header fields immediately.
522     uint64_t Offset;
523     if (IsHeader) {
524       Offset = alignTo(StructSize, FieldAlignment);
525       StructSize = Offset + FieldSize;
526 
527     // Everything else has a flexible offset.
528     } else {
529       Offset = OptimizedStructLayoutField::FlexibleOffset;
530     }
531 
532     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
533     return Fields.size() - 1;
534   }
535 
536   /// Finish the layout and set the body on the given type.
537   void finish(StructType *Ty);
538 
539   uint64_t getStructSize() const {
540     assert(IsFinished && "not yet finished!");
541     return StructSize;
542   }
543 
544   Align getStructAlign() const {
545     assert(IsFinished && "not yet finished!");
546     return StructAlign;
547   }
548 
549   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
550     assert(IsFinished && "not yet finished!");
551     return Fields[Id].LayoutFieldIndex;
552   }
553 
554   Field getLayoutField(FieldIDType Id) const {
555     assert(IsFinished && "not yet finished!");
556     return Fields[Id];
557   }
558 };
559 } // namespace
560 
561 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
562   auto Updater = [&](Value *I) {
563     auto Field = B.getLayoutField(getFieldIndex(I));
564     setFieldIndex(I, Field.LayoutFieldIndex);
565     setAlign(I, Field.Alignment.value());
566     setOffset(I, Field.Offset);
567   };
568   LayoutIndexUpdateStarted = true;
569   for (auto &S : Spills)
570     Updater(S.first);
571   for (const auto &A : Allocas)
572     Updater(A.Alloca);
573   LayoutIndexUpdateStarted = false;
574 }
575 
576 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
577                                           FrameDataInfo &FrameData,
578                                           coro::Shape &Shape) {
579   using AllocaSetType = SmallVector<AllocaInst *, 4>;
580   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
581 
582   // We need to add field for allocas at the end of this function.
583   auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
584     for (auto AllocaList : NonOverlapedAllocas) {
585       auto *LargestAI = *AllocaList.begin();
586       FieldIDType Id = addFieldForAlloca(LargestAI);
587       for (auto *Alloca : AllocaList)
588         FrameData.setFieldIndex(Alloca, Id);
589     }
590   });
591 
592   if (!Shape.OptimizeFrame && !EnableReuseStorageInFrame) {
593     for (const auto &A : FrameData.Allocas) {
594       AllocaInst *Alloca = A.Alloca;
595       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
596     }
597     return;
598   }
599 
600   // Because there are pathes from the lifetime.start to coro.end
601   // for each alloca, the liferanges for every alloca is overlaped
602   // in the blocks who contain coro.end and the successor blocks.
603   // So we choose to skip there blocks when we calculates the liferange
604   // for each alloca. It should be reasonable since there shouldn't be uses
605   // in these blocks and the coroutine frame shouldn't be used outside the
606   // coroutine body.
607   //
608   // Note that the user of coro.suspend may not be SwitchInst. However, this
609   // case seems too complex to handle. And it is harmless to skip these
610   // patterns since it just prevend putting the allocas to live in the same
611   // slot.
612   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
613   for (auto CoroSuspendInst : Shape.CoroSuspends) {
614     for (auto U : CoroSuspendInst->users()) {
615       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
616         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
617         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
618         SWI->setDefaultDest(SWI->getSuccessor(1));
619       }
620     }
621   }
622 
623   auto ExtractAllocas = [&]() {
624     AllocaSetType Allocas;
625     Allocas.reserve(FrameData.Allocas.size());
626     for (const auto &A : FrameData.Allocas)
627       Allocas.push_back(A.Alloca);
628     return Allocas;
629   };
630   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
631                                       StackLifetime::LivenessType::May);
632   StackLifetimeAnalyzer.run();
633   auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
634     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
635         StackLifetimeAnalyzer.getLiveRange(AI2));
636   };
637   auto GetAllocaSize = [&](const AllocaInfo &A) {
638     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
639     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
640     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
641     return RetSize->getFixedSize();
642   };
643   // Put larger allocas in the front. So the larger allocas have higher
644   // priority to merge, which can save more space potentially. Also each
645   // AllocaSet would be ordered. So we can get the largest Alloca in one
646   // AllocaSet easily.
647   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
648     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
649   });
650   for (const auto &A : FrameData.Allocas) {
651     AllocaInst *Alloca = A.Alloca;
652     bool Merged = false;
653     // Try to find if the Alloca is not inferenced with any existing
654     // NonOverlappedAllocaSet. If it is true, insert the alloca to that
655     // NonOverlappedAllocaSet.
656     for (auto &AllocaSet : NonOverlapedAllocas) {
657       assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
658       bool NoInference = none_of(AllocaSet, [&](auto Iter) {
659         return IsAllocaInferenre(Alloca, Iter);
660       });
661       // If the alignment of A is multiple of the alignment of B, the address
662       // of A should satisfy the requirement for aligning for B.
663       //
664       // There may be other more fine-grained strategies to handle the alignment
665       // infomation during the merging process. But it seems hard to handle
666       // these strategies and benefit little.
667       bool Alignable = [&]() -> bool {
668         auto *LargestAlloca = *AllocaSet.begin();
669         return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
670                0;
671       }();
672       bool CouldMerge = NoInference && Alignable;
673       if (!CouldMerge)
674         continue;
675       AllocaSet.push_back(Alloca);
676       Merged = true;
677       break;
678     }
679     if (!Merged) {
680       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
681     }
682   }
683   // Recover the default target destination for each Switch statement
684   // reserved.
685   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
686     SwitchInst *SWI = SwitchAndDefaultDest.first;
687     BasicBlock *DestBB = SwitchAndDefaultDest.second;
688     SWI->setDefaultDest(DestBB);
689   }
690   // This Debug Info could tell us which allocas are merged into one slot.
691   LLVM_DEBUG(for (auto &AllocaSet
692                   : NonOverlapedAllocas) {
693     if (AllocaSet.size() > 1) {
694       dbgs() << "In Function:" << F.getName() << "\n";
695       dbgs() << "Find Union Set "
696              << "\n";
697       dbgs() << "\tAllocas are \n";
698       for (auto Alloca : AllocaSet)
699         dbgs() << "\t\t" << *Alloca << "\n";
700     }
701   });
702 }
703 
704 void FrameTypeBuilder::finish(StructType *Ty) {
705   assert(!IsFinished && "already finished!");
706 
707   // Prepare the optimal-layout field array.
708   // The Id in the layout field is a pointer to our Field for it.
709   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
710   LayoutFields.reserve(Fields.size());
711   for (auto &Field : Fields) {
712     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
713                               Field.Offset);
714   }
715 
716   // Perform layout.
717   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
718   StructSize = SizeAndAlign.first;
719   StructAlign = SizeAndAlign.second;
720 
721   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
722     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
723   };
724 
725   // We need to produce a packed struct type if there's a field whose
726   // assigned offset isn't a multiple of its natural type alignment.
727   bool Packed = [&] {
728     for (auto &LayoutField : LayoutFields) {
729       auto &F = getField(LayoutField);
730       if (!isAligned(F.TyAlignment, LayoutField.Offset))
731         return true;
732     }
733     return false;
734   }();
735 
736   // Build the struct body.
737   SmallVector<Type*, 16> FieldTypes;
738   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
739   uint64_t LastOffset = 0;
740   for (auto &LayoutField : LayoutFields) {
741     auto &F = getField(LayoutField);
742 
743     auto Offset = LayoutField.Offset;
744 
745     // Add a padding field if there's a padding gap and we're either
746     // building a packed struct or the padding gap is more than we'd
747     // get from aligning to the field type's natural alignment.
748     assert(Offset >= LastOffset);
749     if (Offset != LastOffset) {
750       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
751         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
752                                             Offset - LastOffset));
753     }
754 
755     F.Offset = Offset;
756     F.LayoutFieldIndex = FieldTypes.size();
757 
758     FieldTypes.push_back(F.Ty);
759     LastOffset = Offset + F.Size;
760   }
761 
762   Ty->setBody(FieldTypes, Packed);
763 
764 #ifndef NDEBUG
765   // Check that the IR layout matches the offsets we expect.
766   auto Layout = DL.getStructLayout(Ty);
767   for (auto &F : Fields) {
768     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
769     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
770   }
771 #endif
772 
773   IsFinished = true;
774 }
775 
776 static void cacheDIVar(FrameDataInfo &FrameData,
777                        DenseMap<Value *, DILocalVariable *> &DIVarCache) {
778   for (auto *V : FrameData.getAllDefs()) {
779     if (DIVarCache.find(V) != DIVarCache.end())
780       continue;
781 
782     auto DDIs = FindDbgDeclareUses(V);
783     auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
784       return DDI->getExpression()->getNumElements() == 0;
785     });
786     if (I != DDIs.end())
787       DIVarCache.insert({V, (*I)->getVariable()});
788   }
789 }
790 
791 /// Create name for Type. It uses MDString to store new created string to
792 /// avoid memory leak.
793 static StringRef solveTypeName(Type *Ty) {
794   if (Ty->isIntegerTy()) {
795     // The longest name in common may be '__int_128', which has 9 bits.
796     SmallString<16> Buffer;
797     raw_svector_ostream OS(Buffer);
798     OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
799     auto *MDName = MDString::get(Ty->getContext(), OS.str());
800     return MDName->getString();
801   }
802 
803   if (Ty->isFloatingPointTy()) {
804     if (Ty->isFloatTy())
805       return "__float_";
806     if (Ty->isDoubleTy())
807       return "__double_";
808     return "__floating_type_";
809   }
810 
811   if (Ty->isPointerTy()) {
812     auto *PtrTy = cast<PointerType>(Ty);
813     Type *PointeeTy = PtrTy->getPointerElementType();
814     auto Name = solveTypeName(PointeeTy);
815     if (Name == "UnknownType")
816       return "PointerType";
817     SmallString<16> Buffer;
818     Twine(Name + "_Ptr").toStringRef(Buffer);
819     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
820     return MDName->getString();
821   }
822 
823   if (Ty->isStructTy()) {
824     if (!cast<StructType>(Ty)->hasName())
825       return "__LiteralStructType_";
826 
827     auto Name = Ty->getStructName();
828 
829     SmallString<16> Buffer(Name);
830     for_each(Buffer, [](auto &Iter) {
831       if (Iter == '.' || Iter == ':')
832         Iter = '_';
833     });
834     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
835     return MDName->getString();
836   }
837 
838   return "UnknownType";
839 }
840 
841 static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
842                            const DataLayout &Layout, DIScope *Scope,
843                            unsigned LineNum,
844                            DenseMap<Type *, DIType *> &DITypeCache) {
845   if (DIType *DT = DITypeCache.lookup(Ty))
846     return DT;
847 
848   StringRef Name = solveTypeName(Ty);
849 
850   DIType *RetType = nullptr;
851 
852   if (Ty->isIntegerTy()) {
853     auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
854     RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
855                                       llvm::DINode::FlagArtificial);
856   } else if (Ty->isFloatingPointTy()) {
857     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
858                                       dwarf::DW_ATE_float,
859                                       llvm::DINode::FlagArtificial);
860   } else if (Ty->isPointerTy()) {
861     // Construct BasicType instead of PointerType to avoid infinite
862     // search problem.
863     // For example, we would be in trouble if we traverse recursively:
864     //
865     //  struct Node {
866     //      Node* ptr;
867     //  };
868     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
869                                       dwarf::DW_ATE_address,
870                                       llvm::DINode::FlagArtificial);
871   } else if (Ty->isStructTy()) {
872     auto *DIStruct = Builder.createStructType(
873         Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
874         Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
875         llvm::DINodeArray());
876 
877     auto *StructTy = cast<StructType>(Ty);
878     SmallVector<Metadata *, 16> Elements;
879     for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
880       DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
881                                  Scope, LineNum, DITypeCache);
882       assert(DITy);
883       Elements.push_back(Builder.createMemberType(
884           Scope, DITy->getName(), Scope->getFile(), LineNum,
885           DITy->getSizeInBits(), DITy->getAlignInBits(),
886           Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
887           llvm::DINode::FlagArtificial, DITy));
888     }
889 
890     Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
891 
892     RetType = DIStruct;
893   } else {
894     LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
895     SmallString<32> Buffer;
896     raw_svector_ostream OS(Buffer);
897     OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
898     RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
899                                       dwarf::DW_ATE_address,
900                                       llvm::DINode::FlagArtificial);
901   }
902 
903   DITypeCache.insert({Ty, RetType});
904   return RetType;
905 }
906 
907 /// Build artificial debug info for C++ coroutine frames to allow users to
908 /// inspect the contents of the frame directly
909 ///
910 /// Create Debug information for coroutine frame with debug name "__coro_frame".
911 /// The debug information for the fields of coroutine frame is constructed from
912 /// the following way:
913 /// 1. For all the value in the Frame, we search the use of dbg.declare to find
914 ///    the corresponding debug variables for the value. If we can find the
915 ///    debug variable, we can get full and accurate debug information.
916 /// 2. If we can't get debug information in step 1 and 2, we could only try to
917 ///    build the DIType by Type. We did this in solveDIType. We only handle
918 ///    integer, float, double, integer type and struct type for now.
919 static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
920                                 FrameDataInfo &FrameData) {
921   DISubprogram *DIS = F.getSubprogram();
922   // If there is no DISubprogram for F, it implies the Function are not compiled
923   // with debug info. So we also don't need to generate debug info for the frame
924   // neither.
925   if (!DIS || !DIS->getUnit() ||
926       !dwarf::isCPlusPlus(
927           (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
928     return;
929 
930   assert(Shape.ABI == coro::ABI::Switch &&
931          "We could only build debug infomation for C++ coroutine now.\n");
932 
933   DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
934 
935   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
936   assert(PromiseAlloca &&
937          "Coroutine with switch ABI should own Promise alloca");
938 
939   TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
940   if (DIs.empty())
941     return;
942 
943   DbgDeclareInst *PromiseDDI = DIs.front();
944   DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
945   DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
946   DIFile *DFile = PromiseDIScope->getFile();
947   DILocation *DILoc = PromiseDDI->getDebugLoc().get();
948   unsigned LineNum = PromiseDIVariable->getLine();
949 
950   DICompositeType *FrameDITy = DBuilder.createStructType(
951       DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
952       Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
953       llvm::DINodeArray());
954   StructType *FrameTy = Shape.FrameTy;
955   SmallVector<Metadata *, 16> Elements;
956   DataLayout Layout = F.getParent()->getDataLayout();
957 
958   DenseMap<Value *, DILocalVariable *> DIVarCache;
959   cacheDIVar(FrameData, DIVarCache);
960 
961   unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
962   unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
963   unsigned IndexIndex = Shape.SwitchLowering.IndexField;
964 
965   DenseMap<unsigned, StringRef> NameCache;
966   NameCache.insert({ResumeIndex, "__resume_fn"});
967   NameCache.insert({DestroyIndex, "__destroy_fn"});
968   NameCache.insert({IndexIndex, "__coro_index"});
969 
970   Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
971        *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
972        *IndexTy = FrameTy->getElementType(IndexIndex);
973 
974   DenseMap<unsigned, DIType *> TyCache;
975   TyCache.insert({ResumeIndex,
976                   DBuilder.createBasicType("__resume_fn",
977                                            Layout.getTypeSizeInBits(ResumeFnTy),
978                                            dwarf::DW_ATE_address)});
979   TyCache.insert(
980       {DestroyIndex, DBuilder.createBasicType(
981                          "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
982                          dwarf::DW_ATE_address)});
983 
984   /// FIXME: If we fill the field `SizeInBits` with the actual size of
985   /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
986   TyCache.insert({IndexIndex, DBuilder.createBasicType(
987                                   "__coro_index",
988                                   (Layout.getTypeSizeInBits(IndexTy) < 8)
989                                       ? 8
990                                       : Layout.getTypeSizeInBits(IndexTy),
991                                   dwarf::DW_ATE_unsigned_char)});
992 
993   for (auto *V : FrameData.getAllDefs()) {
994     if (DIVarCache.find(V) == DIVarCache.end())
995       continue;
996 
997     auto Index = FrameData.getFieldIndex(V);
998 
999     NameCache.insert({Index, DIVarCache[V]->getName()});
1000     TyCache.insert({Index, DIVarCache[V]->getType()});
1001   }
1002 
1003   // Cache from index to (Align, Offset Pair)
1004   DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
1005   // The Align and Offset of Resume function and Destroy function are fixed.
1006   OffsetCache.insert({ResumeIndex, {8, 0}});
1007   OffsetCache.insert({DestroyIndex, {8, 8}});
1008   OffsetCache.insert(
1009       {IndexIndex,
1010        {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1011 
1012   for (auto *V : FrameData.getAllDefs()) {
1013     auto Index = FrameData.getFieldIndex(V);
1014 
1015     OffsetCache.insert(
1016         {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
1017   }
1018 
1019   DenseMap<Type *, DIType *> DITypeCache;
1020   // This counter is used to avoid same type names. e.g., there would be
1021   // many i32 and i64 types in one coroutine. And we would use i32_0 and
1022   // i32_1 to avoid the same type. Since it makes no sense the name of the
1023   // fields confilicts with each other.
1024   unsigned UnknownTypeNum = 0;
1025   for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1026     if (OffsetCache.find(Index) == OffsetCache.end())
1027       continue;
1028 
1029     std::string Name;
1030     uint64_t SizeInBits;
1031     uint32_t AlignInBits;
1032     uint64_t OffsetInBits;
1033     DIType *DITy = nullptr;
1034 
1035     Type *Ty = FrameTy->getElementType(Index);
1036     assert(Ty->isSized() && "We can't handle type which is not sized.\n");
1037     SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
1038     AlignInBits = OffsetCache[Index].first * 8;
1039     OffsetInBits = OffsetCache[Index].second * 8;
1040 
1041     if (NameCache.find(Index) != NameCache.end()) {
1042       Name = NameCache[Index].str();
1043       DITy = TyCache[Index];
1044     } else {
1045       DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1046       assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1047       Name = DITy->getName().str();
1048       Name += "_" + std::to_string(UnknownTypeNum);
1049       UnknownTypeNum++;
1050     }
1051 
1052     Elements.push_back(DBuilder.createMemberType(
1053         FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1054         llvm::DINode::FlagArtificial, DITy));
1055   }
1056 
1057   DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1058 
1059   auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1060                                                  DFile, LineNum, FrameDITy,
1061                                                  true, DINode::FlagArtificial);
1062   assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));
1063 
1064   // Subprogram would have ContainedNodes field which records the debug
1065   // variables it contained. So we need to add __coro_frame to the
1066   // ContainedNodes of it.
1067   //
1068   // If we don't add __coro_frame to the RetainedNodes, user may get
1069   // `no symbol __coro_frame in context` rather than `__coro_frame`
1070   // is optimized out, which is more precise.
1071   if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1072     auto RetainedNodes = SubProgram->getRetainedNodes();
1073     SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1074                                                  RetainedNodes.end());
1075     RetainedNodesVec.push_back(FrameDIVar);
1076     SubProgram->replaceOperandWith(
1077         7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1078   }
1079 
1080   DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1081                          DBuilder.createExpression(), DILoc,
1082                          Shape.getInsertPtAfterFramePtr());
1083 }
1084 
1085 // Build a struct that will keep state for an active coroutine.
1086 //   struct f.frame {
1087 //     ResumeFnTy ResumeFnAddr;
1088 //     ResumeFnTy DestroyFnAddr;
1089 //     int ResumeIndex;
1090 //     ... promise (if present) ...
1091 //     ... spills ...
1092 //   };
1093 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1094                                   FrameDataInfo &FrameData) {
1095   LLVMContext &C = F.getContext();
1096   const DataLayout &DL = F.getParent()->getDataLayout();
1097   StructType *FrameTy = [&] {
1098     SmallString<32> Name(F.getName());
1099     Name.append(".Frame");
1100     return StructType::create(C, Name);
1101   }();
1102 
1103   // We will use this value to cap the alignment of spilled values.
1104   Optional<Align> MaxFrameAlignment;
1105   if (Shape.ABI == coro::ABI::Async)
1106     MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1107   FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1108 
1109   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1110   Optional<FieldIDType> SwitchIndexFieldId;
1111 
1112   if (Shape.ABI == coro::ABI::Switch) {
1113     auto *FramePtrTy = FrameTy->getPointerTo();
1114     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
1115                                    /*IsVarArg=*/false);
1116     auto *FnPtrTy = FnTy->getPointerTo();
1117 
1118     // Add header fields for the resume and destroy functions.
1119     // We can rely on these being perfectly packed.
1120     (void)B.addField(FnPtrTy, None, /*header*/ true);
1121     (void)B.addField(FnPtrTy, None, /*header*/ true);
1122 
1123     // PromiseAlloca field needs to be explicitly added here because it's
1124     // a header field with a fixed offset based on its alignment. Hence it
1125     // needs special handling and cannot be added to FrameData.Allocas.
1126     if (PromiseAlloca)
1127       FrameData.setFieldIndex(
1128           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1129 
1130     // Add a field to store the suspend index.  This doesn't need to
1131     // be in the header.
1132     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1133     Type *IndexType = Type::getIntNTy(C, IndexBits);
1134 
1135     SwitchIndexFieldId = B.addField(IndexType, None);
1136   } else {
1137     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1138   }
1139 
1140   // Because multiple allocas may own the same field slot,
1141   // we add allocas to field here.
1142   B.addFieldForAllocas(F, FrameData, Shape);
1143   // Add PromiseAlloca to Allocas list so that
1144   // 1. updateLayoutIndex could update its index after
1145   // `performOptimizedStructLayout`
1146   // 2. it is processed in insertSpills.
1147   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
1148     // We assume that the promise alloca won't be modified before
1149     // CoroBegin and no alias will be create before CoroBegin.
1150     FrameData.Allocas.emplace_back(
1151         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
1152   // Create an entry for every spilled value.
1153   for (auto &S : FrameData.Spills) {
1154     Type *FieldType = S.first->getType();
1155     // For byval arguments, we need to store the pointed value in the frame,
1156     // instead of the pointer itself.
1157     if (const Argument *A = dyn_cast<Argument>(S.first))
1158       if (A->hasByValAttr())
1159         FieldType = A->getParamByValType();
1160     FieldIDType Id =
1161         B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
1162     FrameData.setFieldIndex(S.first, Id);
1163   }
1164 
1165   B.finish(FrameTy);
1166   FrameData.updateLayoutIndex(B);
1167   Shape.FrameAlign = B.getStructAlign();
1168   Shape.FrameSize = B.getStructSize();
1169 
1170   switch (Shape.ABI) {
1171   case coro::ABI::Switch: {
1172     // In the switch ABI, remember the switch-index field.
1173     auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1174     Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1175     Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1176     Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1177 
1178     // Also round the frame size up to a multiple of its alignment, as is
1179     // generally expected in C/C++.
1180     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1181     break;
1182   }
1183 
1184   // In the retcon ABI, remember whether the frame is inline in the storage.
1185   case coro::ABI::Retcon:
1186   case coro::ABI::RetconOnce: {
1187     auto Id = Shape.getRetconCoroId();
1188     Shape.RetconLowering.IsFrameInlineInStorage
1189       = (B.getStructSize() <= Id->getStorageSize() &&
1190          B.getStructAlign() <= Id->getStorageAlignment());
1191     break;
1192   }
1193   case coro::ABI::Async: {
1194     Shape.AsyncLowering.FrameOffset =
1195         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1196     // Also make the final context size a multiple of the context alignment to
1197     // make allocation easier for allocators.
1198     Shape.AsyncLowering.ContextSize =
1199         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1200                 Shape.AsyncLowering.getContextAlignment());
1201     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1202       report_fatal_error(
1203           "The alignment requirment of frame variables cannot be higher than "
1204           "the alignment of the async function context");
1205     }
1206     break;
1207   }
1208   }
1209 
1210   return FrameTy;
1211 }
1212 
1213 // We use a pointer use visitor to track how an alloca is being used.
1214 // The goal is to be able to answer the following three questions:
1215 // 1. Should this alloca be allocated on the frame instead.
1216 // 2. Could the content of the alloca be modified prior to CoroBegn, which would
1217 // require copying the data from alloca to the frame after CoroBegin.
1218 // 3. Is there any alias created for this alloca prior to CoroBegin, but used
1219 // after CoroBegin. In that case, we will need to recreate the alias after
1220 // CoroBegin based off the frame. To answer question 1, we track two things:
1221 //   a. List of all BasicBlocks that use this alloca or any of the aliases of
1222 //   the alloca. In the end, we check if there exists any two basic blocks that
1223 //   cross suspension points. If so, this alloca must be put on the frame. b.
1224 //   Whether the alloca or any alias of the alloca is escaped at some point,
1225 //   either by storing the address somewhere, or the address is used in a
1226 //   function call that might capture. If it's ever escaped, this alloca must be
1227 //   put on the frame conservatively.
1228 // To answer quetion 2, we track through the variable MayWriteBeforeCoroBegin.
1229 // Whenever a potential write happens, either through a store instruction, a
1230 // function call or any of the memory intrinsics, we check whether this
1231 // instruction is prior to CoroBegin. To answer question 3, we track the offsets
1232 // of all aliases created for the alloca prior to CoroBegin but used after
1233 // CoroBegin. llvm::Optional is used to be able to represent the case when the
1234 // offset is unknown (e.g. when you have a PHINode that takes in different
1235 // offset values). We cannot handle unknown offsets and will assert. This is the
1236 // potential issue left out. An ideal solution would likely require a
1237 // significant redesign.
1238 namespace {
1239 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1240   using Base = PtrUseVisitor<AllocaUseVisitor>;
1241   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1242                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
1243                    bool ShouldUseLifetimeStartInfo)
1244       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
1245         ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1246 
1247   void visit(Instruction &I) {
1248     Users.insert(&I);
1249     Base::visit(I);
1250     // If the pointer is escaped prior to CoroBegin, we have to assume it would
1251     // be written into before CoroBegin as well.
1252     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1253       MayWriteBeforeCoroBegin = true;
1254     }
1255   }
1256   // We need to provide this overload as PtrUseVisitor uses a pointer based
1257   // visiting function.
1258   void visit(Instruction *I) { return visit(*I); }
1259 
1260   void visitPHINode(PHINode &I) {
1261     enqueueUsers(I);
1262     handleAlias(I);
1263   }
1264 
1265   void visitSelectInst(SelectInst &I) {
1266     enqueueUsers(I);
1267     handleAlias(I);
1268   }
1269 
1270   void visitStoreInst(StoreInst &SI) {
1271     // Regardless whether the alias of the alloca is the value operand or the
1272     // pointer operand, we need to assume the alloca is been written.
1273     handleMayWrite(SI);
1274 
1275     if (SI.getValueOperand() != U->get())
1276       return;
1277 
1278     // We are storing the pointer into a memory location, potentially escaping.
1279     // As an optimization, we try to detect simple cases where it doesn't
1280     // actually escape, for example:
1281     //   %ptr = alloca ..
1282     //   %addr = alloca ..
1283     //   store %ptr, %addr
1284     //   %x = load %addr
1285     //   ..
1286     // If %addr is only used by loading from it, we could simply treat %x as
1287     // another alias of %ptr, and not considering %ptr being escaped.
1288     auto IsSimpleStoreThenLoad = [&]() {
1289       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1290       // If the memory location we are storing to is not an alloca, it
1291       // could be an alias of some other memory locations, which is difficult
1292       // to analyze.
1293       if (!AI)
1294         return false;
1295       // StoreAliases contains aliases of the memory location stored into.
1296       SmallVector<Instruction *, 4> StoreAliases = {AI};
1297       while (!StoreAliases.empty()) {
1298         Instruction *I = StoreAliases.pop_back_val();
1299         for (User *U : I->users()) {
1300           // If we are loading from the memory location, we are creating an
1301           // alias of the original pointer.
1302           if (auto *LI = dyn_cast<LoadInst>(U)) {
1303             enqueueUsers(*LI);
1304             handleAlias(*LI);
1305             continue;
1306           }
1307           // If we are overriding the memory location, the pointer certainly
1308           // won't escape.
1309           if (auto *S = dyn_cast<StoreInst>(U))
1310             if (S->getPointerOperand() == I)
1311               continue;
1312           if (auto *II = dyn_cast<IntrinsicInst>(U))
1313             if (II->isLifetimeStartOrEnd())
1314               continue;
1315           // BitCastInst creats aliases of the memory location being stored
1316           // into.
1317           if (auto *BI = dyn_cast<BitCastInst>(U)) {
1318             StoreAliases.push_back(BI);
1319             continue;
1320           }
1321           return false;
1322         }
1323       }
1324 
1325       return true;
1326     };
1327 
1328     if (!IsSimpleStoreThenLoad())
1329       PI.setEscaped(&SI);
1330   }
1331 
1332   // All mem intrinsics modify the data.
1333   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1334 
1335   void visitBitCastInst(BitCastInst &BC) {
1336     Base::visitBitCastInst(BC);
1337     handleAlias(BC);
1338   }
1339 
1340   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1341     Base::visitAddrSpaceCastInst(ASC);
1342     handleAlias(ASC);
1343   }
1344 
1345   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1346     // The base visitor will adjust Offset accordingly.
1347     Base::visitGetElementPtrInst(GEPI);
1348     handleAlias(GEPI);
1349   }
1350 
1351   void visitIntrinsicInst(IntrinsicInst &II) {
1352     // When we found the lifetime markers refers to a
1353     // subrange of the original alloca, ignore the lifetime
1354     // markers to avoid misleading the analysis.
1355     if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1356         !Offset.isZero())
1357       return Base::visitIntrinsicInst(II);
1358     LifetimeStarts.insert(&II);
1359   }
1360 
1361   void visitCallBase(CallBase &CB) {
1362     for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1363       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1364         PI.setEscaped(&CB);
1365     handleMayWrite(CB);
1366   }
1367 
1368   bool getShouldLiveOnFrame() const {
1369     if (!ShouldLiveOnFrame)
1370       ShouldLiveOnFrame = computeShouldLiveOnFrame();
1371     return ShouldLiveOnFrame.getValue();
1372   }
1373 
1374   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1375 
1376   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1377     assert(getShouldLiveOnFrame() && "This method should only be called if the "
1378                                      "alloca needs to live on the frame.");
1379     for (const auto &P : AliasOffetMap)
1380       if (!P.second)
1381         report_fatal_error("Unable to handle an alias with unknown offset "
1382                            "created before CoroBegin.");
1383     return AliasOffetMap;
1384   }
1385 
1386 private:
1387   const DominatorTree &DT;
1388   const CoroBeginInst &CoroBegin;
1389   const SuspendCrossingInfo &Checker;
1390   // All alias to the original AllocaInst, created before CoroBegin and used
1391   // after CoroBegin. Each entry contains the instruction and the offset in the
1392   // original Alloca. They need to be recreated after CoroBegin off the frame.
1393   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1394   SmallPtrSet<Instruction *, 4> Users{};
1395   SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1396   bool MayWriteBeforeCoroBegin{false};
1397   bool ShouldUseLifetimeStartInfo{true};
1398 
1399   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1400 
1401   bool computeShouldLiveOnFrame() const {
1402     // If lifetime information is available, we check it first since it's
1403     // more precise. We look at every pair of lifetime.start intrinsic and
1404     // every basic block that uses the pointer to see if they cross suspension
1405     // points. The uses cover both direct uses as well as indirect uses.
1406     if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1407       for (auto *I : Users)
1408         for (auto *S : LifetimeStarts)
1409           if (Checker.isDefinitionAcrossSuspend(*S, I))
1410             return true;
1411       return false;
1412     }
1413     // FIXME: Ideally the isEscaped check should come at the beginning.
1414     // However there are a few loose ends that need to be fixed first before
1415     // we can do that. We need to make sure we are not over-conservative, so
1416     // that the data accessed in-between await_suspend and symmetric transfer
1417     // is always put on the stack, and also data accessed after coro.end is
1418     // always put on the stack (esp the return object). To fix that, we need
1419     // to:
1420     //  1) Potentially treat sret as nocapture in calls
1421     //  2) Special handle the return object and put it on the stack
1422     //  3) Utilize lifetime.end intrinsic
1423     if (PI.isEscaped())
1424       return true;
1425 
1426     for (auto *U1 : Users)
1427       for (auto *U2 : Users)
1428         if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1429           return true;
1430 
1431     return false;
1432   }
1433 
1434   void handleMayWrite(const Instruction &I) {
1435     if (!DT.dominates(&CoroBegin, &I))
1436       MayWriteBeforeCoroBegin = true;
1437   }
1438 
1439   bool usedAfterCoroBegin(Instruction &I) {
1440     for (auto &U : I.uses())
1441       if (DT.dominates(&CoroBegin, U))
1442         return true;
1443     return false;
1444   }
1445 
1446   void handleAlias(Instruction &I) {
1447     // We track all aliases created prior to CoroBegin but used after.
1448     // These aliases may need to be recreated after CoroBegin if the alloca
1449     // need to live on the frame.
1450     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1451       return;
1452 
1453     if (!IsOffsetKnown) {
1454       AliasOffetMap[&I].reset();
1455     } else {
1456       auto Itr = AliasOffetMap.find(&I);
1457       if (Itr == AliasOffetMap.end()) {
1458         AliasOffetMap[&I] = Offset;
1459       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1460         // If we have seen two different possible values for this alias, we set
1461         // it to empty.
1462         AliasOffetMap[&I].reset();
1463       }
1464     }
1465   }
1466 };
1467 } // namespace
1468 
1469 // We need to make room to insert a spill after initial PHIs, but before
1470 // catchswitch instruction. Placing it before violates the requirement that
1471 // catchswitch, like all other EHPads must be the first nonPHI in a block.
1472 //
1473 // Split away catchswitch into a separate block and insert in its place:
1474 //
1475 //   cleanuppad <InsertPt> cleanupret.
1476 //
1477 // cleanupret instruction will act as an insert point for the spill.
1478 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1479   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1480   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1481   CurrentBlock->getTerminator()->eraseFromParent();
1482 
1483   auto *CleanupPad =
1484       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1485   auto *CleanupRet =
1486       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1487   return CleanupRet;
1488 }
1489 
1490 static void createFramePtr(coro::Shape &Shape) {
1491   auto *CB = Shape.CoroBegin;
1492   IRBuilder<> Builder(CB->getNextNode());
1493   StructType *FrameTy = Shape.FrameTy;
1494   PointerType *FramePtrTy = FrameTy->getPointerTo();
1495   Shape.FramePtr =
1496       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1497 }
1498 
1499 // Replace all alloca and SSA values that are accessed across suspend points
1500 // with GetElementPointer from coroutine frame + loads and stores. Create an
1501 // AllocaSpillBB that will become the new entry block for the resume parts of
1502 // the coroutine:
1503 //
1504 //    %hdl = coro.begin(...)
1505 //    whatever
1506 //
1507 // becomes:
1508 //
1509 //    %hdl = coro.begin(...)
1510 //    %FramePtr = bitcast i8* hdl to %f.frame*
1511 //    br label %AllocaSpillBB
1512 //
1513 //  AllocaSpillBB:
1514 //    ; geps corresponding to allocas that were moved to coroutine frame
1515 //    br label PostSpill
1516 //
1517 //  PostSpill:
1518 //    whatever
1519 //
1520 //
1521 static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1522   auto *CB = Shape.CoroBegin;
1523   LLVMContext &C = CB->getContext();
1524   IRBuilder<> Builder(C);
1525   StructType *FrameTy = Shape.FrameTy;
1526   Value *FramePtr = Shape.FramePtr;
1527   DominatorTree DT(*CB->getFunction());
1528   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1529 
1530   // Create a GEP with the given index into the coroutine frame for the original
1531   // value Orig. Appends an extra 0 index for array-allocas, preserving the
1532   // original type.
1533   auto GetFramePointer = [&](Value *Orig) -> Value * {
1534     FieldIDType Index = FrameData.getFieldIndex(Orig);
1535     SmallVector<Value *, 3> Indices = {
1536         ConstantInt::get(Type::getInt32Ty(C), 0),
1537         ConstantInt::get(Type::getInt32Ty(C), Index),
1538     };
1539 
1540     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1541       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1542         auto Count = CI->getValue().getZExtValue();
1543         if (Count > 1) {
1544           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1545         }
1546       } else {
1547         report_fatal_error("Coroutines cannot handle non static allocas yet");
1548       }
1549     }
1550 
1551     auto GEP = cast<GetElementPtrInst>(
1552         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1553     if (isa<AllocaInst>(Orig)) {
1554       // If the type of GEP is not equal to the type of AllocaInst, it implies
1555       // that the AllocaInst may be reused in the Frame slot of other
1556       // AllocaInst. So We cast GEP to the AllocaInst here to re-use
1557       // the Frame storage.
1558       //
1559       // Note: If we change the strategy dealing with alignment, we need to refine
1560       // this casting.
1561       if (GEP->getResultElementType() != Orig->getType())
1562         return Builder.CreateBitCast(GEP, Orig->getType(),
1563                                      Orig->getName() + Twine(".cast"));
1564     }
1565     return GEP;
1566   };
1567 
1568   for (auto const &E : FrameData.Spills) {
1569     Value *Def = E.first;
1570     auto SpillAlignment = Align(FrameData.getAlign(Def));
1571     // Create a store instruction storing the value into the
1572     // coroutine frame.
1573     Instruction *InsertPt = nullptr;
1574     Type *ByValTy = nullptr;
1575     if (auto *Arg = dyn_cast<Argument>(Def)) {
1576       // For arguments, we will place the store instruction right after
1577       // the coroutine frame pointer instruction, i.e. bitcast of
1578       // coro.begin from i8* to %f.frame*.
1579       InsertPt = Shape.getInsertPtAfterFramePtr();
1580 
1581       // If we're spilling an Argument, make sure we clear 'nocapture'
1582       // from the coroutine function.
1583       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1584 
1585       if (Arg->hasByValAttr())
1586         ByValTy = Arg->getParamByValType();
1587     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1588       // Don't spill immediately after a suspend; splitting assumes
1589       // that the suspend will be followed by a branch.
1590       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1591     } else {
1592       auto *I = cast<Instruction>(Def);
1593       if (!DT.dominates(CB, I)) {
1594         // If it is not dominated by CoroBegin, then spill should be
1595         // inserted immediately after CoroFrame is computed.
1596         InsertPt = Shape.getInsertPtAfterFramePtr();
1597       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1598         // If we are spilling the result of the invoke instruction, split
1599         // the normal edge and insert the spill in the new block.
1600         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1601         InsertPt = NewBB->getTerminator();
1602       } else if (isa<PHINode>(I)) {
1603         // Skip the PHINodes and EH pads instructions.
1604         BasicBlock *DefBlock = I->getParent();
1605         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1606           InsertPt = splitBeforeCatchSwitch(CSI);
1607         else
1608           InsertPt = &*DefBlock->getFirstInsertionPt();
1609       } else {
1610         assert(!I->isTerminator() && "unexpected terminator");
1611         // For all other values, the spill is placed immediately after
1612         // the definition.
1613         InsertPt = I->getNextNode();
1614       }
1615     }
1616 
1617     auto Index = FrameData.getFieldIndex(Def);
1618     Builder.SetInsertPoint(InsertPt);
1619     auto *G = Builder.CreateConstInBoundsGEP2_32(
1620         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1621     if (ByValTy) {
1622       // For byval arguments, we need to store the pointed value in the frame,
1623       // instead of the pointer itself.
1624       auto *Value = Builder.CreateLoad(ByValTy, Def);
1625       Builder.CreateAlignedStore(Value, G, SpillAlignment);
1626     } else {
1627       Builder.CreateAlignedStore(Def, G, SpillAlignment);
1628     }
1629 
1630     BasicBlock *CurrentBlock = nullptr;
1631     Value *CurrentReload = nullptr;
1632     for (auto *U : E.second) {
1633       // If we have not seen the use block, create a load instruction to reload
1634       // the spilled value from the coroutine frame. Populates the Value pointer
1635       // reference provided with the frame GEP.
1636       if (CurrentBlock != U->getParent()) {
1637         CurrentBlock = U->getParent();
1638         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1639 
1640         auto *GEP = GetFramePointer(E.first);
1641         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1642         if (ByValTy)
1643           CurrentReload = GEP;
1644         else
1645           CurrentReload = Builder.CreateAlignedLoad(
1646               FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1647               SpillAlignment, E.first->getName() + Twine(".reload"));
1648 
1649         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1650         for (DbgDeclareInst *DDI : DIs) {
1651           bool AllowUnresolved = false;
1652           // This dbg.declare is preserved for all coro-split function
1653           // fragments. It will be unreachable in the main function, and
1654           // processed by coro::salvageDebugInfo() by CoroCloner.
1655           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1656               .insertDeclare(CurrentReload, DDI->getVariable(),
1657                              DDI->getExpression(), DDI->getDebugLoc(),
1658                              &*Builder.GetInsertPoint());
1659           // This dbg.declare is for the main function entry point.  It
1660           // will be deleted in all coro-split functions.
1661           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1662         }
1663       }
1664 
1665       // Salvage debug info on any dbg.addr that we see. We do not insert them
1666       // into each block where we have a use though.
1667       if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
1668         coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
1669       }
1670 
1671       // If we have a single edge PHINode, remove it and replace it with a
1672       // reload from the coroutine frame. (We already took care of multi edge
1673       // PHINodes by rewriting them in the rewritePHIs function).
1674       if (auto *PN = dyn_cast<PHINode>(U)) {
1675         assert(PN->getNumIncomingValues() == 1 &&
1676                "unexpected number of incoming "
1677                "values in the PHINode");
1678         PN->replaceAllUsesWith(CurrentReload);
1679         PN->eraseFromParent();
1680         continue;
1681       }
1682 
1683       // Replace all uses of CurrentValue in the current instruction with
1684       // reload.
1685       U->replaceUsesOfWith(Def, CurrentReload);
1686     }
1687   }
1688 
1689   BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1690 
1691   auto SpillBlock = FramePtrBB->splitBasicBlock(
1692       Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
1693   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1694   Shape.AllocaSpillBlock = SpillBlock;
1695 
1696   // retcon and retcon.once lowering assumes all uses have been sunk.
1697   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1698       Shape.ABI == coro::ABI::Async) {
1699     // If we found any allocas, replace all of their remaining uses with Geps.
1700     Builder.SetInsertPoint(&SpillBlock->front());
1701     for (const auto &P : FrameData.Allocas) {
1702       AllocaInst *Alloca = P.Alloca;
1703       auto *G = GetFramePointer(Alloca);
1704 
1705       // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
1706       // here, as we are changing location of the instruction.
1707       G->takeName(Alloca);
1708       Alloca->replaceAllUsesWith(G);
1709       Alloca->eraseFromParent();
1710     }
1711     return;
1712   }
1713 
1714   // If we found any alloca, replace all of their remaining uses with GEP
1715   // instructions. To remain debugbility, we replace the uses of allocas for
1716   // dbg.declares and dbg.values with the reload from the frame.
1717   // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1718   // as some of the uses may not be dominated by CoroBegin.
1719   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1720   SmallVector<Instruction *, 4> UsersToUpdate;
1721   for (const auto &A : FrameData.Allocas) {
1722     AllocaInst *Alloca = A.Alloca;
1723     UsersToUpdate.clear();
1724     for (User *U : Alloca->users()) {
1725       auto *I = cast<Instruction>(U);
1726       if (DT.dominates(CB, I))
1727         UsersToUpdate.push_back(I);
1728     }
1729     if (UsersToUpdate.empty())
1730       continue;
1731     auto *G = GetFramePointer(Alloca);
1732     G->setName(Alloca->getName() + Twine(".reload.addr"));
1733 
1734     SmallVector<DbgVariableIntrinsic *, 4> DIs;
1735     findDbgUsers(DIs, Alloca);
1736     for (auto *DVI : DIs)
1737       DVI->replaceUsesOfWith(Alloca, G);
1738 
1739     for (Instruction *I : UsersToUpdate)
1740       I->replaceUsesOfWith(Alloca, G);
1741   }
1742   Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
1743   for (const auto &A : FrameData.Allocas) {
1744     AllocaInst *Alloca = A.Alloca;
1745     if (A.MayWriteBeforeCoroBegin) {
1746       // isEscaped really means potentially modified before CoroBegin.
1747       if (Alloca->isArrayAllocation())
1748         report_fatal_error(
1749             "Coroutines cannot handle copying of array allocas yet");
1750 
1751       auto *G = GetFramePointer(Alloca);
1752       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1753       Builder.CreateStore(Value, G);
1754     }
1755     // For each alias to Alloca created before CoroBegin but used after
1756     // CoroBegin, we recreate them after CoroBegin by appplying the offset
1757     // to the pointer in the frame.
1758     for (const auto &Alias : A.Aliases) {
1759       auto *FramePtr = GetFramePointer(Alloca);
1760       auto *FramePtrRaw =
1761           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1762       auto *AliasPtr = Builder.CreateGEP(
1763           Type::getInt8Ty(C), FramePtrRaw,
1764           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1765       auto *AliasPtrTyped =
1766           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1767       Alias.first->replaceUsesWithIf(
1768           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1769     }
1770   }
1771 }
1772 
1773 // Moves the values in the PHIs in SuccBB that correspong to PredBB into a new
1774 // PHI in InsertedBB.
1775 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1776                                          BasicBlock *InsertedBB,
1777                                          BasicBlock *PredBB,
1778                                          PHINode *UntilPHI = nullptr) {
1779   auto *PN = cast<PHINode>(&SuccBB->front());
1780   do {
1781     int Index = PN->getBasicBlockIndex(InsertedBB);
1782     Value *V = PN->getIncomingValue(Index);
1783     PHINode *InputV = PHINode::Create(
1784         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1785         &InsertedBB->front());
1786     InputV->addIncoming(V, PredBB);
1787     PN->setIncomingValue(Index, InputV);
1788     PN = dyn_cast<PHINode>(PN->getNextNode());
1789   } while (PN != UntilPHI);
1790 }
1791 
1792 // Rewrites the PHI Nodes in a cleanuppad.
1793 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1794                                      CleanupPadInst *CleanupPad) {
1795   // For every incoming edge to a CleanupPad we will create a new block holding
1796   // all incoming values in single-value PHI nodes. We will then create another
1797   // block to act as a dispather (as all unwind edges for related EH blocks
1798   // must be the same).
1799   //
1800   // cleanuppad:
1801   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1802   //    %3 = cleanuppad within none []
1803   //
1804   // It will create:
1805   //
1806   // cleanuppad.corodispatch
1807   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1808   //    %3 = cleanuppad within none []
1809   //    switch i8 % 2, label %unreachable
1810   //            [i8 0, label %cleanuppad.from.catchswitch
1811   //             i8 1, label %cleanuppad.from.catch.1]
1812   // cleanuppad.from.catchswitch:
1813   //    %4 = phi i32 [%0, %catchswitch]
1814   //    br %label cleanuppad
1815   // cleanuppad.from.catch.1:
1816   //    %6 = phi i32 [%1, %catch.1]
1817   //    br %label cleanuppad
1818   // cleanuppad:
1819   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1820   //                 [%6, %cleanuppad.from.catch.1]
1821 
1822   // Unreachable BB, in case switching on an invalid value in the dispatcher.
1823   auto *UnreachBB = BasicBlock::Create(
1824       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1825   IRBuilder<> Builder(UnreachBB);
1826   Builder.CreateUnreachable();
1827 
1828   // Create a new cleanuppad which will be the dispatcher.
1829   auto *NewCleanupPadBB =
1830       BasicBlock::Create(CleanupPadBB->getContext(),
1831                          CleanupPadBB->getName() + Twine(".corodispatch"),
1832                          CleanupPadBB->getParent(), CleanupPadBB);
1833   Builder.SetInsertPoint(NewCleanupPadBB);
1834   auto *SwitchType = Builder.getInt8Ty();
1835   auto *SetDispatchValuePN =
1836       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1837   CleanupPad->removeFromParent();
1838   CleanupPad->insertAfter(SetDispatchValuePN);
1839   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1840                                                 pred_size(CleanupPadBB));
1841 
1842   int SwitchIndex = 0;
1843   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1844   for (BasicBlock *Pred : Preds) {
1845     // Create a new cleanuppad and move the PHI values to there.
1846     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1847                                       CleanupPadBB->getName() +
1848                                           Twine(".from.") + Pred->getName(),
1849                                       CleanupPadBB->getParent(), CleanupPadBB);
1850     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1851     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1852                     Pred->getName());
1853     Builder.SetInsertPoint(CaseBB);
1854     Builder.CreateBr(CleanupPadBB);
1855     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1856 
1857     // Update this Pred to the new unwind point.
1858     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1859 
1860     // Setup the switch in the dispatcher.
1861     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1862     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1863     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1864     SwitchIndex++;
1865   }
1866 }
1867 
1868 static void cleanupSinglePredPHIs(Function &F) {
1869   SmallVector<PHINode *, 32> Worklist;
1870   for (auto &BB : F) {
1871     for (auto &Phi : BB.phis()) {
1872       if (Phi.getNumIncomingValues() == 1) {
1873         Worklist.push_back(&Phi);
1874       } else
1875         break;
1876     }
1877   }
1878   while (!Worklist.empty()) {
1879     auto *Phi = Worklist.pop_back_val();
1880     auto *OriginalValue = Phi->getIncomingValue(0);
1881     Phi->replaceAllUsesWith(OriginalValue);
1882   }
1883 }
1884 
1885 static void rewritePHIs(BasicBlock &BB) {
1886   // For every incoming edge we will create a block holding all
1887   // incoming values in a single PHI nodes.
1888   //
1889   // loop:
1890   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1891   //
1892   // It will create:
1893   //
1894   // loop.from.entry:
1895   //    %n.loop.pre = phi i32 [%n, %entry]
1896   //    br %label loop
1897   // loop.from.loop:
1898   //    %inc.loop.pre = phi i32 [%inc, %loop]
1899   //    br %label loop
1900   //
1901   // After this rewrite, further analysis will ignore any phi nodes with more
1902   // than one incoming edge.
1903 
1904   // TODO: Simplify PHINodes in the basic block to remove duplicate
1905   // predecessors.
1906 
1907   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1908   // so we need to create an additional "dispatcher" block.
1909   if (auto *CleanupPad =
1910           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1911     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1912     for (BasicBlock *Pred : Preds) {
1913       if (CatchSwitchInst *CS =
1914               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1915         // CleanupPad with a CatchSwitch predecessor: therefore this is an
1916         // unwind destination that needs to be handle specially.
1917         assert(CS->getUnwindDest() == &BB);
1918         (void)CS;
1919         rewritePHIsForCleanupPad(&BB, CleanupPad);
1920         return;
1921       }
1922     }
1923   }
1924 
1925   LandingPadInst *LandingPad = nullptr;
1926   PHINode *ReplPHI = nullptr;
1927   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1928     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1929     // We replace the original landing pad with a PHINode that will collect the
1930     // results from all of them.
1931     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1932     ReplPHI->takeName(LandingPad);
1933     LandingPad->replaceAllUsesWith(ReplPHI);
1934     // We will erase the original landing pad at the end of this function after
1935     // ehAwareSplitEdge cloned it in the transition blocks.
1936   }
1937 
1938   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1939   for (BasicBlock *Pred : Preds) {
1940     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1941     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1942 
1943     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1944     // that replaced the landing pad.
1945     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1946   }
1947 
1948   if (LandingPad) {
1949     // Calls to ehAwareSplitEdge function cloned the original lading pad.
1950     // No longer need it.
1951     LandingPad->eraseFromParent();
1952   }
1953 }
1954 
1955 static void rewritePHIs(Function &F) {
1956   SmallVector<BasicBlock *, 8> WorkList;
1957 
1958   for (BasicBlock &BB : F)
1959     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1960       if (PN->getNumIncomingValues() > 1)
1961         WorkList.push_back(&BB);
1962 
1963   for (BasicBlock *BB : WorkList)
1964     rewritePHIs(*BB);
1965 }
1966 
1967 // Check for instructions that we can recreate on resume as opposed to spill
1968 // the result into a coroutine frame.
1969 static bool materializable(Instruction &V) {
1970   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1971          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1972 }
1973 
1974 // Check for structural coroutine intrinsics that should not be spilled into
1975 // the coroutine frame.
1976 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1977   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1978          isa<CoroSuspendInst>(&I);
1979 }
1980 
1981 // For every use of the value that is across suspend point, recreate that value
1982 // after a suspend point.
1983 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1984                                               const SpillInfo &Spills) {
1985   for (const auto &E : Spills) {
1986     Value *Def = E.first;
1987     BasicBlock *CurrentBlock = nullptr;
1988     Instruction *CurrentMaterialization = nullptr;
1989     for (Instruction *U : E.second) {
1990       // If we have not seen this block, materialize the value.
1991       if (CurrentBlock != U->getParent()) {
1992 
1993         bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
1994         CurrentBlock = U->getParent();
1995         auto *InsertBlock = IsInCoroSuspendBlock
1996                                 ? CurrentBlock->getSinglePredecessor()
1997                                 : CurrentBlock;
1998         CurrentMaterialization = cast<Instruction>(Def)->clone();
1999         CurrentMaterialization->setName(Def->getName());
2000         CurrentMaterialization->insertBefore(
2001             IsInCoroSuspendBlock ? InsertBlock->getTerminator()
2002                                  : &*InsertBlock->getFirstInsertionPt());
2003       }
2004       if (auto *PN = dyn_cast<PHINode>(U)) {
2005         assert(PN->getNumIncomingValues() == 1 &&
2006                "unexpected number of incoming "
2007                "values in the PHINode");
2008         PN->replaceAllUsesWith(CurrentMaterialization);
2009         PN->eraseFromParent();
2010         continue;
2011       }
2012       // Replace all uses of Def in the current instruction with the
2013       // CurrentMaterialization for the block.
2014       U->replaceUsesOfWith(Def, CurrentMaterialization);
2015     }
2016   }
2017 }
2018 
2019 // Splits the block at a particular instruction unless it is the first
2020 // instruction in the block with a single predecessor.
2021 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2022   auto *BB = I->getParent();
2023   if (&BB->front() == I) {
2024     if (BB->getSinglePredecessor()) {
2025       BB->setName(Name);
2026       return BB;
2027     }
2028   }
2029   return BB->splitBasicBlock(I, Name);
2030 }
2031 
2032 // Split above and below a particular instruction so that it
2033 // will be all alone by itself in a block.
2034 static void splitAround(Instruction *I, const Twine &Name) {
2035   splitBlockIfNotFirst(I, Name);
2036   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2037 }
2038 
2039 static bool isSuspendBlock(BasicBlock *BB) {
2040   return isa<AnyCoroSuspendInst>(BB->front());
2041 }
2042 
2043 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
2044 
2045 /// Does control flow starting at the given block ever reach a suspend
2046 /// instruction before reaching a block in VisitedOrFreeBBs?
2047 static bool isSuspendReachableFrom(BasicBlock *From,
2048                                    VisitedBlocksSet &VisitedOrFreeBBs) {
2049   // Eagerly try to add this block to the visited set.  If it's already
2050   // there, stop recursing; this path doesn't reach a suspend before
2051   // either looping or reaching a freeing block.
2052   if (!VisitedOrFreeBBs.insert(From).second)
2053     return false;
2054 
2055   // We assume that we'll already have split suspends into their own blocks.
2056   if (isSuspendBlock(From))
2057     return true;
2058 
2059   // Recurse on the successors.
2060   for (auto Succ : successors(From)) {
2061     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2062       return true;
2063   }
2064 
2065   return false;
2066 }
2067 
2068 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2069 /// suspend point?
2070 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2071   // Seed the visited set with all the basic blocks containing a free
2072   // so that we won't pass them up.
2073   VisitedBlocksSet VisitedOrFreeBBs;
2074   for (auto User : AI->users()) {
2075     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2076       VisitedOrFreeBBs.insert(FI->getParent());
2077   }
2078 
2079   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2080 }
2081 
2082 /// After we split the coroutine, will the given basic block be along
2083 /// an obvious exit path for the resumption function?
2084 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2085                                               unsigned depth = 3) {
2086   // If we've bottomed out our depth count, stop searching and assume
2087   // that the path might loop back.
2088   if (depth == 0) return false;
2089 
2090   // If this is a suspend block, we're about to exit the resumption function.
2091   if (isSuspendBlock(BB)) return true;
2092 
2093   // Recurse into the successors.
2094   for (auto Succ : successors(BB)) {
2095     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2096       return false;
2097   }
2098 
2099   // If none of the successors leads back in a loop, we're on an exit/abort.
2100   return true;
2101 }
2102 
2103 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2104   // Look for a free that isn't sufficiently obviously followed by
2105   // either a suspend or a termination, i.e. something that will leave
2106   // the coro resumption frame.
2107   for (auto U : AI->users()) {
2108     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2109     if (!FI) continue;
2110 
2111     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2112       return true;
2113   }
2114 
2115   // If we never found one, we don't need a stack save.
2116   return false;
2117 }
2118 
2119 /// Turn each of the given local allocas into a normal (dynamic) alloca
2120 /// instruction.
2121 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2122                               SmallVectorImpl<Instruction*> &DeadInsts) {
2123   for (auto AI : LocalAllocas) {
2124     auto M = AI->getModule();
2125     IRBuilder<> Builder(AI);
2126 
2127     // Save the stack depth.  Try to avoid doing this if the stackrestore
2128     // is going to immediately precede a return or something.
2129     Value *StackSave = nullptr;
2130     if (localAllocaNeedsStackSave(AI))
2131       StackSave = Builder.CreateCall(
2132                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2133 
2134     // Allocate memory.
2135     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2136     Alloca->setAlignment(Align(AI->getAlignment()));
2137 
2138     for (auto U : AI->users()) {
2139       // Replace gets with the allocation.
2140       if (isa<CoroAllocaGetInst>(U)) {
2141         U->replaceAllUsesWith(Alloca);
2142 
2143       // Replace frees with stackrestores.  This is safe because
2144       // alloca.alloc is required to obey a stack discipline, although we
2145       // don't enforce that structurally.
2146       } else {
2147         auto FI = cast<CoroAllocaFreeInst>(U);
2148         if (StackSave) {
2149           Builder.SetInsertPoint(FI);
2150           Builder.CreateCall(
2151                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2152                              StackSave);
2153         }
2154       }
2155       DeadInsts.push_back(cast<Instruction>(U));
2156     }
2157 
2158     DeadInsts.push_back(AI);
2159   }
2160 }
2161 
2162 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
2163 /// This happens during the all-instructions iteration, so it must not
2164 /// delete the call.
2165 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2166                                         coro::Shape &Shape,
2167                                    SmallVectorImpl<Instruction*> &DeadInsts) {
2168   IRBuilder<> Builder(AI);
2169   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2170 
2171   for (User *U : AI->users()) {
2172     if (isa<CoroAllocaGetInst>(U)) {
2173       U->replaceAllUsesWith(Alloc);
2174     } else {
2175       auto FI = cast<CoroAllocaFreeInst>(U);
2176       Builder.SetInsertPoint(FI);
2177       Shape.emitDealloc(Builder, Alloc, nullptr);
2178     }
2179     DeadInsts.push_back(cast<Instruction>(U));
2180   }
2181 
2182   // Push this on last so that it gets deleted after all the others.
2183   DeadInsts.push_back(AI);
2184 
2185   // Return the new allocation value so that we can check for needed spills.
2186   return cast<Instruction>(Alloc);
2187 }
2188 
2189 /// Get the current swifterror value.
2190 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2191                                      coro::Shape &Shape) {
2192   // Make a fake function pointer as a sort of intrinsic.
2193   auto FnTy = FunctionType::get(ValueTy, {}, false);
2194   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2195 
2196   auto Call = Builder.CreateCall(FnTy, Fn, {});
2197   Shape.SwiftErrorOps.push_back(Call);
2198 
2199   return Call;
2200 }
2201 
2202 /// Set the given value as the current swifterror value.
2203 ///
2204 /// Returns a slot that can be used as a swifterror slot.
2205 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2206                                      coro::Shape &Shape) {
2207   // Make a fake function pointer as a sort of intrinsic.
2208   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2209                                 {V->getType()}, false);
2210   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2211 
2212   auto Call = Builder.CreateCall(FnTy, Fn, { V });
2213   Shape.SwiftErrorOps.push_back(Call);
2214 
2215   return Call;
2216 }
2217 
2218 /// Set the swifterror value from the given alloca before a call,
2219 /// then put in back in the alloca afterwards.
2220 ///
2221 /// Returns an address that will stand in for the swifterror slot
2222 /// until splitting.
2223 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2224                                                  AllocaInst *Alloca,
2225                                                  coro::Shape &Shape) {
2226   auto ValueTy = Alloca->getAllocatedType();
2227   IRBuilder<> Builder(Call);
2228 
2229   // Load the current value from the alloca and set it as the
2230   // swifterror value.
2231   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2232   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2233 
2234   // Move to after the call.  Since swifterror only has a guaranteed
2235   // value on normal exits, we can ignore implicit and explicit unwind
2236   // edges.
2237   if (isa<CallInst>(Call)) {
2238     Builder.SetInsertPoint(Call->getNextNode());
2239   } else {
2240     auto Invoke = cast<InvokeInst>(Call);
2241     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2242   }
2243 
2244   // Get the current swifterror value and store it to the alloca.
2245   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2246   Builder.CreateStore(ValueAfterCall, Alloca);
2247 
2248   return Addr;
2249 }
2250 
2251 /// Eliminate a formerly-swifterror alloca by inserting the get/set
2252 /// intrinsics and attempting to MemToReg the alloca away.
2253 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2254                                       coro::Shape &Shape) {
2255   for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2256     // swifterror values can only be used in very specific ways.
2257     // We take advantage of that here.
2258     auto User = Use.getUser();
2259     if (isa<LoadInst>(User) || isa<StoreInst>(User))
2260       continue;
2261 
2262     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2263     auto Call = cast<Instruction>(User);
2264 
2265     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2266 
2267     // Use the returned slot address as the call argument.
2268     Use.set(Addr);
2269   }
2270 
2271   // All the uses should be loads and stores now.
2272   assert(isAllocaPromotable(Alloca));
2273 }
2274 
2275 /// "Eliminate" a swifterror argument by reducing it to the alloca case
2276 /// and then loading and storing in the prologue and epilog.
2277 ///
2278 /// The argument keeps the swifterror flag.
2279 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2280                                         coro::Shape &Shape,
2281                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2282   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2283 
2284   auto ArgTy = cast<PointerType>(Arg.getType());
2285   auto ValueTy = ArgTy->getPointerElementType();
2286 
2287   // Reduce to the alloca case:
2288 
2289   // Create an alloca and replace all uses of the arg with it.
2290   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2291   Arg.replaceAllUsesWith(Alloca);
2292 
2293   // Set an initial value in the alloca.  swifterror is always null on entry.
2294   auto InitialValue = Constant::getNullValue(ValueTy);
2295   Builder.CreateStore(InitialValue, Alloca);
2296 
2297   // Find all the suspends in the function and save and restore around them.
2298   for (auto Suspend : Shape.CoroSuspends) {
2299     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2300   }
2301 
2302   // Find all the coro.ends in the function and restore the error value.
2303   for (auto End : Shape.CoroEnds) {
2304     Builder.SetInsertPoint(End);
2305     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2306     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2307   }
2308 
2309   // Now we can use the alloca logic.
2310   AllocasToPromote.push_back(Alloca);
2311   eliminateSwiftErrorAlloca(F, Alloca, Shape);
2312 }
2313 
2314 /// Eliminate all problematic uses of swifterror arguments and allocas
2315 /// from the function.  We'll fix them up later when splitting the function.
2316 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2317   SmallVector<AllocaInst*, 4> AllocasToPromote;
2318 
2319   // Look for a swifterror argument.
2320   for (auto &Arg : F.args()) {
2321     if (!Arg.hasSwiftErrorAttr()) continue;
2322 
2323     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2324     break;
2325   }
2326 
2327   // Look for swifterror allocas.
2328   for (auto &Inst : F.getEntryBlock()) {
2329     auto Alloca = dyn_cast<AllocaInst>(&Inst);
2330     if (!Alloca || !Alloca->isSwiftError()) continue;
2331 
2332     // Clear the swifterror flag.
2333     Alloca->setSwiftError(false);
2334 
2335     AllocasToPromote.push_back(Alloca);
2336     eliminateSwiftErrorAlloca(F, Alloca, Shape);
2337   }
2338 
2339   // If we have any allocas to promote, compute a dominator tree and
2340   // promote them en masse.
2341   if (!AllocasToPromote.empty()) {
2342     DominatorTree DT(F);
2343     PromoteMemToReg(AllocasToPromote, DT);
2344   }
2345 }
2346 
2347 /// retcon and retcon.once conventions assume that all spill uses can be sunk
2348 /// after the coro.begin intrinsic.
2349 static void sinkSpillUsesAfterCoroBegin(Function &F,
2350                                         const FrameDataInfo &FrameData,
2351                                         CoroBeginInst *CoroBegin) {
2352   DominatorTree Dom(F);
2353 
2354   SmallSetVector<Instruction *, 32> ToMove;
2355   SmallVector<Instruction *, 32> Worklist;
2356 
2357   // Collect all users that precede coro.begin.
2358   for (auto *Def : FrameData.getAllDefs()) {
2359     for (User *U : Def->users()) {
2360       auto Inst = cast<Instruction>(U);
2361       if (Inst->getParent() != CoroBegin->getParent() ||
2362           Dom.dominates(CoroBegin, Inst))
2363         continue;
2364       if (ToMove.insert(Inst))
2365         Worklist.push_back(Inst);
2366     }
2367   }
2368   // Recursively collect users before coro.begin.
2369   while (!Worklist.empty()) {
2370     auto *Def = Worklist.pop_back_val();
2371     for (User *U : Def->users()) {
2372       auto Inst = cast<Instruction>(U);
2373       if (Dom.dominates(CoroBegin, Inst))
2374         continue;
2375       if (ToMove.insert(Inst))
2376         Worklist.push_back(Inst);
2377     }
2378   }
2379 
2380   // Sort by dominance.
2381   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2382   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
2383     // If a dominates b it should preceed (<) b.
2384     return Dom.dominates(A, B);
2385   });
2386 
2387   Instruction *InsertPt = CoroBegin->getNextNode();
2388   for (Instruction *Inst : InsertionList)
2389     Inst->moveBefore(InsertPt);
2390 }
2391 
2392 /// For each local variable that all of its user are only used inside one of
2393 /// suspended region, we sink their lifetime.start markers to the place where
2394 /// after the suspend block. Doing so minimizes the lifetime of each variable,
2395 /// hence minimizing the amount of data we end up putting on the frame.
2396 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2397                                      SuspendCrossingInfo &Checker) {
2398   DominatorTree DT(F);
2399 
2400   // Collect all possible basic blocks which may dominate all uses of allocas.
2401   SmallPtrSet<BasicBlock *, 4> DomSet;
2402   DomSet.insert(&F.getEntryBlock());
2403   for (auto *CSI : Shape.CoroSuspends) {
2404     BasicBlock *SuspendBlock = CSI->getParent();
2405     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2406            "should have split coro.suspend into its own block");
2407     DomSet.insert(SuspendBlock->getSingleSuccessor());
2408   }
2409 
2410   for (Instruction &I : instructions(F)) {
2411     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2412     if (!AI)
2413       continue;
2414 
2415     for (BasicBlock *DomBB : DomSet) {
2416       bool Valid = true;
2417       SmallVector<Instruction *, 1> Lifetimes;
2418 
2419       auto isLifetimeStart = [](Instruction* I) {
2420         if (auto* II = dyn_cast<IntrinsicInst>(I))
2421           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2422         return false;
2423       };
2424 
2425       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2426         if (isLifetimeStart(U)) {
2427           Lifetimes.push_back(U);
2428           return true;
2429         }
2430         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2431           return false;
2432         if (isLifetimeStart(U->user_back())) {
2433           Lifetimes.push_back(U->user_back());
2434           return true;
2435         }
2436         return false;
2437       };
2438 
2439       for (User *U : AI->users()) {
2440         Instruction *UI = cast<Instruction>(U);
2441         // For all users except lifetime.start markers, if they are all
2442         // dominated by one of the basic blocks and do not cross
2443         // suspend points as well, then there is no need to spill the
2444         // instruction.
2445         if (!DT.dominates(DomBB, UI->getParent()) ||
2446             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2447           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2448           // markers.
2449           if (collectLifetimeStart(UI, AI))
2450             continue;
2451           Valid = false;
2452           break;
2453         }
2454       }
2455       // Sink lifetime.start markers to dominate block when they are
2456       // only used outside the region.
2457       if (Valid && Lifetimes.size() != 0) {
2458         // May be AI itself, when the type of AI is i8*
2459         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2460           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2461             return AI;
2462           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2463           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2464                                   DomBB->getTerminator());
2465         }(AI);
2466 
2467         auto *NewLifetime = Lifetimes[0]->clone();
2468         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2469         NewLifetime->insertBefore(DomBB->getTerminator());
2470 
2471         // All the outsided lifetime.start markers are no longer necessary.
2472         for (Instruction *S : Lifetimes)
2473           S->eraseFromParent();
2474 
2475         break;
2476       }
2477     }
2478   }
2479 }
2480 
2481 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2482                                 const SuspendCrossingInfo &Checker,
2483                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2484   for (Instruction &I : instructions(F)) {
2485     auto *AI = dyn_cast<AllocaInst>(&I);
2486     if (!AI)
2487       continue;
2488     // The PromiseAlloca will be specially handled since it needs to be in a
2489     // fixed position in the frame.
2490     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2491       continue;
2492     }
2493     DominatorTree DT(F);
2494     // The code that uses lifetime.start intrinsic does not work for functions
2495     // with loops without exit. Disable it on ABIs we know to generate such
2496     // code.
2497     bool ShouldUseLifetimeStartInfo =
2498         (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2499          Shape.ABI != coro::ABI::RetconOnce);
2500     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2501                              *Shape.CoroBegin, Checker,
2502                              ShouldUseLifetimeStartInfo};
2503     Visitor.visitPtr(*AI);
2504     if (!Visitor.getShouldLiveOnFrame())
2505       continue;
2506     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2507                          Visitor.getMayWriteBeforeCoroBegin());
2508   }
2509 }
2510 
2511 void coro::salvageDebugInfo(
2512     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2513     DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
2514   Function *F = DVI->getFunction();
2515   IRBuilder<> Builder(F->getContext());
2516   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2517   while (isa<IntrinsicInst>(InsertPt))
2518     ++InsertPt;
2519   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2520   DIExpression *Expr = DVI->getExpression();
2521   // Follow the pointer arithmetic all the way to the incoming
2522   // function argument and convert into a DIExpression.
2523   bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2524   Value *Storage = DVI->getVariableLocationOp(0);
2525   Value *OriginalStorage = Storage;
2526   while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2527     if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2528       Storage = LdInst->getOperand(0);
2529       // FIXME: This is a heuristic that works around the fact that
2530       // LLVM IR debug intrinsics cannot yet distinguish between
2531       // memory and value locations: Because a dbg.declare(alloca) is
2532       // implicitly a memory location no DW_OP_deref operation for the
2533       // last direct load from an alloca is necessary.  This condition
2534       // effectively drops the *last* DW_OP_deref in the expression.
2535       if (!SkipOutermostLoad)
2536         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2537     } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2538       Storage = StInst->getOperand(0);
2539     } else {
2540       SmallVector<uint64_t, 16> Ops;
2541       SmallVector<Value *, 0> AdditionalValues;
2542       Value *Op = llvm::salvageDebugInfoImpl(
2543           *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2544           AdditionalValues);
2545       if (!Op || !AdditionalValues.empty()) {
2546         // If salvaging failed or salvaging produced more than one location
2547         // operand, give up.
2548         break;
2549       }
2550       Storage = Op;
2551       Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2552     }
2553     SkipOutermostLoad = false;
2554   }
2555   if (!Storage)
2556     return;
2557 
2558   // Store a pointer to the coroutine frame object in an alloca so it
2559   // is available throughout the function when producing unoptimized
2560   // code. Extending the lifetime this way is correct because the
2561   // variable has been declared by a dbg.declare intrinsic.
2562   //
2563   // Avoid to create the alloca would be eliminated by optimization
2564   // passes and the corresponding dbg.declares would be invalid.
2565   if (!OptimizeFrame && !EnableReuseStorageInFrame)
2566     if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2567       auto &Cached = DbgPtrAllocaCache[Storage];
2568       if (!Cached) {
2569         Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2570                                       Arg->getName() + ".debug");
2571         Builder.CreateStore(Storage, Cached);
2572       }
2573       Storage = Cached;
2574       // FIXME: LLVM lacks nuanced semantics to differentiate between
2575       // memory and direct locations at the IR level. The backend will
2576       // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2577       // location. Thus, if there are deref and offset operations in the
2578       // expression, we need to add a DW_OP_deref at the *start* of the
2579       // expression to first load the contents of the alloca before
2580       // adjusting it with the expression.
2581       if (Expr && Expr->isComplex())
2582         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2583     }
2584 
2585   DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2586   DVI->setExpression(Expr);
2587   // We only hoist dbg.declare today since it doesn't make sense to hoist
2588   // dbg.value or dbg.addr since they do not have the same function wide
2589   // guarantees that dbg.declare does.
2590   if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
2591     if (auto *II = dyn_cast<InvokeInst>(Storage))
2592       DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
2593     else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
2594       DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
2595     else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
2596       assert(!InsertPt->isTerminator() &&
2597              "Unimaged terminator that could return a storage.");
2598       DVI->moveAfter(InsertPt);
2599     } else if (isa<Argument>(Storage))
2600       DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2601   }
2602 }
2603 
2604 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2605   // Don't eliminate swifterror in async functions that won't be split.
2606   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2607     eliminateSwiftError(F, Shape);
2608 
2609   if (Shape.ABI == coro::ABI::Switch &&
2610       Shape.SwitchLowering.PromiseAlloca) {
2611     Shape.getSwitchCoroId()->clearPromise();
2612   }
2613 
2614   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2615   // intrinsics are in their own blocks to simplify the logic of building up
2616   // SuspendCrossing data.
2617   for (auto *CSI : Shape.CoroSuspends) {
2618     if (auto *Save = CSI->getCoroSave())
2619       splitAround(Save, "CoroSave");
2620     splitAround(CSI, "CoroSuspend");
2621   }
2622 
2623   // Put CoroEnds into their own blocks.
2624   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2625     splitAround(CE, "CoroEnd");
2626 
2627     // Emit the musttail call function in a new block before the CoroEnd.
2628     // We do this here so that the right suspend crossing info is computed for
2629     // the uses of the musttail call function call. (Arguments to the coro.end
2630     // instructions would be ignored)
2631     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2632       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2633       if (!MustTailCallFn)
2634         continue;
2635       IRBuilder<> Builder(AsyncEnd);
2636       SmallVector<Value *, 8> Args(AsyncEnd->args());
2637       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2638       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2639                                       Arguments, Builder);
2640       splitAround(Call, "MustTailCall.Before.CoroEnd");
2641     }
2642   }
2643 
2644   // Later code makes structural assumptions about single predecessors phis e.g
2645   // that they are not live accross a suspend point.
2646   cleanupSinglePredPHIs(F);
2647 
2648   // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
2649   // never has its definition separated from the PHI by the suspend point.
2650   rewritePHIs(F);
2651 
2652   // Build suspend crossing info.
2653   SuspendCrossingInfo Checker(F, Shape);
2654 
2655   IRBuilder<> Builder(F.getContext());
2656   FrameDataInfo FrameData;
2657   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2658   SmallVector<Instruction*, 4> DeadInstructions;
2659 
2660   {
2661     SpillInfo Spills;
2662     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2663       // See if there are materializable instructions across suspend points.
2664       for (Instruction &I : instructions(F))
2665         if (materializable(I)) {
2666           for (User *U : I.users())
2667             if (Checker.isDefinitionAcrossSuspend(I, U))
2668               Spills[&I].push_back(cast<Instruction>(U));
2669 
2670           // Manually add dbg.value metadata uses of I.
2671           SmallVector<DbgValueInst *, 16> DVIs;
2672           findDbgValues(DVIs, &I);
2673           for (auto *DVI : DVIs)
2674             if (Checker.isDefinitionAcrossSuspend(I, DVI))
2675               Spills[&I].push_back(DVI);
2676         }
2677 
2678       if (Spills.empty())
2679         break;
2680 
2681       // Rewrite materializable instructions to be materialized at the use
2682       // point.
2683       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2684       rewriteMaterializableInstructions(Builder, Spills);
2685       Spills.clear();
2686     }
2687   }
2688 
2689   if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2690       Shape.ABI != coro::ABI::RetconOnce)
2691     sinkLifetimeStartMarkers(F, Shape, Checker);
2692 
2693   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2694     collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2695   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2696 
2697   // Collect the spills for arguments and other not-materializable values.
2698   for (Argument &A : F.args())
2699     for (User *U : A.users())
2700       if (Checker.isDefinitionAcrossSuspend(A, U))
2701         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2702 
2703   for (Instruction &I : instructions(F)) {
2704     // Values returned from coroutine structure intrinsics should not be part
2705     // of the Coroutine Frame.
2706     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2707       continue;
2708 
2709     // The Coroutine Promise always included into coroutine frame, no need to
2710     // check for suspend crossing.
2711     if (Shape.ABI == coro::ABI::Switch &&
2712         Shape.SwitchLowering.PromiseAlloca == &I)
2713       continue;
2714 
2715     // Handle alloca.alloc specially here.
2716     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2717       // Check whether the alloca's lifetime is bounded by suspend points.
2718       if (isLocalAlloca(AI)) {
2719         LocalAllocas.push_back(AI);
2720         continue;
2721       }
2722 
2723       // If not, do a quick rewrite of the alloca and then add spills of
2724       // the rewritten value.  The rewrite doesn't invalidate anything in
2725       // Spills because the other alloca intrinsics have no other operands
2726       // besides AI, and it doesn't invalidate the iteration because we delay
2727       // erasing AI.
2728       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2729 
2730       for (User *U : Alloc->users()) {
2731         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2732           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2733       }
2734       continue;
2735     }
2736 
2737     // Ignore alloca.get; we process this as part of coro.alloca.alloc.
2738     if (isa<CoroAllocaGetInst>(I))
2739       continue;
2740 
2741     if (isa<AllocaInst>(I))
2742       continue;
2743 
2744     for (User *U : I.users())
2745       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2746         // We cannot spill a token.
2747         if (I.getType()->isTokenTy())
2748           report_fatal_error(
2749               "token definition is separated from the use by a suspend point");
2750         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2751       }
2752   }
2753 
2754   // We don't want the layout of coroutine frame to be affected
2755   // by debug information. So we only choose to salvage DbgValueInst for
2756   // whose value is already in the frame.
2757   // We would handle the dbg.values for allocas specially
2758   for (auto &Iter : FrameData.Spills) {
2759     auto *V = Iter.first;
2760     SmallVector<DbgValueInst *, 16> DVIs;
2761     findDbgValues(DVIs, V);
2762     llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
2763       if (Checker.isDefinitionAcrossSuspend(*V, DVI))
2764         FrameData.Spills[V].push_back(DVI);
2765     });
2766   }
2767 
2768   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2769   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2770       Shape.ABI == coro::ABI::Async)
2771     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2772   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2773   createFramePtr(Shape);
2774   // For now, this works for C++ programs only.
2775   buildFrameDebugInfo(F, Shape, FrameData);
2776   insertSpills(FrameData, Shape);
2777   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2778 
2779   for (auto I : DeadInstructions)
2780     I->eraseFromParent();
2781 }
2782