1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
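//
// As a purely illustrative sketch (names and the field index are made up),
// for a value %x that is live across a suspend point the rewrite conceptually
// turns
//
//   %x = call i32 @produce()
//   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//   ...
//   call void @consume(i32 %x)
//
// into
//
//   %x = call i32 @produce()
//   %x.addr = getelementptr inbounds %f.Frame, %f.Frame* %frame, i32 0, i32 2
//   store i32 %x, i32* %x.addr              ; spill at the definition
//   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//   ...
//   %x.reload = load i32, i32* %x.addr      ; reload at the use
//   call void @consume(i32 %x.reload)
//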
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/ScopeExit.h"
20 #include "llvm/ADT/SmallString.h"
21 #include "llvm/Analysis/PtrUseVisitor.h"
22 #include "llvm/Analysis/StackLifetime.h"
23 #include "llvm/Config/llvm-config.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/DIBuilder.h"
26 #include "llvm/IR/DebugInfo.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstIterator.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/OptimizedStructLayout.h"
34 #include "llvm/Support/circular_raw_ostream.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
37 #include "llvm/Transforms/Utils/Local.h"
38 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
39 #include <algorithm>
40 
41 using namespace llvm;
42 
43 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
44 // "coro-frame", which results in leaner debug spew.
45 #define DEBUG_TYPE "coro-suspend-crossing"
46 
47 static cl::opt<bool> EnableReuseStorageInFrame(
48     "reuse-storage-in-coroutine-frame", cl::Hidden,
49     cl::desc(
        "Enable the optimization which would reuse the storage in the coroutine"
        " frame for allocas whose lifetime ranges do not overlap, for testing "
        "purposes"),
52     llvm::cl::init(false));
53 
54 enum { SmallVectorThreshold = 32 };
55 
// Provides a two-way mapping between blocks and numbers.
57 namespace {
58 class BlockToIndexMapping {
59   SmallVector<BasicBlock *, SmallVectorThreshold> V;
60 
61 public:
62   size_t size() const { return V.size(); }
63 
64   BlockToIndexMapping(Function &F) {
65     for (BasicBlock &BB : F)
66       V.push_back(&BB);
67     llvm::sort(V);
68   }
69 
70   size_t blockToIndex(BasicBlock *BB) const {
71     auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
73     return I - V.begin();
74   }
75 
76   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
77 };
78 } // end anonymous namespace
79 
// The SuspendCrossingInfo maintains data that allows answering the question of
// whether, given two BasicBlocks A and B, there is a path from A to B that
82 // passes through a suspend point.
83 //
84 // For every basic block 'i' it maintains a BlockData that consists of:
85 //   Consumes:  a bit vector which contains a set of indices of blocks that can
86 //              reach block 'i'
87 //   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', where at least one path crosses a suspend point
89 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
90 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
91 //
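// For illustration (hypothetical block names), consider a straight-line CFG
//
//   entry -> susp -> after        ; 'susp' contains the suspend point
//
// After the propagation in the constructor converges:
//
//   Consumes(after) = { entry, susp, after }
//   Kills(after)    = { entry, susp }
//
// so hasPathCrossingSuspendPoint(entry, after) is true: a value defined in
// 'entry' and used in 'after' has to be spilled to the coroutine frame.
//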
92 namespace {
93 struct SuspendCrossingInfo {
94   BlockToIndexMapping Mapping;
95 
96   struct BlockData {
97     BitVector Consumes;
98     BitVector Kills;
99     bool Suspend = false;
100     bool End = false;
101   };
102   SmallVector<BlockData, SmallVectorThreshold> Block;
103 
104   iterator_range<succ_iterator> successors(BlockData const &BD) const {
105     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
106     return llvm::successors(BB);
107   }
108 
109   BlockData &getBlockData(BasicBlock *BB) {
110     return Block[Mapping.blockToIndex(BB)];
111   }
112 
113   void dump() const;
114   void dump(StringRef Label, BitVector const &BV) const;
115 
116   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
117 
118   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
119     size_t const DefIndex = Mapping.blockToIndex(DefBB);
120     size_t const UseIndex = Mapping.blockToIndex(UseBB);
121 
122     bool const Result = Block[UseIndex].Kills[DefIndex];
123     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
124                       << " answer is " << Result << "\n");
125     return Result;
126   }
127 
128   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
129     auto *I = cast<Instruction>(U);
130 
131     // We rewrote PHINodes, so that only the ones with exactly one incoming
132     // value need to be analyzed.
133     if (auto *PN = dyn_cast<PHINode>(I))
134       if (PN->getNumIncomingValues() > 1)
135         return false;
136 
137     BasicBlock *UseBB = I->getParent();
138 
139     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
140     // llvm.coro.suspend.async as if they were uses in the suspend's single
141     // predecessor: the uses conceptually occur before the suspend.
142     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
143       UseBB = UseBB->getSinglePredecessor();
144       assert(UseBB && "should have split coro.suspend into its own block");
145     }
146 
147     return hasPathCrossingSuspendPoint(DefBB, UseBB);
148   }
149 
150   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
151     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
152   }
153 
154   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
155     auto *DefBB = I.getParent();
156 
157     // As a special case, treat values produced by an llvm.coro.suspend.*
158     // as if they were defined in the single successor: the uses
159     // conceptually occur after the suspend.
160     if (isa<AnyCoroSuspendInst>(I)) {
161       DefBB = DefBB->getSingleSuccessor();
162       assert(DefBB && "should have split coro.suspend into its own block");
163     }
164 
165     return isDefinitionAcrossSuspend(DefBB, U);
166   }
167 
168   bool isDefinitionAcrossSuspend(Value &V, User *U) const {
169     if (auto *Arg = dyn_cast<Argument>(&V))
170       return isDefinitionAcrossSuspend(*Arg, U);
171     if (auto *Inst = dyn_cast<Instruction>(&V))
172       return isDefinitionAcrossSuspend(*Inst, U);
173 
174     llvm_unreachable(
        "Coroutines can only collect Argument and Instruction values now.");
176   }
177 };
178 } // end anonymous namespace
179 
180 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
181 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
182                                                 BitVector const &BV) const {
183   dbgs() << Label << ":";
184   for (size_t I = 0, N = BV.size(); I < N; ++I)
185     if (BV[I])
186       dbgs() << " " << Mapping.indexToBlock(I)->getName();
187   dbgs() << "\n";
188 }
189 
190 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
191   for (size_t I = 0, N = Block.size(); I < N; ++I) {
192     BasicBlock *const B = Mapping.indexToBlock(I);
193     dbgs() << B->getName() << ":\n";
194     dump("   Consumes", Block[I].Consumes);
195     dump("      Kills", Block[I].Kills);
196   }
197   dbgs() << "\n";
198 }
199 #endif
200 
201 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
202     : Mapping(F) {
203   const size_t N = Mapping.size();
204   Block.resize(N);
205 
206   // Initialize every block so that it consumes itself
207   for (size_t I = 0; I < N; ++I) {
208     auto &B = Block[I];
209     B.Consumes.resize(N);
210     B.Kills.resize(N);
211     B.Consumes.set(I);
212   }
213 
214   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
215   // the code beyond coro.end is reachable during initial invocation of the
216   // coroutine.
217   for (auto *CE : Shape.CoroEnds)
218     getBlockData(CE->getParent()).End = true;
219 
220   // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
222   // between coro.save and coro.suspend may resume the coroutine and all of the
223   // state needs to be saved by that time.
224   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
225     BasicBlock *SuspendBlock = BarrierInst->getParent();
226     auto &B = getBlockData(SuspendBlock);
227     B.Suspend = true;
228     B.Kills |= B.Consumes;
229   };
230   for (auto *CSI : Shape.CoroSuspends) {
231     markSuspendBlock(CSI);
232     if (auto *Save = CSI->getCoroSave())
233       markSuspendBlock(Save);
234   }
235 
236   // Iterate propagating consumes and kills until they stop changing.
237   int Iteration = 0;
238   (void)Iteration;
239 
240   bool Changed;
241   do {
242     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
243     LLVM_DEBUG(dbgs() << "==============\n");
244 
245     Changed = false;
246     for (size_t I = 0; I < N; ++I) {
247       auto &B = Block[I];
248       for (BasicBlock *SI : successors(B)) {
249 
250         auto SuccNo = Mapping.blockToIndex(SI);
251 
        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
254         auto &S = Block[SuccNo];
255         auto SavedConsumes = S.Consumes;
256         auto SavedKills = S.Kills;
257 
258         // Propagate Kills and Consumes from block B into its successor S.
259         S.Consumes |= B.Consumes;
260         S.Kills |= B.Kills;
261 
        // If block B is a suspend block, it should propagate kills into its
        // successors for every block B consumes.
264         if (B.Suspend) {
265           S.Kills |= B.Consumes;
266         }
267         if (S.Suspend) {
268           // If block S is a suspend block, it should kill all of the blocks it
269           // consumes.
270           S.Kills |= S.Consumes;
271         } else if (S.End) {
272           // If block S is an end block, it should not propagate kills as the
273           // blocks following coro.end() are reached during initial invocation
274           // of the coroutine while all the data are still available on the
275           // stack or in the registers.
276           S.Kills.reset();
277         } else {
        // This is reached when block S is neither a suspend block nor a
        // coro.end block; make sure it is not in its own kill set.
280           S.Kills.reset(SuccNo);
281         }
282 
283         // See if anything changed.
284         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
285 
286         if (S.Kills != SavedKills) {
287           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
288                             << "\n");
289           LLVM_DEBUG(dump("S.Kills", S.Kills));
290           LLVM_DEBUG(dump("SavedKills", SavedKills));
291         }
292         if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
294           LLVM_DEBUG(dump("S.Consume", S.Consumes));
295           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
296         }
297       }
298     }
299   } while (Changed);
300   LLVM_DEBUG(dump());
301 }
302 
303 #undef DEBUG_TYPE // "coro-suspend-crossing"
304 #define DEBUG_TYPE "coro-frame"
305 
306 namespace {
307 class FrameTypeBuilder;
308 // Mapping from the to-be-spilled value to all the users that need reload.
309 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
310 struct AllocaInfo {
311   AllocaInst *Alloca;
312   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
313   bool MayWriteBeforeCoroBegin;
314   AllocaInfo(AllocaInst *Alloca,
315              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
316              bool MayWriteBeforeCoroBegin)
317       : Alloca(Alloca), Aliases(std::move(Aliases)),
318         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
319 };
320 struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
323   SpillInfo Spills;
324   // Allocas contains all values defined as allocas that need to live in the
325   // frame.
326   SmallVector<AllocaInfo, 8> Allocas;
327 
328   SmallVector<Value *, 8> getAllDefs() const {
329     SmallVector<Value *, 8> Defs;
330     for (const auto &P : Spills)
331       Defs.push_back(P.first);
332     for (const auto &A : Allocas)
333       Defs.push_back(A.Alloca);
334     return Defs;
335   }
336 
337   uint32_t getFieldIndex(Value *V) const {
338     auto Itr = FieldIndexMap.find(V);
339     assert(Itr != FieldIndexMap.end() &&
340            "Value does not have a frame field index");
341     return Itr->second;
342   }
343 
344   void setFieldIndex(Value *V, uint32_t Index) {
345     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
346            "Cannot set the index for the same field twice.");
347     FieldIndexMap[V] = Index;
348   }
349 
350   uint64_t getAlign(Value *V) const {
351     auto Iter = FieldAlignMap.find(V);
352     assert(Iter != FieldAlignMap.end());
353     return Iter->second;
354   }
355 
356   void setAlign(Value *V, uint64_t Align) {
357     assert(FieldAlignMap.count(V) == 0);
358     FieldAlignMap.insert({V, Align});
359   }
360 
361   uint64_t getOffset(Value *V) const {
362     auto Iter = FieldOffsetMap.find(V);
363     assert(Iter != FieldOffsetMap.end());
364     return Iter->second;
365   }
366 
367   void setOffset(Value *V, uint64_t Offset) {
368     assert(FieldOffsetMap.count(V) == 0);
369     FieldOffsetMap.insert({V, Offset});
370   }
371 
372   // Remap the index of every field in the frame, using the final layout index.
373   void updateLayoutIndex(FrameTypeBuilder &B);
374 
375 private:
376   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
377   // twice by mistake.
378   bool LayoutIndexUpdateStarted = false;
379   // Map from values to their slot indexes on the frame. They will be first set
380   // with their original insertion field index. After the frame is built, their
381   // indexes will be updated into the final layout index.
382   DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
389 };
390 } // namespace
391 
392 #ifndef NDEBUG
393 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
394   dbgs() << "------------- " << Title << "--------------\n";
395   for (const auto &E : Spills) {
396     E.first->dump();
397     dbgs() << "   user: ";
398     for (auto *I : E.second)
399       I->dump();
400   }
401 }
402 
403 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
404   dbgs() << "------------- Allocas --------------\n";
405   for (const auto &A : Allocas) {
406     A.Alloca->dump();
407   }
408 }
409 #endif
410 
411 namespace {
412 using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
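//
// For example (a hypothetical case): `%x = alloca i32, align 8` requests an
// alignment of 8 while i32's natural (ABI) alignment is typically 4; the
// builder records the requested alignment for layout and the type alignment
// to decide whether padding or a packed struct is needed.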
417 class FrameTypeBuilder {
418 private:
419   struct Field {
420     uint64_t Size;
421     uint64_t Offset;
422     Type *Ty;
423     FieldIDType LayoutFieldIndex;
424     Align Alignment;
425     Align TyAlignment;
426   };
427 
428   const DataLayout &DL;
429   LLVMContext &Context;
430   uint64_t StructSize = 0;
431   Align StructAlign;
432   bool IsFinished = false;
433 
434   Optional<Align> MaxFrameAlignment;
435 
436   SmallVector<Field, 8> Fields;
437   DenseMap<Value*, unsigned> FieldIndexByKey;
438 
439 public:
440   FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
441                    Optional<Align> MaxFrameAlignment)
442       : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
443 
444   /// Add a field to this structure for the storage of an `alloca`
445   /// instruction.
446   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
447                                                bool IsHeader = false) {
448     Type *Ty = AI->getAllocatedType();
449 
450     // Make an array type if this is a static array allocation.
451     if (AI->isArrayAllocation()) {
452       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
453         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
454       else
        report_fatal_error("Coroutines cannot handle non-static allocas yet");
456     }
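    // For example (purely illustrative): `%a = alloca i32, i32 4` is stored
    // in the frame as a single [4 x i32] field.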
457 
458     return addField(Ty, AI->getAlign(), IsHeader);
459   }
460 
  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
464   ///
465   ///     cppcoro::task<void> alternative_paths(bool cond) {
466   ///         if (cond) {
467   ///             big_structure a;
468   ///             process(a);
469   ///             co_await something();
470   ///         } else {
471   ///             big_structure b;
472   ///             process2(b);
473   ///             co_await something();
474   ///         }
475   ///     }
476   ///
477   /// We want to put variable a and variable b in the same slot to
478   /// reduce the size of coroutine frame.
479   ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapped sets, in order to put allocas
  /// in the same non-overlapped set into the same slot in the coroutine
  /// frame. Then we add a field for each non-overlapped set, using the
  /// largest type in the set as the field type.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may be different from the order in the source code.
488   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
489                           coro::Shape &Shape);
490 
491   /// Add a field to this structure.
492   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
493                                       bool IsHeader = false,
494                                       bool IsSpillOfValue = false) {
495     assert(!IsFinished && "adding fields to a finished builder");
496     assert(Ty && "must provide a type for a field");
497 
498     // The field size is always the alloc size of the type.
499     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
500 
    // For an alloca with size=0, we don't need to add a field; it can just
    // point to any index in the frame. Use index 0.
503     if (FieldSize == 0) {
504       return 0;
505     }
506 
507     // The field alignment might not be the type alignment, but we need
508     // to remember the type alignment anyway to build the type.
509     // If we are spilling values we don't need to worry about ABI alignment
510     // concerns.
511     auto ABIAlign = DL.getABITypeAlign(Ty);
512     Align TyAlignment =
513         (IsSpillOfValue && MaxFrameAlignment)
514             ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
515             : ABIAlign;
516     if (!FieldAlignment) {
517       FieldAlignment = TyAlignment;
518     }
519 
520     // Lay out header fields immediately.
521     uint64_t Offset;
522     if (IsHeader) {
523       Offset = alignTo(StructSize, FieldAlignment);
524       StructSize = Offset + FieldSize;
525 
526     // Everything else has a flexible offset.
527     } else {
528       Offset = OptimizedStructLayoutField::FlexibleOffset;
529     }
530 
531     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
532     return Fields.size() - 1;
533   }
534 
535   /// Finish the layout and set the body on the given type.
536   void finish(StructType *Ty);
537 
538   uint64_t getStructSize() const {
539     assert(IsFinished && "not yet finished!");
540     return StructSize;
541   }
542 
543   Align getStructAlign() const {
544     assert(IsFinished && "not yet finished!");
545     return StructAlign;
546   }
547 
548   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
549     assert(IsFinished && "not yet finished!");
550     return Fields[Id].LayoutFieldIndex;
551   }
552 
553   Field getLayoutField(FieldIDType Id) const {
554     assert(IsFinished && "not yet finished!");
555     return Fields[Id];
556   }
557 };
558 } // namespace
559 
560 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
561   auto Updater = [&](Value *I) {
562     auto Field = B.getLayoutField(getFieldIndex(I));
563     setFieldIndex(I, Field.LayoutFieldIndex);
564     setAlign(I, Field.Alignment.value());
565     setOffset(I, Field.Offset);
566   };
567   LayoutIndexUpdateStarted = true;
568   for (auto &S : Spills)
569     Updater(S.first);
570   for (const auto &A : Allocas)
571     Updater(A.Alloca);
572   LayoutIndexUpdateStarted = false;
573 }
574 
575 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
576                                           FrameDataInfo &FrameData,
577                                           coro::Shape &Shape) {
578   using AllocaSetType = SmallVector<AllocaInst *, 4>;
579   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
580 
  // We need to add fields for the allocas at the end of this function.
582   auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
583     for (auto AllocaList : NonOverlapedAllocas) {
584       auto *LargestAI = *AllocaList.begin();
585       FieldIDType Id = addFieldForAlloca(LargestAI);
586       for (auto *Alloca : AllocaList)
587         FrameData.setFieldIndex(Alloca, Id);
588     }
589   });
590 
591   if (!Shape.OptimizeFrame && !EnableReuseStorageInFrame) {
592     for (const auto &A : FrameData.Allocas) {
593       AllocaInst *Alloca = A.Alloca;
594       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
595     }
596     return;
597   }
598 
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap
  // in the blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas into the same
  // slot.
611   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
612   for (auto CoroSuspendInst : Shape.CoroSuspends) {
613     for (auto U : CoroSuspendInst->users()) {
614       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
615         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
616         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
617         SWI->setDefaultDest(SWI->getSuccessor(1));
618       }
619     }
620   }
621 
622   auto ExtractAllocas = [&]() {
623     AllocaSetType Allocas;
624     Allocas.reserve(FrameData.Allocas.size());
625     for (const auto &A : FrameData.Allocas)
626       Allocas.push_back(A.Alloca);
627     return Allocas;
628   };
629   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
630                                       StackLifetime::LivenessType::May);
631   StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
633     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
634         StackLifetimeAnalyzer.getLiveRange(AI2));
635   };
636   auto GetAllocaSize = [&](const AllocaInfo &A) {
637     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
638     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
639     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
640     return RetSize->getFixedSize();
641   };
  // Put larger allocas in the front, so that larger allocas have higher
  // priority to be merged, which can potentially save more space. This also
  // keeps each AllocaSet ordered, so we can easily get the largest alloca in
  // an AllocaSet.
646   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
647     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
648   });
649   for (const auto &A : FrameData.Allocas) {
650     AllocaInst *Alloca = A.Alloca;
651     bool Merged = false;
    // Try to find an existing non-overlapped alloca set that the alloca does
    // not interfere with. If such a set exists, insert the alloca into that
    // set.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() &&
             "Processing alloca set should not be empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the address
      // of A satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process, but they seem hard
      // to implement and would benefit little.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
672       if (!CouldMerge)
673         continue;
674       AllocaSet.push_back(Alloca);
675       Merged = true;
676       break;
677     }
678     if (!Merged) {
679       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
680     }
681   }
  // Restore the default target destination for each switch statement
  // modified above.
684   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
685     SwitchInst *SWI = SwitchAndDefaultDest.first;
686     BasicBlock *DestBB = SwitchAndDefaultDest.second;
687     SWI->setDefaultDest(DestBB);
688   }
  // This debug output tells us which allocas were merged into one slot.
690   LLVM_DEBUG(for (auto &AllocaSet
691                   : NonOverlapedAllocas) {
692     if (AllocaSet.size() > 1) {
693       dbgs() << "In Function:" << F.getName() << "\n";
694       dbgs() << "Find Union Set "
695              << "\n";
696       dbgs() << "\tAllocas are \n";
697       for (auto Alloca : AllocaSet)
698         dbgs() << "\t\t" << *Alloca << "\n";
699     }
700   });
701 }
702 
703 void FrameTypeBuilder::finish(StructType *Ty) {
704   assert(!IsFinished && "already finished!");
705 
706   // Prepare the optimal-layout field array.
707   // The Id in the layout field is a pointer to our Field for it.
708   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
709   LayoutFields.reserve(Fields.size());
710   for (auto &Field : Fields) {
711     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
712                               Field.Offset);
713   }
714 
715   // Perform layout.
716   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
717   StructSize = SizeAndAlign.first;
718   StructAlign = SizeAndAlign.second;
719 
720   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
721     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
722   };
723 
724   // We need to produce a packed struct type if there's a field whose
725   // assigned offset isn't a multiple of its natural type alignment.
726   bool Packed = [&] {
727     for (auto &LayoutField : LayoutFields) {
728       auto &F = getField(LayoutField);
729       if (!isAligned(F.TyAlignment, LayoutField.Offset))
730         return true;
731     }
732     return false;
733   }();
734 
735   // Build the struct body.
736   SmallVector<Type*, 16> FieldTypes;
737   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
738   uint64_t LastOffset = 0;
739   for (auto &LayoutField : LayoutFields) {
740     auto &F = getField(LayoutField);
741 
742     auto Offset = LayoutField.Offset;
743 
744     // Add a padding field if there's a padding gap and we're either
745     // building a packed struct or the padding gap is more than we'd
746     // get from aligning to the field type's natural alignment.
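    // For example (a purely hypothetical layout): if the previous field ends
    // at offset 8 and an i64 field with natural alignment 8 was assigned to
    // offset 12, the struct is packed and the 4-byte gap is emitted below as
    // an explicit [4 x i8] padding field, so the IR element offsets still
    // match the computed layout.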
747     assert(Offset >= LastOffset);
748     if (Offset != LastOffset) {
749       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
750         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
751                                             Offset - LastOffset));
752     }
753 
754     F.Offset = Offset;
755     F.LayoutFieldIndex = FieldTypes.size();
756 
757     FieldTypes.push_back(F.Ty);
758     LastOffset = Offset + F.Size;
759   }
760 
761   Ty->setBody(FieldTypes, Packed);
762 
763 #ifndef NDEBUG
764   // Check that the IR layout matches the offsets we expect.
765   auto Layout = DL.getStructLayout(Ty);
766   for (auto &F : Fields) {
767     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
768     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
769   }
770 #endif
771 
772   IsFinished = true;
773 }
774 
775 static void cacheDIVar(FrameDataInfo &FrameData,
776                        DenseMap<Value *, DILocalVariable *> &DIVarCache) {
777   for (auto *V : FrameData.getAllDefs()) {
778     if (DIVarCache.find(V) != DIVarCache.end())
779       continue;
780 
781     auto DDIs = FindDbgDeclareUses(V);
782     auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
783       return DDI->getExpression()->getNumElements() == 0;
784     });
785     if (I != DDIs.end())
786       DIVarCache.insert({V, (*I)->getVariable()});
787   }
788 }
789 
/// Create a name for the given Type. It uses MDString to store the newly
/// created string to avoid a memory leak.
792 static StringRef solveTypeName(Type *Ty) {
793   if (Ty->isIntegerTy()) {
    // The longest common name may be '__int_128', which has 9 characters.
795     SmallString<16> Buffer;
796     raw_svector_ostream OS(Buffer);
797     OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
798     auto *MDName = MDString::get(Ty->getContext(), OS.str());
799     return MDName->getString();
800   }
801 
802   if (Ty->isFloatingPointTy()) {
803     if (Ty->isFloatTy())
804       return "__float_";
805     if (Ty->isDoubleTy())
806       return "__double_";
807     return "__floating_type_";
808   }
809 
810   if (Ty->isPointerTy()) {
811     auto *PtrTy = cast<PointerType>(Ty);
812     Type *PointeeTy = PtrTy->getPointerElementType();
813     auto Name = solveTypeName(PointeeTy);
814     if (Name == "UnknownType")
815       return "PointerType";
816     SmallString<16> Buffer;
817     Twine(Name + "_Ptr").toStringRef(Buffer);
818     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
819     return MDName->getString();
820   }
821 
822   if (Ty->isStructTy()) {
823     if (!cast<StructType>(Ty)->hasName())
824       return "__LiteralStructType_";
825 
826     auto Name = Ty->getStructName();
827 
828     SmallString<16> Buffer(Name);
829     for_each(Buffer, [](auto &Iter) {
830       if (Iter == '.' || Iter == ':')
831         Iter = '_';
832     });
833     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
834     return MDName->getString();
835   }
836 
837   return "UnknownType";
838 }
839 
840 static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
841                            const DataLayout &Layout, DIScope *Scope,
842                            unsigned LineNum,
843                            DenseMap<Type *, DIType *> &DITypeCache) {
844   if (DIType *DT = DITypeCache.lookup(Ty))
845     return DT;
846 
847   StringRef Name = solveTypeName(Ty);
848 
849   DIType *RetType = nullptr;
850 
851   if (Ty->isIntegerTy()) {
852     auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
853     RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
854                                       llvm::DINode::FlagArtificial);
855   } else if (Ty->isFloatingPointTy()) {
856     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
857                                       dwarf::DW_ATE_float,
858                                       llvm::DINode::FlagArtificial);
859   } else if (Ty->isPointerTy()) {
    // Construct a BasicType instead of a PointerType to avoid an infinite
    // recursion problem.
862     // For example, we would be in trouble if we traverse recursively:
863     //
864     //  struct Node {
865     //      Node* ptr;
866     //  };
867     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
868                                       dwarf::DW_ATE_address,
869                                       llvm::DINode::FlagArtificial);
870   } else if (Ty->isStructTy()) {
871     auto *DIStruct = Builder.createStructType(
872         Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
873         Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
874         llvm::DINodeArray());
875 
876     auto *StructTy = cast<StructType>(Ty);
877     SmallVector<Metadata *, 16> Elements;
878     for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
879       DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
880                                  Scope, LineNum, DITypeCache);
881       assert(DITy);
882       Elements.push_back(Builder.createMemberType(
883           Scope, DITy->getName(), Scope->getFile(), LineNum,
884           DITy->getSizeInBits(), DITy->getAlignInBits(),
885           Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
886           llvm::DINode::FlagArtificial, DITy));
887     }
888 
889     Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
890 
891     RetType = DIStruct;
892   } else {
893     LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
894     SmallString<32> Buffer;
895     raw_svector_ostream OS(Buffer);
896     OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
897     RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
898                                       dwarf::DW_ATE_address,
899                                       llvm::DINode::FlagArtificial);
900   }
901 
902   DITypeCache.insert({Ty, RetType});
903   return RetType;
904 }
905 
906 /// Build artificial debug info for C++ coroutine frames to allow users to
907 /// inspect the contents of the frame directly
908 ///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine frame
/// is constructed in the following way:
/// 1. For every value in the frame, we search the uses of dbg.declare to find
///    the corresponding debug variable for the value. If we can find the
///    debug variable, we can get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we fall back to building
///    the DIType from the LLVM type. We do this in solveDIType. We only handle
///    integer, floating-point, pointer and struct types for now.
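///
/// As a rough illustration only (member order, exact types and the promise
/// member are assumptions here, not guarantees), the resulting artificial
/// type may look to a debugger roughly like:
///
///   struct __coro_frame_ty {
///     void (*__resume_fn)();
///     void (*__destroy_fn)();
///     promise_type promise;         // if the coroutine has a promise
///     unsigned char __coro_index;   // suspend index, at least 8 bits wide
///     ...                           // spilled values and reused alloca slots
///   };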
918 static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
919                                 FrameDataInfo &FrameData) {
920   DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info. So we don't need to generate debug info for the
  // frame either.
924   if (!DIS || !DIS->getUnit() ||
925       !dwarf::isCPlusPlus(
926           (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
927     return;
928 
929   assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");
931 
932   DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
933 
934   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
935   assert(PromiseAlloca &&
936          "Coroutine with switch ABI should own Promise alloca");
937 
938   TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
939   if (DIs.empty())
940     return;
941 
942   DbgDeclareInst *PromiseDDI = DIs.front();
943   DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
944   DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
945   DIFile *DFile = PromiseDIScope->getFile();
946   DILocation *DILoc = PromiseDDI->getDebugLoc().get();
947   unsigned LineNum = PromiseDIVariable->getLine();
948 
949   DICompositeType *FrameDITy = DBuilder.createStructType(
950       DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
951       Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
952       llvm::DINodeArray());
953   StructType *FrameTy = Shape.FrameTy;
954   SmallVector<Metadata *, 16> Elements;
955   DataLayout Layout = F.getParent()->getDataLayout();
956 
957   DenseMap<Value *, DILocalVariable *> DIVarCache;
958   cacheDIVar(FrameData, DIVarCache);
959 
960   unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
961   unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
962   unsigned IndexIndex = Shape.SwitchLowering.IndexField;
963 
964   DenseMap<unsigned, StringRef> NameCache;
965   NameCache.insert({ResumeIndex, "__resume_fn"});
966   NameCache.insert({DestroyIndex, "__destroy_fn"});
967   NameCache.insert({IndexIndex, "__coro_index"});
968 
969   Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
970        *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
971        *IndexTy = FrameTy->getElementType(IndexIndex);
972 
973   DenseMap<unsigned, DIType *> TyCache;
974   TyCache.insert({ResumeIndex,
975                   DBuilder.createBasicType("__resume_fn",
976                                            Layout.getTypeSizeInBits(ResumeFnTy),
977                                            dwarf::DW_ATE_address)});
978   TyCache.insert(
979       {DestroyIndex, DBuilder.createBasicType(
980                          "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
981                          dwarf::DW_ATE_address)});
982 
983   /// FIXME: If we fill the field `SizeInBits` with the actual size of
984   /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
985   TyCache.insert({IndexIndex, DBuilder.createBasicType(
986                                   "__coro_index",
987                                   (Layout.getTypeSizeInBits(IndexTy) < 8)
988                                       ? 8
989                                       : Layout.getTypeSizeInBits(IndexTy),
990                                   dwarf::DW_ATE_unsigned_char)});
991 
992   for (auto *V : FrameData.getAllDefs()) {
993     if (DIVarCache.find(V) == DIVarCache.end())
994       continue;
995 
996     auto Index = FrameData.getFieldIndex(V);
997 
998     NameCache.insert({Index, DIVarCache[V]->getName()});
999     TyCache.insert({Index, DIVarCache[V]->getType()});
1000   }
1001 
  // Cache from index to (Align, Offset) pair.
1003   DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
1004   // The Align and Offset of Resume function and Destroy function are fixed.
1005   OffsetCache.insert({ResumeIndex, {8, 0}});
1006   OffsetCache.insert({DestroyIndex, {8, 8}});
1007   OffsetCache.insert(
1008       {IndexIndex,
1009        {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1010 
1011   for (auto *V : FrameData.getAllDefs()) {
1012     auto Index = FrameData.getFieldIndex(V);
1013 
1014     OffsetCache.insert(
1015         {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
1016   }
1017 
1018   DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names, e.g., there may be
  // many i32 and i64 fields in one coroutine frame. We use i32_0, i32_1, etc.
  // to keep the names distinct, since it makes no sense for the names of the
  // fields to conflict with each other.
1023   unsigned UnknownTypeNum = 0;
1024   for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1025     if (OffsetCache.find(Index) == OffsetCache.end())
1026       continue;
1027 
1028     std::string Name;
1029     uint64_t SizeInBits;
1030     uint32_t AlignInBits;
1031     uint64_t OffsetInBits;
1032     DIType *DITy = nullptr;
1033 
1034     Type *Ty = FrameTy->getElementType(Index);
1035     assert(Ty->isSized() && "We can't handle type which is not sized.\n");
1036     SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
1037     AlignInBits = OffsetCache[Index].first * 8;
1038     OffsetInBits = OffsetCache[Index].second * 8;
1039 
1040     if (NameCache.find(Index) != NameCache.end()) {
1041       Name = NameCache[Index].str();
1042       DITy = TyCache[Index];
1043     } else {
1044       DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1045       assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1046       Name = DITy->getName().str();
1047       Name += "_" + std::to_string(UnknownTypeNum);
1048       UnknownTypeNum++;
1049     }
1050 
1051     Elements.push_back(DBuilder.createMemberType(
1052         FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1053         llvm::DINode::FlagArtificial, DITy));
1054   }
1055 
1056   DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1057 
1058   auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1059                                                  DFile, LineNum, FrameDITy,
1060                                                  true, DINode::FlagArtificial);
1061   assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));
1062 
  // The DISubprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame
  // is optimized out`, which is more precise.
1070   if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1071     auto RetainedNodes = SubProgram->getRetainedNodes();
1072     SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1073                                                  RetainedNodes.end());
1074     RetainedNodesVec.push_back(FrameDIVar);
1075     SubProgram->replaceOperandWith(
1076         7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1077   }
1078 
1079   DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1080                          DBuilder.createExpression(), DILoc,
1081                          Shape.FramePtr->getNextNode());
1082 }
1083 
1084 // Build a struct that will keep state for an active coroutine.
1085 //   struct f.frame {
1086 //     ResumeFnTy ResumeFnAddr;
1087 //     ResumeFnTy DestroyFnAddr;
1088 //     int ResumeIndex;
1089 //     ... promise (if present) ...
1090 //     ... spills ...
1091 //   };
1092 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1093                                   FrameDataInfo &FrameData) {
1094   LLVMContext &C = F.getContext();
1095   const DataLayout &DL = F.getParent()->getDataLayout();
1096   StructType *FrameTy = [&] {
1097     SmallString<32> Name(F.getName());
1098     Name.append(".Frame");
1099     return StructType::create(C, Name);
1100   }();
1101 
1102   // We will use this value to cap the alignment of spilled values.
1103   Optional<Align> MaxFrameAlignment;
1104   if (Shape.ABI == coro::ABI::Async)
1105     MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1106   FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1107 
1108   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1109   Optional<FieldIDType> SwitchIndexFieldId;
1110 
1111   if (Shape.ABI == coro::ABI::Switch) {
1112     auto *FramePtrTy = FrameTy->getPointerTo();
1113     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
1114                                    /*IsVarArg=*/false);
1115     auto *FnPtrTy = FnTy->getPointerTo();
1116 
1117     // Add header fields for the resume and destroy functions.
1118     // We can rely on these being perfectly packed.
1119     (void)B.addField(FnPtrTy, None, /*header*/ true);
1120     (void)B.addField(FnPtrTy, None, /*header*/ true);
1121 
1122     // PromiseAlloca field needs to be explicitly added here because it's
1123     // a header field with a fixed offset based on its alignment. Hence it
1124     // needs special handling and cannot be added to FrameData.Allocas.
1125     if (PromiseAlloca)
1126       FrameData.setFieldIndex(
1127           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1128 
1129     // Add a field to store the suspend index.  This doesn't need to
1130     // be in the header.
1131     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1132     Type *IndexType = Type::getIntNTy(C, IndexBits);
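    // For example, a coroutine with three suspend points gets
    // Log2_64_Ceil(3) == 2, i.e. an i2 index type; a single suspend point
    // still gets an i1 thanks to the std::max above.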
1133 
1134     SwitchIndexFieldId = B.addField(IndexType, None);
1135   } else {
1136     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1137   }
1138 
  // Because multiple allocas may own the same field slot,
  // we add allocas to fields here.
1141   B.addFieldForAllocas(F, FrameData, Shape);
1142   // Add PromiseAlloca to Allocas list so that
1143   // 1. updateLayoutIndex could update its index after
1144   // `performOptimizedStructLayout`
1145   // 2. it is processed in insertSpills.
1146   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
1149     FrameData.Allocas.emplace_back(
1150         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
1151   // Create an entry for every spilled value.
1152   for (auto &S : FrameData.Spills) {
1153     Type *FieldType = S.first->getType();
1154     // For byval arguments, we need to store the pointed value in the frame,
1155     // instead of the pointer itself.
1156     if (const Argument *A = dyn_cast<Argument>(S.first))
1157       if (A->hasByValAttr())
1158         FieldType = A->getParamByValType();
1159     FieldIDType Id =
1160         B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
1161     FrameData.setFieldIndex(S.first, Id);
1162   }
1163 
1164   B.finish(FrameTy);
1165   FrameData.updateLayoutIndex(B);
1166   Shape.FrameAlign = B.getStructAlign();
1167   Shape.FrameSize = B.getStructSize();
1168 
1169   switch (Shape.ABI) {
1170   case coro::ABI::Switch: {
1171     // In the switch ABI, remember the switch-index field.
1172     auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1173     Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1174     Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1175     Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1176 
1177     // Also round the frame size up to a multiple of its alignment, as is
1178     // generally expected in C/C++.
1179     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1180     break;
1181   }
1182 
1183   // In the retcon ABI, remember whether the frame is inline in the storage.
1184   case coro::ABI::Retcon:
1185   case coro::ABI::RetconOnce: {
1186     auto Id = Shape.getRetconCoroId();
1187     Shape.RetconLowering.IsFrameInlineInStorage
1188       = (B.getStructSize() <= Id->getStorageSize() &&
1189          B.getStructAlign() <= Id->getStorageAlignment());
1190     break;
1191   }
1192   case coro::ABI::Async: {
1193     Shape.AsyncLowering.FrameOffset =
1194         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1195     // Also make the final context size a multiple of the context alignment to
1196     // make allocation easier for allocators.
1197     Shape.AsyncLowering.ContextSize =
1198         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1199                 Shape.AsyncLowering.getContextAlignment());
1200     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1201       report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
1203           "the alignment of the async function context");
1204     }
1205     break;
1206   }
1207   }
1208 
1209   return FrameTy;
1210 }
1211 
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
// 1. Should this alloca be allocated on the frame instead?
// 2. Could the content of the alloca be modified prior to CoroBegin, which
//    would require copying the data from the alloca to the frame after
//    CoroBegin?
// 3. Are there any aliases created for this alloca prior to CoroBegin but
//    used after CoroBegin? In that case, we will need to recreate the alias
//    after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. A list of all BasicBlocks that use this alloca or any of its aliases.
//      In the end, we check if there exist any two such blocks that cross
//      suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca escapes at some point,
//      either by storing the address somewhere, or by the address being used
//      in a function call that might capture. If it ever escapes, this alloca
//      must be put on the frame conservatively.
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether the instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when a
// PHINode takes in different offset values). We cannot handle unknown offsets
// and will assert. This is the potential issue left out; an ideal solution
// would likely require a significant redesign.
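//
// As a hypothetical illustration of question 3 (names and offsets are made
// up):
//
//   %a   = alloca [2 x i32]
//   %elt = getelementptr [2 x i32], [2 x i32]* %a, i32 0, i32 1 ; offset 4
//   %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
//   ...
//   store i32 0, i32* %elt            ; alias used after CoroBegin
//
// %elt is created before CoroBegin but used after it, so if %a lives on the
// frame, %elt must be recreated after CoroBegin from the frame address of %a
// plus the recorded 4-byte offset.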
1237 namespace {
1238 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1239   using Base = PtrUseVisitor<AllocaUseVisitor>;
1240   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1241                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
1242                    bool ShouldUseLifetimeStartInfo)
1243       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
1244         ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1245 
1246   void visit(Instruction &I) {
1247     Users.insert(&I);
1248     Base::visit(I);
1249     // If the pointer is escaped prior to CoroBegin, we have to assume it would
1250     // be written into before CoroBegin as well.
1251     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1252       MayWriteBeforeCoroBegin = true;
1253     }
1254   }
1255   // We need to provide this overload as PtrUseVisitor uses a pointer based
1256   // visiting function.
1257   void visit(Instruction *I) { return visit(*I); }
1258 
1259   void visitPHINode(PHINode &I) {
1260     enqueueUsers(I);
1261     handleAlias(I);
1262   }
1263 
1264   void visitSelectInst(SelectInst &I) {
1265     enqueueUsers(I);
1266     handleAlias(I);
1267   }
1268 
1269   void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca is written to.
1272     handleMayWrite(SI);
1273 
1274     if (SI.getValueOperand() != U->get())
1275       return;
1276 
1277     // We are storing the pointer into a memory location, potentially escaping.
1278     // As an optimization, we try to detect simple cases where it doesn't
1279     // actually escape, for example:
1280     //   %ptr = alloca ..
1281     //   %addr = alloca ..
1282     //   store %ptr, %addr
1283     //   %x = load %addr
1284     //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as escaped.
1287     auto IsSimpleStoreThenLoad = [&]() {
1288       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1289       // If the memory location we are storing to is not an alloca, it
1290       // could be an alias of some other memory locations, which is difficult
1291       // to analyze.
1292       if (!AI)
1293         return false;
1294       // StoreAliases contains aliases of the memory location stored into.
1295       SmallVector<Instruction *, 4> StoreAliases = {AI};
1296       while (!StoreAliases.empty()) {
1297         Instruction *I = StoreAliases.pop_back_val();
1298         for (User *U : I->users()) {
1299           // If we are loading from the memory location, we are creating an
1300           // alias of the original pointer.
1301           if (auto *LI = dyn_cast<LoadInst>(U)) {
1302             enqueueUsers(*LI);
1303             handleAlias(*LI);
1304             continue;
1305           }
          // If we are overwriting the memory location, the pointer certainly
1307           // won't escape.
1308           if (auto *S = dyn_cast<StoreInst>(U))
1309             if (S->getPointerOperand() == I)
1310               continue;
1311           if (auto *II = dyn_cast<IntrinsicInst>(U))
1312             if (II->isLifetimeStartOrEnd())
1313               continue;
          // BitCastInst creates aliases of the memory location being stored
1315           // into.
1316           if (auto *BI = dyn_cast<BitCastInst>(U)) {
1317             StoreAliases.push_back(BI);
1318             continue;
1319           }
1320           return false;
1321         }
1322       }
1323 
1324       return true;
1325     };
1326 
1327     if (!IsSimpleStoreThenLoad())
1328       PI.setEscaped(&SI);
1329   }
1330 
1331   // All mem intrinsics modify the data.
1332   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1333 
1334   void visitBitCastInst(BitCastInst &BC) {
1335     Base::visitBitCastInst(BC);
1336     handleAlias(BC);
1337   }
1338 
1339   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1340     Base::visitAddrSpaceCastInst(ASC);
1341     handleAlias(ASC);
1342   }
1343 
1344   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1345     // The base visitor will adjust Offset accordingly.
1346     Base::visitGetElementPtrInst(GEPI);
1347     handleAlias(GEPI);
1348   }
1349 
1350   void visitIntrinsicInst(IntrinsicInst &II) {
    // When we find that a lifetime marker refers to a
    // subrange of the original alloca, ignore the lifetime
    // marker to avoid misleading the analysis.
1354     if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1355         !Offset.isZero())
1356       return Base::visitIntrinsicInst(II);
1357     LifetimeStarts.insert(&II);
1358   }
1359 
1360   void visitCallBase(CallBase &CB) {
1361     for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1362       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1363         PI.setEscaped(&CB);
1364     handleMayWrite(CB);
1365   }
1366 
1367   bool getShouldLiveOnFrame() const {
1368     if (!ShouldLiveOnFrame)
1369       ShouldLiveOnFrame = computeShouldLiveOnFrame();
1370     return ShouldLiveOnFrame.getValue();
1371   }
1372 
1373   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1374 
1375   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1376     assert(getShouldLiveOnFrame() && "This method should only be called if the "
1377                                      "alloca needs to live on the frame.");
1378     for (const auto &P : AliasOffetMap)
1379       if (!P.second)
1380         report_fatal_error("Unable to handle an alias with unknown offset "
1381                            "created before CoroBegin.");
1382     return AliasOffetMap;
1383   }
1384 
1385 private:
1386   const DominatorTree &DT;
1387   const CoroBeginInst &CoroBegin;
1388   const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after it. Each entry contains the instruction and the offset into
  // the original alloca. They need to be recreated after CoroBegin off the
  // frame.
1392   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1393   SmallPtrSet<Instruction *, 4> Users{};
1394   SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1395   bool MayWriteBeforeCoroBegin{false};
1396   bool ShouldUseLifetimeStartInfo{true};
1397 
1398   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1399 
1400   bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of a lifetime.start intrinsic and
    // a user of the pointer to see if they cross a suspension point. The
    // users cover both direct uses as well as indirect uses.
1405     if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1406       for (auto *I : Users)
1407         for (auto *S : LifetimeStarts)
1408           if (Checker.isDefinitionAcrossSuspend(*S, I))
1409             return true;
1410       return false;
1411     }
1412     // FIXME: Ideally the isEscaped check should come at the beginning.
1413     // However there are a few loose ends that need to be fixed first before
1414     // we can do that. We need to make sure we are not over-conservative, so
1415     // that the data accessed in-between await_suspend and symmetric transfer
1416     // is always put on the stack, and also data accessed after coro.end is
1417     // always put on the stack (esp the return object). To fix that, we need
1418     // to:
1419     //  1) Potentially treat sret as nocapture in calls
1420     //  2) Special handle the return object and put it on the stack
1421     //  3) Utilize lifetime.end intrinsic
1422     if (PI.isEscaped())
1423       return true;
1424 
1425     for (auto *U1 : Users)
1426       for (auto *U2 : Users)
1427         if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1428           return true;
1429 
1430     return false;
1431   }
1432 
1433   void handleMayWrite(const Instruction &I) {
1434     if (!DT.dominates(&CoroBegin, &I))
1435       MayWriteBeforeCoroBegin = true;
1436   }
1437 
1438   bool usedAfterCoroBegin(Instruction &I) {
1439     for (auto &U : I.uses())
1440       if (DT.dominates(&CoroBegin, U))
1441         return true;
1442     return false;
1443   }
1444 
1445   void handleAlias(Instruction &I) {
1446     // We track all aliases created prior to CoroBegin but used after.
1447     // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
1449     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1450       return;
1451 
1452     if (!IsOffsetKnown) {
1453       AliasOffetMap[&I].reset();
1454     } else {
1455       auto Itr = AliasOffetMap.find(&I);
1456       if (Itr == AliasOffetMap.end()) {
1457         AliasOffetMap[&I] = Offset;
1458       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1459         // If we have seen two different possible values for this alias, we set
1460         // it to empty.
1461         AliasOffetMap[&I].reset();
1462       }
1463     }
1464   }
1465 };
1466 } // namespace
1467 
// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing it earlier would violate the
// requirement that a catchswitch, like all other EH pads, must be the first
// non-PHI instruction in a block.
1471 //
1472 // Split away catchswitch into a separate block and insert in its place:
1473 //
1474 //   cleanuppad <InsertPt> cleanupret.
1475 //
// The cleanupret instruction will act as the insertion point for the spill.
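//
// Illustratively (block and value names are hypothetical), a block such as
//
//   handler:
//     %val = phi i32 [ 0, %a ], [ 1, %b ]
//     %cs = catchswitch within none [label %catch] unwind to caller
//
// is rewritten so that the spill of %val can be placed before the cleanupret:
//
//   handler:
//     %val = phi i32 [ 0, %a ], [ 1, %b ]
//     %pad = cleanuppad within none []
//     ; spill of %val is inserted here
//     cleanupret from %pad unwind label %handler.split
//
//   handler.split:
//     %cs = catchswitch within none [label %catch] unwind to caller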
1477 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1478   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1479   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1480   CurrentBlock->getTerminator()->eraseFromParent();
1481 
1482   auto *CleanupPad =
1483       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1484   auto *CleanupRet =
1485       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1486   return CleanupRet;
1487 }
1488 
1489 static void createFramePtr(coro::Shape &Shape) {
1490   auto *CB = Shape.CoroBegin;
1491   IRBuilder<> Builder(CB->getNextNode());
1492   StructType *FrameTy = Shape.FrameTy;
1493   PointerType *FramePtrTy = FrameTy->getPointerTo();
1494   Shape.FramePtr =
1495       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1496 }
1497 
1498 // Replace all alloca and SSA values that are accessed across suspend points
1499 // with GetElementPointer from coroutine frame + loads and stores. Create an
1500 // AllocaSpillBB that will become the new entry block for the resume parts of
1501 // the coroutine:
1502 //
1503 //    %hdl = coro.begin(...)
1504 //    whatever
1505 //
1506 // becomes:
1507 //
1508 //    %hdl = coro.begin(...)
1509 //    %FramePtr = bitcast i8* hdl to %f.frame*
1510 //    br label %AllocaSpillBB
1511 //
1512 //  AllocaSpillBB:
1513 //    ; geps corresponding to allocas that were moved to coroutine frame
1514 //    br label PostSpill
1515 //
1516 //  PostSpill:
1517 //    whatever
1518 //
1519 //
1520 static Instruction *insertSpills(const FrameDataInfo &FrameData,
1521                                  coro::Shape &Shape) {
1522   auto *CB = Shape.CoroBegin;
1523   LLVMContext &C = CB->getContext();
1524   IRBuilder<> Builder(C);
1525   StructType *FrameTy = Shape.FrameTy;
1526   Instruction *FramePtr = Shape.FramePtr;
1527   DominatorTree DT(*CB->getFunction());
1528   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1529 
  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Append an extra 0 index for array-allocas, preserving
  // the original type.
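  //
  // For example (illustrative field index and names), if Orig is
  // "%a = alloca i32, i32 4" and its frame field 2 has type [4 x i32], the
  // emitted GEP is
  //   getelementptr inbounds %f.frame, %f.frame* %FramePtr, i32 0, i32 2, i32 0
  // whose result type is i32*, matching the type of the original alloca.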
1533   auto GetFramePointer = [&](Value *Orig) -> Value * {
1534     FieldIDType Index = FrameData.getFieldIndex(Orig);
1535     SmallVector<Value *, 3> Indices = {
1536         ConstantInt::get(Type::getInt32Ty(C), 0),
1537         ConstantInt::get(Type::getInt32Ty(C), Index),
1538     };
1539 
1540     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1541       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1542         auto Count = CI->getValue().getZExtValue();
1543         if (Count > 1) {
1544           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1545         }
1546       } else {
1547         report_fatal_error("Coroutines cannot handle non static allocas yet");
1548       }
1549     }
1550 
1551     auto GEP = cast<GetElementPtrInst>(
1552         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1553     if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst, it
      // implies that the AllocaInst may share its frame slot with another
      // AllocaInst. So we cast the GEP to the type of the AllocaInst here to
      // reuse the frame storage.
      //
      // Note: If we change the strategy for dealing with alignment, we need to
      // refine this casting.
1561       if (GEP->getResultElementType() != Orig->getType())
1562         return Builder.CreateBitCast(GEP, Orig->getType(),
1563                                      Orig->getName() + Twine(".cast"));
1564     }
1565     return GEP;
1566   };
1567 
1568   for (auto const &E : FrameData.Spills) {
1569     Value *Def = E.first;
1570     auto SpillAlignment = Align(FrameData.getAlign(Def));
1571     // Create a store instruction storing the value into the
1572     // coroutine frame.
1573     Instruction *InsertPt = nullptr;
1574     Type *ByValTy = nullptr;
1575     if (auto *Arg = dyn_cast<Argument>(Def)) {
1576       // For arguments, we will place the store instruction right after
1577       // the coroutine frame pointer instruction, i.e. bitcast of
1578       // coro.begin from i8* to %f.frame*.
1579       InsertPt = FramePtr->getNextNode();
1580 
1581       // If we're spilling an Argument, make sure we clear 'nocapture'
1582       // from the coroutine function.
1583       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1584 
1585       if (Arg->hasByValAttr())
1586         ByValTy = Arg->getParamByValType();
1587     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1588       // Don't spill immediately after a suspend; splitting assumes
1589       // that the suspend will be followed by a branch.
1590       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1591     } else {
1592       auto *I = cast<Instruction>(Def);
1593       if (!DT.dominates(CB, I)) {
1594         // If it is not dominated by CoroBegin, then spill should be
1595         // inserted immediately after CoroFrame is computed.
1596         InsertPt = FramePtr->getNextNode();
1597       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1598         // If we are spilling the result of the invoke instruction, split
1599         // the normal edge and insert the spill in the new block.
1600         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1601         InsertPt = NewBB->getTerminator();
1602       } else if (isa<PHINode>(I)) {
        // Skip past the PHI nodes and EH pad instructions.
1604         BasicBlock *DefBlock = I->getParent();
1605         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1606           InsertPt = splitBeforeCatchSwitch(CSI);
1607         else
1608           InsertPt = &*DefBlock->getFirstInsertionPt();
1609       } else {
1610         assert(!I->isTerminator() && "unexpected terminator");
1611         // For all other values, the spill is placed immediately after
1612         // the definition.
1613         InsertPt = I->getNextNode();
1614       }
1615     }
1616 
1617     auto Index = FrameData.getFieldIndex(Def);
1618     Builder.SetInsertPoint(InsertPt);
1619     auto *G = Builder.CreateConstInBoundsGEP2_32(
1620         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1621     if (ByValTy) {
1622       // For byval arguments, we need to store the pointed value in the frame,
1623       // instead of the pointer itself.
1624       auto *Value = Builder.CreateLoad(ByValTy, Def);
1625       Builder.CreateAlignedStore(Value, G, SpillAlignment);
1626     } else {
1627       Builder.CreateAlignedStore(Def, G, SpillAlignment);
1628     }
1629 
1630     BasicBlock *CurrentBlock = nullptr;
1631     Value *CurrentReload = nullptr;
1632     for (auto *U : E.second) {
      // If we have not seen this use block yet, create a load instruction at
      // its first insertion point to reload the spilled value from the
      // coroutine frame (or, for byval arguments, reuse the frame GEP itself).
1636       if (CurrentBlock != U->getParent()) {
1637         CurrentBlock = U->getParent();
1638         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1639 
1640         auto *GEP = GetFramePointer(E.first);
1641         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1642         if (ByValTy)
1643           CurrentReload = GEP;
1644         else
1645           CurrentReload = Builder.CreateAlignedLoad(
1646               FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1647               SpillAlignment, E.first->getName() + Twine(".reload"));
1648 
1649         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1650         for (DbgDeclareInst *DDI : DIs) {
1651           bool AllowUnresolved = false;
1652           // This dbg.declare is preserved for all coro-split function
1653           // fragments. It will be unreachable in the main function, and
1654           // processed by coro::salvageDebugInfo() by CoroCloner.
1655           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1656               .insertDeclare(CurrentReload, DDI->getVariable(),
1657                              DDI->getExpression(), DDI->getDebugLoc(),
1658                              &*Builder.GetInsertPoint());
1659           // This dbg.declare is for the main function entry point.  It
1660           // will be deleted in all coro-split functions.
1661           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1662         }
1663       }
1664 
1665       // If we have a single edge PHINode, remove it and replace it with a
1666       // reload from the coroutine frame. (We already took care of multi edge
1667       // PHINodes by rewriting them in the rewritePHIs function).
1668       if (auto *PN = dyn_cast<PHINode>(U)) {
1669         assert(PN->getNumIncomingValues() == 1 &&
1670                "unexpected number of incoming "
1671                "values in the PHINode");
1672         PN->replaceAllUsesWith(CurrentReload);
1673         PN->eraseFromParent();
1674         continue;
1675       }
1676 
1677       // Replace all uses of CurrentValue in the current instruction with
1678       // reload.
1679       U->replaceUsesOfWith(Def, CurrentReload);
1680     }
1681   }
1682 
1683   BasicBlock *FramePtrBB = FramePtr->getParent();
1684 
1685   auto SpillBlock =
1686       FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1687   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1688   Shape.AllocaSpillBlock = SpillBlock;
1689 
  // The retcon, retcon.once and async lowerings assume all uses have been
  // sunk.
1691   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1692       Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
1694     Builder.SetInsertPoint(&SpillBlock->front());
1695     for (const auto &P : FrameData.Allocas) {
1696       AllocaInst *Alloca = P.Alloca;
1697       auto *G = GetFramePointer(Alloca);
1698 
      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
1701       G->takeName(Alloca);
1702       Alloca->replaceAllUsesWith(G);
1703       Alloca->eraseFromParent();
1704     }
1705     return FramePtr;
1706   }
1707 
  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas in
  // dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
1713   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1714   SmallVector<Instruction *, 4> UsersToUpdate;
1715   for (const auto &A : FrameData.Allocas) {
1716     AllocaInst *Alloca = A.Alloca;
1717     UsersToUpdate.clear();
1718     for (User *U : Alloca->users()) {
1719       auto *I = cast<Instruction>(U);
1720       if (DT.dominates(CB, I))
1721         UsersToUpdate.push_back(I);
1722     }
1723     if (UsersToUpdate.empty())
1724       continue;
1725     auto *G = GetFramePointer(Alloca);
1726     G->setName(Alloca->getName() + Twine(".reload.addr"));
1727 
1728     SmallVector<DbgVariableIntrinsic *, 4> DIs;
1729     findDbgUsers(DIs, Alloca);
1730     for (auto *DVI : DIs)
1731       DVI->replaceUsesOfWith(Alloca, G);
1732 
1733     for (Instruction *I : UsersToUpdate)
1734       I->replaceUsesOfWith(Alloca, G);
1735   }
1736   Builder.SetInsertPoint(FramePtr->getNextNode());
1737   for (const auto &A : FrameData.Allocas) {
1738     AllocaInst *Alloca = A.Alloca;
1739     if (A.MayWriteBeforeCoroBegin) {
      // MayWriteBeforeCoroBegin means the alloca may be modified before
      // CoroBegin, so its current value must be copied into the frame.
1741       if (Alloca->isArrayAllocation())
1742         report_fatal_error(
1743             "Coroutines cannot handle copying of array allocas yet");
1744 
1745       auto *G = GetFramePointer(Alloca);
1746       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1747       Builder.CreateStore(Value, G);
1748     }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
1752     for (const auto &Alias : A.Aliases) {
1753       auto *FramePtr = GetFramePointer(Alloca);
1754       auto *FramePtrRaw =
1755           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1756       auto *AliasPtr = Builder.CreateGEP(
1757           Type::getInt8Ty(C), FramePtrRaw,
1758           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1759       auto *AliasPtrTyped =
1760           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1761       Alias.first->replaceUsesWithIf(
1762           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1763     }
1764   }
1765   return FramePtr;
1766 }
1767 
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
1770 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1771                                          BasicBlock *InsertedBB,
1772                                          BasicBlock *PredBB,
1773                                          PHINode *UntilPHI = nullptr) {
1774   auto *PN = cast<PHINode>(&SuccBB->front());
1775   do {
1776     int Index = PN->getBasicBlockIndex(InsertedBB);
1777     Value *V = PN->getIncomingValue(Index);
1778     PHINode *InputV = PHINode::Create(
1779         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1780         &InsertedBB->front());
1781     InputV->addIncoming(V, PredBB);
1782     PN->setIncomingValue(Index, InputV);
1783     PN = dyn_cast<PHINode>(PN->getNextNode());
1784   } while (PN != UntilPHI);
1785 }
1786 
1787 // Rewrites the PHI Nodes in a cleanuppad.
1788 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1789                                      CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
1794   //
1795   // cleanuppad:
1796   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1797   //    %3 = cleanuppad within none []
1798   //
1799   // It will create:
1800   //
1801   // cleanuppad.corodispatch
1802   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1803   //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
1805   //            [i8 0, label %cleanuppad.from.catchswitch
1806   //             i8 1, label %cleanuppad.from.catch.1]
1807   // cleanuppad.from.catchswitch:
1808   //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
1810   // cleanuppad.from.catch.1:
1811   //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
1813   // cleanuppad:
1814   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1815   //                 [%6, %cleanuppad.from.catch.1]
1816 
1817   // Unreachable BB, in case switching on an invalid value in the dispatcher.
1818   auto *UnreachBB = BasicBlock::Create(
1819       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1820   IRBuilder<> Builder(UnreachBB);
1821   Builder.CreateUnreachable();
1822 
1823   // Create a new cleanuppad which will be the dispatcher.
1824   auto *NewCleanupPadBB =
1825       BasicBlock::Create(CleanupPadBB->getContext(),
1826                          CleanupPadBB->getName() + Twine(".corodispatch"),
1827                          CleanupPadBB->getParent(), CleanupPadBB);
1828   Builder.SetInsertPoint(NewCleanupPadBB);
1829   auto *SwitchType = Builder.getInt8Ty();
1830   auto *SetDispatchValuePN =
1831       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1832   CleanupPad->removeFromParent();
1833   CleanupPad->insertAfter(SetDispatchValuePN);
1834   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1835                                                 pred_size(CleanupPadBB));
1836 
1837   int SwitchIndex = 0;
1838   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1839   for (BasicBlock *Pred : Preds) {
1840     // Create a new cleanuppad and move the PHI values to there.
1841     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1842                                       CleanupPadBB->getName() +
1843                                           Twine(".from.") + Pred->getName(),
1844                                       CleanupPadBB->getParent(), CleanupPadBB);
1845     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1846     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1847                     Pred->getName());
1848     Builder.SetInsertPoint(CaseBB);
1849     Builder.CreateBr(CleanupPadBB);
1850     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1851 
1852     // Update this Pred to the new unwind point.
1853     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1854 
1855     // Setup the switch in the dispatcher.
1856     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1857     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1858     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1859     SwitchIndex++;
1860   }
1861 }
1862 
1863 static void cleanupSinglePredPHIs(Function &F) {
1864   SmallVector<PHINode *, 32> Worklist;
1865   for (auto &BB : F) {
1866     for (auto &Phi : BB.phis()) {
1867       if (Phi.getNumIncomingValues() == 1) {
1868         Worklist.push_back(&Phi);
1869       } else
1870         break;
1871     }
1872   }
1873   while (!Worklist.empty()) {
1874     auto *Phi = Worklist.pop_back_val();
1875     auto *OriginalValue = Phi->getIncomingValue(0);
1876     Phi->replaceAllUsesWith(OriginalValue);
1877   }
1878 }
1879 
1880 static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
1883   //
1884   // loop:
1885   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1886   //
1887   // It will create:
1888   //
1889   // loop.from.entry:
1890   //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
1892   // loop.from.loop:
1893   //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
1895   //
1896   // After this rewrite, further analysis will ignore any phi nodes with more
1897   // than one incoming edge.
1898 
1899   // TODO: Simplify PHINodes in the basic block to remove duplicate
1900   // predecessors.
1901 
1902   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1903   // so we need to create an additional "dispatcher" block.
1904   if (auto *CleanupPad =
1905           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1906     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1907     for (BasicBlock *Pred : Preds) {
1908       if (CatchSwitchInst *CS =
1909               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1910         // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
1912         assert(CS->getUnwindDest() == &BB);
1913         (void)CS;
1914         rewritePHIsForCleanupPad(&BB, CleanupPad);
1915         return;
1916       }
1917     }
1918   }
1919 
1920   LandingPadInst *LandingPad = nullptr;
1921   PHINode *ReplPHI = nullptr;
1922   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1923     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1924     // We replace the original landing pad with a PHINode that will collect the
1925     // results from all of them.
1926     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1927     ReplPHI->takeName(LandingPad);
1928     LandingPad->replaceAllUsesWith(ReplPHI);
1929     // We will erase the original landing pad at the end of this function after
1930     // ehAwareSplitEdge cloned it in the transition blocks.
1931   }
1932 
1933   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1934   for (BasicBlock *Pred : Preds) {
1935     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1936     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1937 
1938     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1939     // that replaced the landing pad.
1940     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1941   }
1942 
1943   if (LandingPad) {
    // The calls to ehAwareSplitEdge above cloned the original landing pad.
    // It is no longer needed.
1946     LandingPad->eraseFromParent();
1947   }
1948 }
1949 
1950 static void rewritePHIs(Function &F) {
1951   SmallVector<BasicBlock *, 8> WorkList;
1952 
1953   for (BasicBlock &BB : F)
1954     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1955       if (PN->getNumIncomingValues() > 1)
1956         WorkList.push_back(&BB);
1957 
1958   for (BasicBlock *BB : WorkList)
1959     rewritePHIs(*BB);
1960 }
1961 
// Check for instructions that we can recreate on resume as opposed to spilling
// the result into the coroutine frame.
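// For example (illustrative IR), given
//   %gep = getelementptr inbounds i32, i32* %p, i64 1
//   ; ... suspend point ...
//   %v = load i32, i32* %gep
// it can be cheaper to recompute %gep after the suspend than to store it in
// the coroutine frame, provided %p itself is available there.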
1964 static bool materializable(Instruction &V) {
1965   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1966          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1967 }
1968 
1969 // Check for structural coroutine intrinsics that should not be spilled into
1970 // the coroutine frame.
1971 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1972   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1973          isa<CoroSuspendInst>(&I);
1974 }
1975 
// For every use of the value that crosses a suspend point, recreate that value
// after the suspend point.
1978 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1979                                               const SpillInfo &Spills) {
1980   for (const auto &E : Spills) {
1981     Value *Def = E.first;
1982     BasicBlock *CurrentBlock = nullptr;
1983     Instruction *CurrentMaterialization = nullptr;
1984     for (Instruction *U : E.second) {
1985       // If we have not seen this block, materialize the value.
1986       if (CurrentBlock != U->getParent()) {
1987 
1988         bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
1989         CurrentBlock = U->getParent();
1990         auto *InsertBlock = IsInCoroSuspendBlock
1991                                 ? CurrentBlock->getSinglePredecessor()
1992                                 : CurrentBlock;
1993         CurrentMaterialization = cast<Instruction>(Def)->clone();
1994         CurrentMaterialization->setName(Def->getName());
1995         CurrentMaterialization->insertBefore(
1996             IsInCoroSuspendBlock ? InsertBlock->getTerminator()
1997                                  : &*InsertBlock->getFirstInsertionPt());
1998       }
1999       if (auto *PN = dyn_cast<PHINode>(U)) {
2000         assert(PN->getNumIncomingValues() == 1 &&
2001                "unexpected number of incoming "
2002                "values in the PHINode");
2003         PN->replaceAllUsesWith(CurrentMaterialization);
2004         PN->eraseFromParent();
2005         continue;
2006       }
2007       // Replace all uses of Def in the current instruction with the
2008       // CurrentMaterialization for the block.
2009       U->replaceUsesOfWith(Def, CurrentMaterialization);
2010     }
2011   }
2012 }
2013 
2014 // Splits the block at a particular instruction unless it is the first
2015 // instruction in the block with a single predecessor.
2016 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2017   auto *BB = I->getParent();
2018   if (&BB->front() == I) {
2019     if (BB->getSinglePredecessor()) {
2020       BB->setName(Name);
2021       return BB;
2022     }
2023   }
2024   return BB->splitBasicBlock(I, Name);
2025 }
2026 
// Split above and below a particular instruction so that it
// ends up alone in its own block.
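// For instance, splitAround(CSI, "CoroSuspend") leaves the coro.suspend as the
// only non-terminator instruction in a block named "CoroSuspend", followed by
// a block named "AfterCoroSuspend".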
2029 static void splitAround(Instruction *I, const Twine &Name) {
2030   splitBlockIfNotFirst(I, Name);
2031   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2032 }
2033 
2034 static bool isSuspendBlock(BasicBlock *BB) {
2035   return isa<AnyCoroSuspendInst>(BB->front());
2036 }
2037 
2038 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
2039 
2040 /// Does control flow starting at the given block ever reach a suspend
2041 /// instruction before reaching a block in VisitedOrFreeBBs?
2042 static bool isSuspendReachableFrom(BasicBlock *From,
2043                                    VisitedBlocksSet &VisitedOrFreeBBs) {
2044   // Eagerly try to add this block to the visited set.  If it's already
2045   // there, stop recursing; this path doesn't reach a suspend before
2046   // either looping or reaching a freeing block.
2047   if (!VisitedOrFreeBBs.insert(From).second)
2048     return false;
2049 
2050   // We assume that we'll already have split suspends into their own blocks.
2051   if (isSuspendBlock(From))
2052     return true;
2053 
2054   // Recurse on the successors.
2055   for (auto Succ : successors(From)) {
2056     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2057       return true;
2058   }
2059 
2060   return false;
2061 }
2062 
2063 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2064 /// suspend point?
2065 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't search past them.
2068   VisitedBlocksSet VisitedOrFreeBBs;
2069   for (auto User : AI->users()) {
2070     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2071       VisitedOrFreeBBs.insert(FI->getParent());
2072   }
2073 
2074   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2075 }
2076 
2077 /// After we split the coroutine, will the given basic block be along
2078 /// an obvious exit path for the resumption function?
2079 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2080                                               unsigned depth = 3) {
2081   // If we've bottomed out our depth count, stop searching and assume
2082   // that the path might loop back.
2083   if (depth == 0) return false;
2084 
2085   // If this is a suspend block, we're about to exit the resumption function.
2086   if (isSuspendBlock(BB)) return true;
2087 
2088   // Recurse into the successors.
2089   for (auto Succ : successors(BB)) {
2090     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2091       return false;
2092   }
2093 
2094   // If none of the successors leads back in a loop, we're on an exit/abort.
2095   return true;
2096 }
2097 
2098 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2099   // Look for a free that isn't sufficiently obviously followed by
2100   // either a suspend or a termination, i.e. something that will leave
2101   // the coro resumption frame.
2102   for (auto U : AI->users()) {
2103     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2104     if (!FI) continue;
2105 
2106     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2107       return true;
2108   }
2109 
2110   // If we never found one, we don't need a stack save.
2111   return false;
2112 }
2113 
2114 /// Turn each of the given local allocas into a normal (dynamic) alloca
2115 /// instruction.
2116 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2117                               SmallVectorImpl<Instruction*> &DeadInsts) {
2118   for (auto AI : LocalAllocas) {
2119     auto M = AI->getModule();
2120     IRBuilder<> Builder(AI);
2121 
2122     // Save the stack depth.  Try to avoid doing this if the stackrestore
2123     // is going to immediately precede a return or something.
2124     Value *StackSave = nullptr;
2125     if (localAllocaNeedsStackSave(AI))
2126       StackSave = Builder.CreateCall(
2127                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2128 
2129     // Allocate memory.
2130     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2131     Alloca->setAlignment(Align(AI->getAlignment()));
2132 
2133     for (auto U : AI->users()) {
2134       // Replace gets with the allocation.
2135       if (isa<CoroAllocaGetInst>(U)) {
2136         U->replaceAllUsesWith(Alloca);
2137 
2138       // Replace frees with stackrestores.  This is safe because
2139       // alloca.alloc is required to obey a stack discipline, although we
2140       // don't enforce that structurally.
2141       } else {
2142         auto FI = cast<CoroAllocaFreeInst>(U);
2143         if (StackSave) {
2144           Builder.SetInsertPoint(FI);
2145           Builder.CreateCall(
2146                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2147                              StackSave);
2148         }
2149       }
2150       DeadInsts.push_back(cast<Instruction>(U));
2151     }
2152 
2153     DeadInsts.push_back(AI);
2154   }
2155 }
2156 
2157 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
2158 /// This happens during the all-instructions iteration, so it must not
2159 /// delete the call.
2160 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2161                                         coro::Shape &Shape,
2162                                    SmallVectorImpl<Instruction*> &DeadInsts) {
2163   IRBuilder<> Builder(AI);
2164   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2165 
2166   for (User *U : AI->users()) {
2167     if (isa<CoroAllocaGetInst>(U)) {
2168       U->replaceAllUsesWith(Alloc);
2169     } else {
2170       auto FI = cast<CoroAllocaFreeInst>(U);
2171       Builder.SetInsertPoint(FI);
2172       Shape.emitDealloc(Builder, Alloc, nullptr);
2173     }
2174     DeadInsts.push_back(cast<Instruction>(U));
2175   }
2176 
2177   // Push this on last so that it gets deleted after all the others.
2178   DeadInsts.push_back(AI);
2179 
2180   // Return the new allocation value so that we can check for needed spills.
2181   return cast<Instruction>(Alloc);
2182 }
2183 
2184 /// Get the current swifterror value.
2185 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2186                                      coro::Shape &Shape) {
2187   // Make a fake function pointer as a sort of intrinsic.
2188   auto FnTy = FunctionType::get(ValueTy, {}, false);
2189   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2190 
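  // The resulting call through a null function pointer, e.g. (illustrative)
  //   %v = call i8* null()
  // is recorded in Shape.SwiftErrorOps so that coroutine splitting can later
  // lower it to a real swifterror access.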
2191   auto Call = Builder.CreateCall(FnTy, Fn, {});
2192   Shape.SwiftErrorOps.push_back(Call);
2193 
2194   return Call;
2195 }
2196 
2197 /// Set the given value as the current swifterror value.
2198 ///
2199 /// Returns a slot that can be used as a swifterror slot.
2200 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2201                                      coro::Shape &Shape) {
2202   // Make a fake function pointer as a sort of intrinsic.
2203   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2204                                 {V->getType()}, false);
2205   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2206 
2207   auto Call = Builder.CreateCall(FnTy, Fn, { V });
2208   Shape.SwiftErrorOps.push_back(Call);
2209 
2210   return Call;
2211 }
2212 
/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
2215 ///
2216 /// Returns an address that will stand in for the swifterror slot
2217 /// until splitting.
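///
/// Illustratively (names and types are hypothetical), around a call this
/// produces:
///   %v0 = load i8*, i8** %some.alloca
///   %slot = call i8** null(i8* %v0)       ; set the swifterror value
///   ; the original call, which will use %slot as its swifterror argument
///   %v1 = call i8* null()                 ; get the swifterror value
///   store i8* %v1, i8** %some.alloca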
2218 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2219                                                  AllocaInst *Alloca,
2220                                                  coro::Shape &Shape) {
2221   auto ValueTy = Alloca->getAllocatedType();
2222   IRBuilder<> Builder(Call);
2223 
2224   // Load the current value from the alloca and set it as the
2225   // swifterror value.
2226   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2227   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2228 
2229   // Move to after the call.  Since swifterror only has a guaranteed
2230   // value on normal exits, we can ignore implicit and explicit unwind
2231   // edges.
2232   if (isa<CallInst>(Call)) {
2233     Builder.SetInsertPoint(Call->getNextNode());
2234   } else {
2235     auto Invoke = cast<InvokeInst>(Call);
2236     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2237   }
2238 
2239   // Get the current swifterror value and store it to the alloca.
2240   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2241   Builder.CreateStore(ValueAfterCall, Alloca);
2242 
2243   return Addr;
2244 }
2245 
2246 /// Eliminate a formerly-swifterror alloca by inserting the get/set
2247 /// intrinsics and attempting to MemToReg the alloca away.
2248 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2249                                       coro::Shape &Shape) {
2250   for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2251     // swifterror values can only be used in very specific ways.
2252     // We take advantage of that here.
2253     auto User = Use.getUser();
2254     if (isa<LoadInst>(User) || isa<StoreInst>(User))
2255       continue;
2256 
2257     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2258     auto Call = cast<Instruction>(User);
2259 
2260     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2261 
2262     // Use the returned slot address as the call argument.
2263     Use.set(Addr);
2264   }
2265 
2266   // All the uses should be loads and stores now.
2267   assert(isAllocaPromotable(Alloca));
2268 }
2269 
/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
2272 ///
2273 /// The argument keeps the swifterror flag.
2274 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2275                                         coro::Shape &Shape,
2276                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2277   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2278 
2279   auto ArgTy = cast<PointerType>(Arg.getType());
2280   auto ValueTy = ArgTy->getPointerElementType();
2281 
2282   // Reduce to the alloca case:
2283 
2284   // Create an alloca and replace all uses of the arg with it.
2285   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2286   Arg.replaceAllUsesWith(Alloca);
2287 
2288   // Set an initial value in the alloca.  swifterror is always null on entry.
2289   auto InitialValue = Constant::getNullValue(ValueTy);
2290   Builder.CreateStore(InitialValue, Alloca);
2291 
2292   // Find all the suspends in the function and save and restore around them.
2293   for (auto Suspend : Shape.CoroSuspends) {
2294     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2295   }
2296 
2297   // Find all the coro.ends in the function and restore the error value.
2298   for (auto End : Shape.CoroEnds) {
2299     Builder.SetInsertPoint(End);
2300     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2301     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2302   }
2303 
2304   // Now we can use the alloca logic.
2305   AllocasToPromote.push_back(Alloca);
2306   eliminateSwiftErrorAlloca(F, Alloca, Shape);
2307 }
2308 
2309 /// Eliminate all problematic uses of swifterror arguments and allocas
2310 /// from the function.  We'll fix them up later when splitting the function.
2311 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2312   SmallVector<AllocaInst*, 4> AllocasToPromote;
2313 
2314   // Look for a swifterror argument.
2315   for (auto &Arg : F.args()) {
2316     if (!Arg.hasSwiftErrorAttr()) continue;
2317 
2318     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2319     break;
2320   }
2321 
2322   // Look for swifterror allocas.
2323   for (auto &Inst : F.getEntryBlock()) {
2324     auto Alloca = dyn_cast<AllocaInst>(&Inst);
2325     if (!Alloca || !Alloca->isSwiftError()) continue;
2326 
2327     // Clear the swifterror flag.
2328     Alloca->setSwiftError(false);
2329 
2330     AllocasToPromote.push_back(Alloca);
2331     eliminateSwiftErrorAlloca(F, Alloca, Shape);
2332   }
2333 
2334   // If we have any allocas to promote, compute a dominator tree and
2335   // promote them en masse.
2336   if (!AllocasToPromote.empty()) {
2337     DominatorTree DT(F);
2338     PromoteMemToReg(AllocasToPromote, DT);
2339   }
2340 }
2341 
2342 /// retcon and retcon.once conventions assume that all spill uses can be sunk
2343 /// after the coro.begin intrinsic.
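/// Illustratively, a cast or GEP of a spilled value that was computed before
/// coro.begin is moved to just after coro.begin, since in these ABIs the frame
/// (and therefore the spill slots) is only available from coro.begin onwards.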
2344 static void sinkSpillUsesAfterCoroBegin(Function &F,
2345                                         const FrameDataInfo &FrameData,
2346                                         CoroBeginInst *CoroBegin) {
2347   DominatorTree Dom(F);
2348 
2349   SmallSetVector<Instruction *, 32> ToMove;
2350   SmallVector<Instruction *, 32> Worklist;
2351 
2352   // Collect all users that precede coro.begin.
2353   for (auto *Def : FrameData.getAllDefs()) {
2354     for (User *U : Def->users()) {
2355       auto Inst = cast<Instruction>(U);
2356       if (Inst->getParent() != CoroBegin->getParent() ||
2357           Dom.dominates(CoroBegin, Inst))
2358         continue;
2359       if (ToMove.insert(Inst))
2360         Worklist.push_back(Inst);
2361     }
2362   }
2363   // Recursively collect users before coro.begin.
2364   while (!Worklist.empty()) {
2365     auto *Def = Worklist.pop_back_val();
2366     for (User *U : Def->users()) {
2367       auto Inst = cast<Instruction>(U);
2368       if (Dom.dominates(CoroBegin, Inst))
2369         continue;
2370       if (ToMove.insert(Inst))
2371         Worklist.push_back(Inst);
2372     }
2373   }
2374 
2375   // Sort by dominance.
2376   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2377   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If a dominates b it should precede (<) b.
2379     return Dom.dominates(A, B);
2380   });
2381 
2382   Instruction *InsertPt = CoroBegin->getNextNode();
2383   for (Instruction *Inst : InsertionList)
2384     Inst->moveBefore(InsertPt);
2385 }
2386 
/// For each local variable all of whose uses lie within a single region (the
/// entry region or the region following one suspend point), we sink its
/// lifetime.start markers into the block that starts that region. Doing so
/// minimizes the live range of each variable and hence the amount of data we
/// end up putting on the frame.
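///
/// For example (illustrative IR), given
///
///   entry:
///     %x = alloca i32
///     %p = bitcast i32* %x to i8*
///     call void @llvm.lifetime.start.p0i8(i64 4, i8* %p)
///     ...
///   await.ready:                   ; single successor of a suspend block
///     store i32 1, i32* %x
///     ...
///
/// the lifetime.start is re-emitted in %await.ready (and the original marker
/// erased), since every use of %x other than the lifetime marker is confined
/// to that region.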
2391 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2392                                      SuspendCrossingInfo &Checker) {
2393   DominatorTree DT(F);
2394 
2395   // Collect all possible basic blocks which may dominate all uses of allocas.
2396   SmallPtrSet<BasicBlock *, 4> DomSet;
2397   DomSet.insert(&F.getEntryBlock());
2398   for (auto *CSI : Shape.CoroSuspends) {
2399     BasicBlock *SuspendBlock = CSI->getParent();
2400     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2401            "should have split coro.suspend into its own block");
2402     DomSet.insert(SuspendBlock->getSingleSuccessor());
2403   }
2404 
2405   for (Instruction &I : instructions(F)) {
2406     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2407     if (!AI)
2408       continue;
2409 
2410     for (BasicBlock *DomBB : DomSet) {
2411       bool Valid = true;
2412       SmallVector<Instruction *, 1> Lifetimes;
2413 
2414       auto isLifetimeStart = [](Instruction* I) {
2415         if (auto* II = dyn_cast<IntrinsicInst>(I))
2416           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2417         return false;
2418       };
2419 
2420       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2421         if (isLifetimeStart(U)) {
2422           Lifetimes.push_back(U);
2423           return true;
2424         }
2425         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2426           return false;
2427         if (isLifetimeStart(U->user_back())) {
2428           Lifetimes.push_back(U->user_back());
2429           return true;
2430         }
2431         return false;
2432       };
2433 
2434       for (User *U : AI->users()) {
2435         Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers, if they are all
        // dominated by one of the candidate blocks and do not cross
        // suspend points, then there is no need to spill the instruction.
2440         if (!DT.dominates(DomBB, UI->getParent()) ||
2441             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2442           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2443           // markers.
2444           if (collectLifetimeStart(UI, AI))
2445             continue;
2446           Valid = false;
2447           break;
2448         }
2449       }
      // Sink the lifetime.start markers into the dominating block when they
      // are the only users located outside the region.
2452       if (Valid && Lifetimes.size() != 0) {
2453         // May be AI itself, when the type of AI is i8*
2454         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2455           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2456             return AI;
2457           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2458           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2459                                   DomBB->getTerminator());
2460         }(AI);
2461 
2462         auto *NewLifetime = Lifetimes[0]->clone();
2463         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2464         NewLifetime->insertBefore(DomBB->getTerminator());
2465 
        // All of the lifetime.start markers outside the region are no longer
        // necessary.
2467         for (Instruction *S : Lifetimes)
2468           S->eraseFromParent();
2469 
2470         break;
2471       }
2472     }
2473   }
2474 }
2475 
2476 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2477                                 const SuspendCrossingInfo &Checker,
2478                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2479   for (Instruction &I : instructions(F)) {
2480     auto *AI = dyn_cast<AllocaInst>(&I);
2481     if (!AI)
2482       continue;
2483     // The PromiseAlloca will be specially handled since it needs to be in a
2484     // fixed position in the frame.
2485     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2486       continue;
2487     }
2488     DominatorTree DT(F);
    // The code that uses the lifetime.start intrinsic does not work for
    // functions with loops that have no exit. Disable it for ABIs that we know
    // may generate such code.
2492     bool ShouldUseLifetimeStartInfo =
2493         (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2494          Shape.ABI != coro::ABI::RetconOnce);
2495     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2496                              *Shape.CoroBegin, Checker,
2497                              ShouldUseLifetimeStartInfo};
2498     Visitor.visitPtr(*AI);
2499     if (!Visitor.getShouldLiveOnFrame())
2500       continue;
2501     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2502                          Visitor.getMayWriteBeforeCoroBegin());
2503   }
2504 }
2505 
2506 void coro::salvageDebugInfo(
2507     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2508     DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
2509   Function *F = DVI->getFunction();
2510   IRBuilder<> Builder(F->getContext());
2511   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2512   while (isa<IntrinsicInst>(InsertPt))
2513     ++InsertPt;
2514   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2515   DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way back to the incoming
  // function argument and convert it into a DIExpression.
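  //
  // For example (illustrative), if the location operand is a GEP off the frame
  // pointer such as
  //   %x.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr, i32 0, i32 4
  // the walk below rewrites the location to %FramePtr and folds the equivalent
  // offset (e.g. a DW_OP_plus_uconst) into the DIExpression.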
2518   bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2519   Value *Storage = DVI->getVariableLocationOp(0);
2520   Value *OriginalStorage = Storage;
2521   while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2522     if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2523       Storage = LdInst->getOperand(0);
2524       // FIXME: This is a heuristic that works around the fact that
2525       // LLVM IR debug intrinsics cannot yet distinguish between
2526       // memory and value locations: Because a dbg.declare(alloca) is
2527       // implicitly a memory location no DW_OP_deref operation for the
2528       // last direct load from an alloca is necessary.  This condition
2529       // effectively drops the *last* DW_OP_deref in the expression.
2530       if (!SkipOutermostLoad)
2531         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2532     } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2533       Storage = StInst->getOperand(0);
2534     } else {
2535       SmallVector<uint64_t, 16> Ops;
2536       SmallVector<Value *, 0> AdditionalValues;
2537       Value *Op = llvm::salvageDebugInfoImpl(
2538           *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2539           AdditionalValues);
2540       if (!Op || !AdditionalValues.empty()) {
2541         // If salvaging failed or salvaging produced more than one location
2542         // operand, give up.
2543         break;
2544       }
2545       Storage = Op;
2546       Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2547     }
2548     SkipOutermostLoad = false;
2549   }
2550   if (!Storage)
2551     return;
2552 
2553   // Store a pointer to the coroutine frame object in an alloca so it
2554   // is available throughout the function when producing unoptimized
2555   // code. Extending the lifetime this way is correct because the
2556   // variable has been declared by a dbg.declare intrinsic.
2557   //
  // Avoid creating the alloca when optimizing the frame, since it would be
  // eliminated by optimization passes and the corresponding dbg.declares
  // would then be invalid.
2560   if (!OptimizeFrame && !EnableReuseStorageInFrame)
2561     if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2562       auto &Cached = DbgPtrAllocaCache[Storage];
2563       if (!Cached) {
2564         Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2565                                       Arg->getName() + ".debug");
2566         Builder.CreateStore(Storage, Cached);
2567       }
2568       Storage = Cached;
2569       // FIXME: LLVM lacks nuanced semantics to differentiate between
2570       // memory and direct locations at the IR level. The backend will
2571       // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2572       // location. Thus, if there are deref and offset operations in the
2573       // expression, we need to add a DW_OP_deref at the *start* of the
2574       // expression to first load the contents of the alloca before
2575       // adjusting it with the expression.
2576       if (Expr && Expr->isComplex())
2577         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2578     }
2579 
2580   DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2581   DVI->setExpression(Expr);
  // It makes no sense to move the dbg.value intrinsic.
2583   if (!isa<DbgValueInst>(DVI)) {
2584     if (auto *II = dyn_cast<InvokeInst>(Storage))
2585       DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
2586     else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
2587       DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
2588     else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
2589       assert(!InsertPt->isTerminator() &&
             "Unexpected terminator that could return a storage.");
2591       DVI->moveAfter(InsertPt);
2592     } else if (isa<Argument>(Storage))
2593       DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2594   }
2595 }
2596 
2597 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2598   // Don't eliminate swifterror in async functions that won't be split.
2599   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2600     eliminateSwiftError(F, Shape);
2601 
2602   if (Shape.ABI == coro::ABI::Switch &&
2603       Shape.SwitchLowering.PromiseAlloca) {
2604     Shape.getSwitchCoroId()->clearPromise();
2605   }
2606 
2607   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2608   // intrinsics are in their own blocks to simplify the logic of building up
2609   // SuspendCrossing data.
2610   for (auto *CSI : Shape.CoroSuspends) {
2611     if (auto *Save = CSI->getCoroSave())
2612       splitAround(Save, "CoroSave");
2613     splitAround(CSI, "CoroSuspend");
2614   }
2615 
2616   // Put CoroEnds into their own blocks.
2617   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2618     splitAround(CE, "CoroEnd");
2619 
    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call. (Arguments to the coro.end instruction
    // would be ignored.)
2624     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2625       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2626       if (!MustTailCallFn)
2627         continue;
2628       IRBuilder<> Builder(AsyncEnd);
2629       SmallVector<Value *, 8> Args(AsyncEnd->args());
2630       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2631       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2632                                       Arguments, Builder);
2633       splitAround(Call, "MustTailCall.Before.CoroEnd");
2634     }
2635   }
2636 
  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
2639   cleanupSinglePredPHIs(F);
2640 
  // Transform multi-edge PHI nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
2643   rewritePHIs(F);
2644 
2645   // Build suspend crossing info.
2646   SuspendCrossingInfo Checker(F, Shape);
2647 
2648   IRBuilder<> Builder(F.getContext());
2649   FrameDataInfo FrameData;
2650   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2651   SmallVector<Instruction*, 4> DeadInstructions;
2652 
2653   {
2654     SpillInfo Spills;
2655     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2656       // See if there are materializable instructions across suspend points.
2657       for (Instruction &I : instructions(F))
2658         if (materializable(I)) {
2659           for (User *U : I.users())
2660             if (Checker.isDefinitionAcrossSuspend(I, U))
2661               Spills[&I].push_back(cast<Instruction>(U));
2662 
2663           // Manually add dbg.value metadata uses of I.
2664           SmallVector<DbgValueInst *, 16> DVIs;
2665           findDbgValues(DVIs, &I);
2666           for (auto *DVI : DVIs)
2667             if (Checker.isDefinitionAcrossSuspend(I, DVI))
2668               Spills[&I].push_back(DVI);
2669         }
2670 
2671       if (Spills.empty())
2672         break;
2673 
2674       // Rewrite materializable instructions to be materialized at the use
2675       // point.
2676       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2677       rewriteMaterializableInstructions(Builder, Spills);
2678       Spills.clear();
2679     }
2680   }
2681 
2682   if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2683       Shape.ABI != coro::ABI::RetconOnce)
2684     sinkLifetimeStartMarkers(F, Shape, Checker);
2685 
2686   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2687     collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2688   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2689 
  // Collect the spills for arguments and other non-materializable values.
2691   for (Argument &A : F.args())
2692     for (User *U : A.users())
2693       if (Checker.isDefinitionAcrossSuspend(A, U))
2694         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2695 
2696   for (Instruction &I : instructions(F)) {
2697     // Values returned from coroutine structure intrinsics should not be part
2698     // of the Coroutine Frame.
2699     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2700       continue;
2701 
    // The coroutine promise is always included in the coroutine frame; no
    // need to check for suspend crossing.
2704     if (Shape.ABI == coro::ABI::Switch &&
2705         Shape.SwitchLowering.PromiseAlloca == &I)
2706       continue;
2707 
    // Handle coro.alloca.alloc specially here.
2709     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2710       // Check whether the alloca's lifetime is bounded by suspend points.
2711       if (isLocalAlloca(AI)) {
2712         LocalAllocas.push_back(AI);
2713         continue;
2714       }
2715 
2716       // If not, do a quick rewrite of the alloca and then add spills of
2717       // the rewritten value.  The rewrite doesn't invalidate anything in
2718       // Spills because the other alloca intrinsics have no other operands
2719       // besides AI, and it doesn't invalidate the iteration because we delay
2720       // erasing AI.
2721       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2722 
2723       for (User *U : Alloc->users()) {
2724         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2725           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2726       }
2727       continue;
2728     }
2729 
    // Ignore coro.alloca.get; we process it as part of coro.alloca.alloc.
2731     if (isa<CoroAllocaGetInst>(I))
2732       continue;
2733 
2734     if (isa<AllocaInst>(I))
2735       continue;
2736 
2737     for (User *U : I.users())
2738       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2739         // We cannot spill a token.
2740         if (I.getType()->isTokenTy())
2741           report_fatal_error(
2742               "token definition is separated from the use by a suspend point");
2743         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2744       }
2745   }
2746 
  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage dbg.value intrinsics whose values are
  // already spilled to the frame.
  // The dbg.values for allocas are handled separately.
2751   for (auto &Iter : FrameData.Spills) {
2752     auto *V = Iter.first;
2753     SmallVector<DbgValueInst *, 16> DVIs;
2754     findDbgValues(DVIs, V);
2755     llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
2756       if (Checker.isDefinitionAcrossSuspend(*V, DVI))
2757         FrameData.Spills[V].push_back(DVI);
2758     });
2759   }
2760 
2761   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
2762   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2763       Shape.ABI == coro::ABI::Async)
2764     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
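  // For the switch ABI the frame starts with the resume and destroy function
  // pointers; field order for the spilled values is chosen by the struct
  // layout algorithm, so the sketch below is only illustrative:
  //   %f.Frame = type { void (%f.Frame*)*,         ; resume fn
  //                     void (%f.Frame*)*,         ; destroy fn
  //                     <promise, spills, allocas, suspend index ...> }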
2765   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2766   createFramePtr(Shape);
2767   // For now, this works for C++ programs only.
2768   buildFrameDebugInfo(F, Shape, FrameData);
2769   insertSpills(FrameData, Shape);
2770   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2771 
  for (auto *I : DeadInstructions)
2773     I->eraseFromParent();
2774 }
2775