1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover, for a particular value, whether
// there is a path from its definition to a use that crosses a suspend point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
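//
// For example (an illustrative sketch, not taken from an actual test):
//
//   %x = call i32 @f()                               ; def before a suspend
//   ...suspend point...
//   call void @use(i32 %x)                           ; use after the suspend
//
// conceptually becomes a spill at the definition and a reload at the use:
//
//   %x = call i32 @f()
//   %x.addr = getelementptr inbounds %f.Frame, %f.Frame* %frame, i32 0, i32 N
//   store i32 %x, i32* %x.addr
//   ...suspend point...
//   %x.reload = load i32, i32* %x.addr
//   call void @use(i32 %x.reload)
//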
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/ScopeExit.h"
20 #include "llvm/ADT/SmallString.h"
21 #include "llvm/Analysis/PtrUseVisitor.h"
22 #include "llvm/Analysis/StackLifetime.h"
23 #include "llvm/Config/llvm-config.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/DIBuilder.h"
26 #include "llvm/IR/Dominators.h"
27 #include "llvm/IR/IRBuilder.h"
28 #include "llvm/IR/InstIterator.h"
29 #include "llvm/Support/CommandLine.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/Support/MathExtras.h"
32 #include "llvm/Support/OptimizedStructLayout.h"
33 #include "llvm/Support/circular_raw_ostream.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
36 #include "llvm/Transforms/Utils/Local.h"
37 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
38 #include <algorithm>
39 
40 using namespace llvm;
41 
42 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
43 // "coro-frame", which results in leaner debug spew.
44 #define DEBUG_TYPE "coro-suspend-crossing"
45 
46 static cl::opt<bool> EnableReuseStorageInFrame(
47     "reuse-storage-in-coroutine-frame", cl::Hidden,
48     cl::desc(
49         "Enable the optimization which would reuse the storage in the coroutine \
50          frame for allocas whose liferanges are not overlapped, for testing purposes"),
51     llvm::cl::init(false));
52 
53 enum { SmallVectorThreshold = 32 };
54 
// Provides a two-way mapping between blocks and numbers.
56 namespace {
57 class BlockToIndexMapping {
58   SmallVector<BasicBlock *, SmallVectorThreshold> V;
59 
60 public:
61   size_t size() const { return V.size(); }
62 
63   BlockToIndexMapping(Function &F) {
64     for (BasicBlock &BB : F)
65       V.push_back(&BB);
66     llvm::sort(V);
67   }
68 
69   size_t blockToIndex(BasicBlock *BB) const {
70     auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
72     return I - V.begin();
73   }
74 
75   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
76 };
77 } // end anonymous namespace
78 
// The SuspendCrossingInfo maintains data that allows answering the question of
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
82 //
83 // For every basic block 'i' it maintains a BlockData that consists of:
84 //   Consumes:  a bit vector which contains a set of indices of blocks that can
85 //              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i' but where at least one of the paths crosses a
//          suspend point
88 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
89 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
90 //
91 namespace {
92 struct SuspendCrossingInfo {
93   BlockToIndexMapping Mapping;
94 
95   struct BlockData {
96     BitVector Consumes;
97     BitVector Kills;
98     bool Suspend = false;
99     bool End = false;
100   };
101   SmallVector<BlockData, SmallVectorThreshold> Block;
102 
103   iterator_range<succ_iterator> successors(BlockData const &BD) const {
104     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
105     return llvm::successors(BB);
106   }
107 
108   BlockData &getBlockData(BasicBlock *BB) {
109     return Block[Mapping.blockToIndex(BB)];
110   }
111 
112   void dump() const;
113   void dump(StringRef Label, BitVector const &BV) const;
114 
115   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
116 
117   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
118     size_t const DefIndex = Mapping.blockToIndex(DefBB);
119     size_t const UseIndex = Mapping.blockToIndex(UseBB);
120 
121     bool const Result = Block[UseIndex].Kills[DefIndex];
122     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
123                       << " answer is " << Result << "\n");
124     return Result;
125   }
126 
127   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
128     auto *I = cast<Instruction>(U);
129 
130     // We rewrote PHINodes, so that only the ones with exactly one incoming
131     // value need to be analyzed.
132     if (auto *PN = dyn_cast<PHINode>(I))
133       if (PN->getNumIncomingValues() > 1)
134         return false;
135 
136     BasicBlock *UseBB = I->getParent();
137 
138     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
139     // llvm.coro.suspend.async as if they were uses in the suspend's single
140     // predecessor: the uses conceptually occur before the suspend.
141     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
142       UseBB = UseBB->getSinglePredecessor();
143       assert(UseBB && "should have split coro.suspend into its own block");
144     }
145 
146     return hasPathCrossingSuspendPoint(DefBB, UseBB);
147   }
148 
149   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
150     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
151   }
152 
153   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
154     auto *DefBB = I.getParent();
155 
156     // As a special case, treat values produced by an llvm.coro.suspend.*
157     // as if they were defined in the single successor: the uses
158     // conceptually occur after the suspend.
159     if (isa<AnyCoroSuspendInst>(I)) {
160       DefBB = DefBB->getSingleSuccessor();
161       assert(DefBB && "should have split coro.suspend into its own block");
162     }
163 
164     return isDefinitionAcrossSuspend(DefBB, U);
165   }
166 
167   bool isDefinitionAcrossSuspend(Value &V, User *U) const {
168     if (auto *Arg = dyn_cast<Argument>(&V))
169       return isDefinitionAcrossSuspend(*Arg, U);
170     if (auto *Inst = dyn_cast<Instruction>(&V))
171       return isDefinitionAcrossSuspend(*Inst, U);
172 
    llvm_unreachable(
        "Coroutines can only collect Arguments and Instructions for now.");
175   }
176 };
177 } // end anonymous namespace
178 
179 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
180 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
181                                                 BitVector const &BV) const {
182   dbgs() << Label << ":";
183   for (size_t I = 0, N = BV.size(); I < N; ++I)
184     if (BV[I])
185       dbgs() << " " << Mapping.indexToBlock(I)->getName();
186   dbgs() << "\n";
187 }
188 
189 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
190   for (size_t I = 0, N = Block.size(); I < N; ++I) {
191     BasicBlock *const B = Mapping.indexToBlock(I);
192     dbgs() << B->getName() << ":\n";
193     dump("   Consumes", Block[I].Consumes);
194     dump("      Kills", Block[I].Kills);
195   }
196   dbgs() << "\n";
197 }
198 #endif
199 
200 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
201     : Mapping(F) {
202   const size_t N = Mapping.size();
203   Block.resize(N);
204 
205   // Initialize every block so that it consumes itself
206   for (size_t I = 0; I < N; ++I) {
207     auto &B = Block[I];
208     B.Consumes.resize(N);
209     B.Kills.resize(N);
210     B.Consumes.set(I);
211   }
212 
213   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
214   // the code beyond coro.end is reachable during initial invocation of the
215   // coroutine.
216   for (auto *CE : Shape.CoroEnds)
217     getBlockData(CE->getParent()).End = true;
218 
  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
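  //
  // For instance (an illustrative sketch), in
  //
  //   %save = call token @llvm.coro.save(i8* null)
  //   call void @callback()   ; may resume the coroutine before the suspend
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // any value live across %save must already be in the frame before the call,
  // so the coro.save block is treated as a suspend block as well.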
223   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
224     BasicBlock *SuspendBlock = BarrierInst->getParent();
225     auto &B = getBlockData(SuspendBlock);
226     B.Suspend = true;
227     B.Kills |= B.Consumes;
228   };
229   for (auto *CSI : Shape.CoroSuspends) {
230     markSuspendBlock(CSI);
231     if (auto *Save = CSI->getCoroSave())
232       markSuspendBlock(Save);
233   }
234 
235   // Iterate propagating consumes and kills until they stop changing.
236   int Iteration = 0;
237   (void)Iteration;
238 
239   bool Changed;
240   do {
241     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
242     LLVM_DEBUG(dbgs() << "==============\n");
243 
244     Changed = false;
245     for (size_t I = 0; I < N; ++I) {
246       auto &B = Block[I];
247       for (BasicBlock *SI : successors(B)) {
248 
249         auto SuccNo = Mapping.blockToIndex(SI);
250 
        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
253         auto &S = Block[SuccNo];
254         auto SavedConsumes = S.Consumes;
255         auto SavedKills = S.Kills;
256 
257         // Propagate Kills and Consumes from block B into its successor S.
258         S.Consumes |= B.Consumes;
259         S.Kills |= B.Kills;
260 
        // If block B is a suspend block, it should propagate kills into its
        // successor for every block that B consumes.
263         if (B.Suspend) {
264           S.Kills |= B.Consumes;
265         }
266         if (S.Suspend) {
267           // If block S is a suspend block, it should kill all of the blocks it
268           // consumes.
269           S.Kills |= S.Consumes;
270         } else if (S.End) {
271           // If block S is an end block, it should not propagate kills as the
272           // blocks following coro.end() are reached during initial invocation
273           // of the coroutine while all the data are still available on the
274           // stack or in the registers.
275           S.Kills.reset();
276         } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block, and we need to make sure that it is not in its own
          // kill set.
279           S.Kills.reset(SuccNo);
280         }
281 
282         // See if anything changed.
283         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
284 
285         if (S.Kills != SavedKills) {
286           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
287                             << "\n");
288           LLVM_DEBUG(dump("S.Kills", S.Kills));
289           LLVM_DEBUG(dump("SavedKills", SavedKills));
290         }
291         if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
293           LLVM_DEBUG(dump("S.Consume", S.Consumes));
294           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
295         }
296       }
297     }
298   } while (Changed);
299   LLVM_DEBUG(dump());
300 }
301 
302 #undef DEBUG_TYPE // "coro-suspend-crossing"
303 #define DEBUG_TYPE "coro-frame"
304 
305 namespace {
306 class FrameTypeBuilder;
307 // Mapping from the to-be-spilled value to all the users that need reload.
308 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
309 struct AllocaInfo {
310   AllocaInst *Alloca;
311   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
312   bool MayWriteBeforeCoroBegin;
313   AllocaInfo(AllocaInst *Alloca,
314              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
315              bool MayWriteBeforeCoroBegin)
316       : Alloca(Alloca), Aliases(std::move(Aliases)),
317         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
318 };
319 struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
322   SpillInfo Spills;
323   // Allocas contains all values defined as allocas that need to live in the
324   // frame.
325   SmallVector<AllocaInfo, 8> Allocas;
326 
327   SmallVector<Value *, 8> getAllDefs() const {
328     SmallVector<Value *, 8> Defs;
329     for (const auto &P : Spills)
330       Defs.push_back(P.first);
331     for (const auto &A : Allocas)
332       Defs.push_back(A.Alloca);
333     return Defs;
334   }
335 
336   uint32_t getFieldIndex(Value *V) const {
337     auto Itr = FieldIndexMap.find(V);
338     assert(Itr != FieldIndexMap.end() &&
339            "Value does not have a frame field index");
340     return Itr->second;
341   }
342 
343   void setFieldIndex(Value *V, uint32_t Index) {
344     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
345            "Cannot set the index for the same field twice.");
346     FieldIndexMap[V] = Index;
347   }
348 
349   uint64_t getAlign(Value *V) const {
350     auto Iter = FieldAlignMap.find(V);
351     assert(Iter != FieldAlignMap.end());
352     return Iter->second;
353   }
354 
355   void setAlign(Value *V, uint64_t Align) {
356     assert(FieldAlignMap.count(V) == 0);
357     FieldAlignMap.insert({V, Align});
358   }
359 
360   uint64_t getOffset(Value *V) const {
361     auto Iter = FieldOffsetMap.find(V);
362     assert(Iter != FieldOffsetMap.end());
363     return Iter->second;
364   }
365 
366   void setOffset(Value *V, uint64_t Offset) {
367     assert(FieldOffsetMap.count(V) == 0);
368     FieldOffsetMap.insert({V, Offset});
369   }
370 
371   // Remap the index of every field in the frame, using the final layout index.
372   void updateLayoutIndex(FrameTypeBuilder &B);
373 
374 private:
375   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
376   // twice by mistake.
377   bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indices in the frame. They are first set to
  // their original insertion field index. After the frame is built, their
  // indices are updated to the final layout index.
381   DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment in the frame. They are set after
  // the frame is built.
384   DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset in the frame. They are set after
  // the frame is built.
387   DenseMap<Value *, uint64_t> FieldOffsetMap;
388 };
389 } // namespace
390 
391 #ifndef NDEBUG
392 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
393   dbgs() << "------------- " << Title << "--------------\n";
394   for (const auto &E : Spills) {
395     E.first->dump();
396     dbgs() << "   user: ";
397     for (auto *I : E.second)
398       I->dump();
399   }
400 }
401 
402 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
403   dbgs() << "------------- Allocas --------------\n";
404   for (const auto &A : Allocas) {
405     A.Alloca->dump();
406   }
407 }
408 #endif
409 
410 namespace {
411 using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
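//
// For example (an illustrative sketch), given
//
//   %x = alloca i32, align 128
//
// the natural (ABI) alignment of i32 is only 4, but the field created for %x
// must land at a 128-byte-aligned offset in the frame, which may require an
// [N x i8] padding field in front of it (see FrameTypeBuilder::finish).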
416 class FrameTypeBuilder {
417 private:
418   struct Field {
419     uint64_t Size;
420     uint64_t Offset;
421     Type *Ty;
422     FieldIDType LayoutFieldIndex;
423     Align Alignment;
424     Align TyAlignment;
425   };
426 
427   const DataLayout &DL;
428   LLVMContext &Context;
429   uint64_t StructSize = 0;
430   Align StructAlign;
431   bool IsFinished = false;
432 
433   Optional<Align> MaxFrameAlignment;
434 
435   SmallVector<Field, 8> Fields;
436   DenseMap<Value*, unsigned> FieldIndexByKey;
437 
438 public:
439   FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
440                    Optional<Align> MaxFrameAlignment)
441       : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
442 
443   /// Add a field to this structure for the storage of an `alloca`
444   /// instruction.
445   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
446                                                bool IsHeader = false) {
447     Type *Ty = AI->getAllocatedType();
448 
449     // Make an array type if this is a static array allocation.
450     if (AI->isArrayAllocation()) {
451       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
452         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
453       else
        report_fatal_error("Coroutines cannot handle non-static allocas yet");
455     }
456 
457     return addField(Ty, AI->getAlign(), IsHeader);
458   }
459 
  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at https://bugs.llvm.org/show_bug.cgi?id=45566:
463   ///
464   ///     cppcoro::task<void> alternative_paths(bool cond) {
465   ///         if (cond) {
466   ///             big_structure a;
467   ///             process(a);
468   ///             co_await something();
469   ///         } else {
470   ///             big_structure b;
471   ///             process2(b);
472   ///             co_await something();
473   ///         }
474   ///     }
475   ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
478   ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts into non-overlapping sets so that allocas in the same set can
  /// share the same slot in the coroutine frame. It then adds a field for each
  /// set, using the type of the largest alloca in the set as the field type.
  ///
  /// Side effects: Because we sort the allocas, the order of allocas in the
  /// frame may differ from the order in the source code.
487   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
488                           coro::Shape &Shape);
489 
490   /// Add a field to this structure.
491   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
492                                       bool IsHeader = false,
493                                       bool IsSpillOfValue = false) {
494     assert(!IsFinished && "adding fields to a finished builder");
495     assert(Ty && "must provide a type for a field");
496 
497     // The field size is always the alloc size of the type.
498     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
499 
    // For an alloca with size=0, we don't need to add a field; it can
    // just point to any index in the frame. Use index 0.
502     if (FieldSize == 0) {
503       return 0;
504     }
505 
506     // The field alignment might not be the type alignment, but we need
507     // to remember the type alignment anyway to build the type.
508     // If we are spilling values we don't need to worry about ABI alignment
509     // concerns.
510     auto ABIAlign = DL.getABITypeAlign(Ty);
511     Align TyAlignment =
512         (IsSpillOfValue && MaxFrameAlignment)
513             ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
514             : ABIAlign;
515     if (!FieldAlignment) {
516       FieldAlignment = TyAlignment;
517     }
518 
519     // Lay out header fields immediately.
520     uint64_t Offset;
521     if (IsHeader) {
522       Offset = alignTo(StructSize, FieldAlignment);
523       StructSize = Offset + FieldSize;
524 
525     // Everything else has a flexible offset.
526     } else {
527       Offset = OptimizedStructLayoutField::FlexibleOffset;
528     }
529 
530     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
531     return Fields.size() - 1;
532   }
533 
534   /// Finish the layout and set the body on the given type.
535   void finish(StructType *Ty);
536 
537   uint64_t getStructSize() const {
538     assert(IsFinished && "not yet finished!");
539     return StructSize;
540   }
541 
542   Align getStructAlign() const {
543     assert(IsFinished && "not yet finished!");
544     return StructAlign;
545   }
546 
547   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
548     assert(IsFinished && "not yet finished!");
549     return Fields[Id].LayoutFieldIndex;
550   }
551 
552   Field getLayoutField(FieldIDType Id) const {
553     assert(IsFinished && "not yet finished!");
554     return Fields[Id];
555   }
556 };
557 } // namespace
558 
559 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
560   auto Updater = [&](Value *I) {
561     auto Field = B.getLayoutField(getFieldIndex(I));
562     setFieldIndex(I, Field.LayoutFieldIndex);
563     setAlign(I, Field.Alignment.value());
564     setOffset(I, Field.Offset);
565   };
566   LayoutIndexUpdateStarted = true;
567   for (auto &S : Spills)
568     Updater(S.first);
569   for (const auto &A : Allocas)
570     Updater(A.Alloca);
571   LayoutIndexUpdateStarted = false;
572 }
573 
574 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
575                                           FrameDataInfo &FrameData,
576                                           coro::Shape &Shape) {
577   using AllocaSetType = SmallVector<AllocaInst *, 4>;
578   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
579 
  // We need to add fields for the allocas at the end of this function.
581   auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
582     for (auto AllocaList : NonOverlapedAllocas) {
583       auto *LargestAI = *AllocaList.begin();
584       FieldIDType Id = addFieldForAlloca(LargestAI);
585       for (auto *Alloca : AllocaList)
586         FrameData.setFieldIndex(Alloca, Id);
587     }
588   });
589 
590   if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
591     for (const auto &A : FrameData.Allocas) {
592       AllocaInst *Alloca = A.Alloca;
593       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
594     }
595     return;
596   }
597 
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the life ranges of all allocas overlap
  // in the blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the life range
  // for each alloca. That should be reasonable, since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, that
  // case seems too complex to handle, and it is harmless to skip these
  // patterns since it only prevents the allocas from sharing the same slot.
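  //
  // For instance (an illustrative sketch), a suspend point in the switch ABI
  // typically looks like
  //
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %s, label %suspend [i8 0, label %resume
  //                                 i8 1, label %cleanup]
  //
  // and below we temporarily redirect the default destination (%suspend, the
  // path that returns to the caller and reaches coro.end) to the resume
  // successor, so those blocks are skipped by the lifetime analysis.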
610   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
611   for (auto CoroSuspendInst : Shape.CoroSuspends) {
612     for (auto U : CoroSuspendInst->users()) {
613       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
614         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
615         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
616         SWI->setDefaultDest(SWI->getSuccessor(1));
617       }
618     }
619   }
620 
621   auto ExtractAllocas = [&]() {
622     AllocaSetType Allocas;
623     Allocas.reserve(FrameData.Allocas.size());
624     for (const auto &A : FrameData.Allocas)
625       Allocas.push_back(A.Alloca);
626     return Allocas;
627   };
628   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
629                                       StackLifetime::LivenessType::May);
630   StackLifetimeAnalyzer.run();
  auto IsAllocaInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
632     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
633         StackLifetimeAnalyzer.getLiveRange(AI2));
634   };
635   auto GetAllocaSize = [&](const AllocaInfo &A) {
636     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
637     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
638     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
639     return RetSize->getFixedSize();
640   };
  // Put larger allocas in the front, so that the larger allocas have higher
  // priority to merge, which can potentially save more space. This also keeps
  // each AllocaSet ordered, so the largest alloca in a set is easy to find.
645   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
646     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
647   });
648   for (const auto &A : FrameData.Allocas) {
649     AllocaInst *Alloca = A.Alloca;
650     bool Merged = false;
    // Try to find a NonOverlappedAllocaSet with which the Alloca does not
    // interfere. If one is found, insert the alloca into that
    // NonOverlappedAllocaSet.
654     for (auto &AllocaSet : NonOverlapedAllocas) {
655       assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return IsAllocaInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the address
      // of A should satisfy the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process, but they seem hard
      // to implement and would benefit little.
665       bool Alignable = [&]() -> bool {
666         auto *LargestAlloca = *AllocaSet.begin();
667         return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
668                0;
669       }();
      bool CouldMerge = NoInterference && Alignable;
671       if (!CouldMerge)
672         continue;
673       AllocaSet.push_back(Alloca);
674       Merged = true;
675       break;
676     }
677     if (!Merged) {
678       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
679     }
680   }
  // Restore the default destination of each switch instruction that we
  // modified above.
683   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
684     SwitchInst *SWI = SwitchAndDefaultDest.first;
685     BasicBlock *DestBB = SwitchAndDefaultDest.second;
686     SWI->setDefaultDest(DestBB);
687   }
  // This debug output tells us which allocas were merged into one slot.
689   LLVM_DEBUG(for (auto &AllocaSet
690                   : NonOverlapedAllocas) {
691     if (AllocaSet.size() > 1) {
692       dbgs() << "In Function:" << F.getName() << "\n";
693       dbgs() << "Find Union Set "
694              << "\n";
695       dbgs() << "\tAllocas are \n";
696       for (auto Alloca : AllocaSet)
697         dbgs() << "\t\t" << *Alloca << "\n";
698     }
699   });
700 }
701 
702 void FrameTypeBuilder::finish(StructType *Ty) {
703   assert(!IsFinished && "already finished!");
704 
705   // Prepare the optimal-layout field array.
706   // The Id in the layout field is a pointer to our Field for it.
707   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
708   LayoutFields.reserve(Fields.size());
709   for (auto &Field : Fields) {
710     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
711                               Field.Offset);
712   }
713 
714   // Perform layout.
715   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
716   StructSize = SizeAndAlign.first;
717   StructAlign = SizeAndAlign.second;
718 
719   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
720     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
721   };
722 
723   // We need to produce a packed struct type if there's a field whose
724   // assigned offset isn't a multiple of its natural type alignment.
725   bool Packed = [&] {
726     for (auto &LayoutField : LayoutFields) {
727       auto &F = getField(LayoutField);
728       if (!isAligned(F.TyAlignment, LayoutField.Offset))
729         return true;
730     }
731     return false;
732   }();
733 
734   // Build the struct body.
735   SmallVector<Type*, 16> FieldTypes;
736   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
737   uint64_t LastOffset = 0;
738   for (auto &LayoutField : LayoutFields) {
739     auto &F = getField(LayoutField);
740 
741     auto Offset = LayoutField.Offset;
742 
743     // Add a padding field if there's a padding gap and we're either
744     // building a packed struct or the padding gap is more than we'd
745     // get from aligning to the field type's natural alignment.
746     assert(Offset >= LastOffset);
747     if (Offset != LastOffset) {
748       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
749         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
750                                             Offset - LastOffset));
751     }
752 
753     F.Offset = Offset;
754     F.LayoutFieldIndex = FieldTypes.size();
755 
756     FieldTypes.push_back(F.Ty);
757     LastOffset = Offset + F.Size;
758   }
759 
760   Ty->setBody(FieldTypes, Packed);
761 
762 #ifndef NDEBUG
763   // Check that the IR layout matches the offsets we expect.
764   auto Layout = DL.getStructLayout(Ty);
765   for (auto &F : Fields) {
766     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
767     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
768   }
769 #endif
770 
771   IsFinished = true;
772 }
773 
774 static void cacheDIVar(FrameDataInfo &FrameData,
775                        DenseMap<Value *, DILocalVariable *> &DIVarCache) {
776   for (auto *V : FrameData.getAllDefs()) {
777     if (DIVarCache.find(V) != DIVarCache.end())
778       continue;
779 
780     auto DDIs = FindDbgDeclareUses(V);
781     auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
782       return DDI->getExpression()->getNumElements() == 0;
783     });
784     if (I != DDIs.end())
785       DIVarCache.insert({V, (*I)->getVariable()});
786   }
787 }
788 
/// Create a name for Type. It uses MDString to store the newly created string
/// to avoid memory leaks.
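///
/// For example (illustrative): i32 becomes "__int_32", float becomes
/// "__float_", a pointer to a named struct %struct.Foo becomes
/// "struct_Foo_Ptr", and anything unrecognized becomes "UnknownType".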
791 static StringRef solveTypeName(Type *Ty) {
792   if (Ty->isIntegerTy()) {
    // The longest common name may be '__int_128', which has 9 characters.
794     SmallString<16> Buffer;
795     raw_svector_ostream OS(Buffer);
796     OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
797     auto *MDName = MDString::get(Ty->getContext(), OS.str());
798     return MDName->getString();
799   }
800 
801   if (Ty->isFloatingPointTy()) {
802     if (Ty->isFloatTy())
803       return "__float_";
804     if (Ty->isDoubleTy())
805       return "__double_";
806     return "__floating_type_";
807   }
808 
809   if (Ty->isPointerTy()) {
810     auto *PtrTy = cast<PointerType>(Ty);
811     Type *PointeeTy = PtrTy->getElementType();
812     auto Name = solveTypeName(PointeeTy);
813     if (Name == "UnknownType")
814       return "PointerType";
815     SmallString<16> Buffer;
816     Twine(Name + "_Ptr").toStringRef(Buffer);
817     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
818     return MDName->getString();
819   }
820 
821   if (Ty->isStructTy()) {
822     if (!cast<StructType>(Ty)->hasName())
823       return "__LiteralStructType_";
824 
825     auto Name = Ty->getStructName();
826 
827     SmallString<16> Buffer(Name);
828     for_each(Buffer, [](auto &Iter) {
829       if (Iter == '.' || Iter == ':')
830         Iter = '_';
831     });
832     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
833     return MDName->getString();
834   }
835 
836   return "UnknownType";
837 }
838 
839 static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
840                            const DataLayout &Layout, DIScope *Scope,
841                            unsigned LineNum,
842                            DenseMap<Type *, DIType *> &DITypeCache) {
843   if (DIType *DT = DITypeCache.lookup(Ty))
844     return DT;
845 
846   StringRef Name = solveTypeName(Ty);
847 
848   DIType *RetType = nullptr;
849 
850   if (Ty->isIntegerTy()) {
851     auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
852     RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
853                                       llvm::DINode::FlagArtificial);
854   } else if (Ty->isFloatingPointTy()) {
855     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
856                                       dwarf::DW_ATE_float,
857                                       llvm::DINode::FlagArtificial);
858   } else if (Ty->isPointerTy()) {
859     // Construct BasicType instead of PointerType to avoid infinite
860     // search problem.
861     // For example, we would be in trouble if we traverse recursively:
862     //
863     //  struct Node {
864     //      Node* ptr;
865     //  };
866     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
867                                       dwarf::DW_ATE_address,
868                                       llvm::DINode::FlagArtificial);
869   } else if (Ty->isStructTy()) {
870     auto *DIStruct = Builder.createStructType(
871         Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
872         Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
873         llvm::DINodeArray());
874 
875     auto *StructTy = cast<StructType>(Ty);
876     SmallVector<Metadata *, 16> Elements;
877     for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
878       DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
879                                  Scope, LineNum, DITypeCache);
880       assert(DITy);
881       Elements.push_back(Builder.createMemberType(
882           Scope, DITy->getName(), Scope->getFile(), LineNum,
883           DITy->getSizeInBits(), DITy->getAlignInBits(),
884           Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
885           llvm::DINode::FlagArtificial, DITy));
886     }
887 
888     Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
889 
890     RetType = DIStruct;
891   } else {
892     LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
893     SmallString<32> Buffer;
894     raw_svector_ostream OS(Buffer);
895     OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
896     RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
897                                       dwarf::DW_ATE_address,
898                                       llvm::DINode::FlagArtificial);
899   }
900 
901   DITypeCache.insert({Ty, RetType});
902   return RetType;
903 }
904 
/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine frame
/// is constructed in the following way:
/// 1. For every value in the frame, we search its uses by dbg.declare to find
///    the corresponding debug variable for the value. If we can find the
///    debug variable, we can get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we can only try to build
///    the DIType from the Type. We do this in solveDIType. We only handle
///    integer, floating-point, pointer, and struct types for now.
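///
/// For example (an illustrative sketch; the exact field order depends on the
/// optimized frame layout), the emitted type resembles
///
///   struct __coro_frame_ty {
///     void (*__resume_fn)();
///     void (*__destroy_fn)();
///     unsigned char __coro_index;
///     ... // promise and spilled values, named via dbg.declare when possible
///   };
///
/// and is exposed through an artificial local variable named "__coro_frame".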
917 static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
918                                 FrameDataInfo &FrameData) {
919   DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies that the function was not
  // compiled with debug info, so we don't need to generate debug info for the
  // frame either.
923   if (!DIS || !DIS->getUnit() ||
924       !dwarf::isCPlusPlus(
925           (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
926     return;
927 
928   assert(Shape.ABI == coro::ABI::Switch &&
929          "We could only build debug infomation for C++ coroutine now.\n");
930 
931   DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
932 
933   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
934   assert(PromiseAlloca &&
935          "Coroutine with switch ABI should own Promise alloca");
936 
937   TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
938   if (DIs.empty())
939     return;
940 
941   DbgDeclareInst *PromiseDDI = DIs.front();
942   DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
943   DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
944   DIFile *DFile = PromiseDIScope->getFile();
945   DILocation *DILoc = PromiseDDI->getDebugLoc().get();
946   unsigned LineNum = PromiseDIVariable->getLine();
947 
948   DICompositeType *FrameDITy = DBuilder.createStructType(
949       DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
950       Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
951       llvm::DINodeArray());
952   StructType *FrameTy = Shape.FrameTy;
953   SmallVector<Metadata *, 16> Elements;
954   DataLayout Layout = F.getParent()->getDataLayout();
955 
956   DenseMap<Value *, DILocalVariable *> DIVarCache;
957   cacheDIVar(FrameData, DIVarCache);
958 
959   unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
960   unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
961   unsigned IndexIndex = Shape.SwitchLowering.IndexField;
962 
963   DenseMap<unsigned, StringRef> NameCache;
964   NameCache.insert({ResumeIndex, "__resume_fn"});
965   NameCache.insert({DestroyIndex, "__destroy_fn"});
966   NameCache.insert({IndexIndex, "__coro_index"});
967 
968   Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
969        *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
970        *IndexTy = FrameTy->getElementType(IndexIndex);
971 
972   DenseMap<unsigned, DIType *> TyCache;
973   TyCache.insert({ResumeIndex,
974                   DBuilder.createBasicType("__resume_fn",
975                                            Layout.getTypeSizeInBits(ResumeFnTy),
976                                            dwarf::DW_ATE_address)});
977   TyCache.insert(
978       {DestroyIndex, DBuilder.createBasicType(
979                          "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
980                          dwarf::DW_ATE_address)});
981 
982   /// FIXME: If we fill the field `SizeInBits` with the actual size of
983   /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
984   TyCache.insert({IndexIndex, DBuilder.createBasicType(
985                                   "__coro_index",
986                                   (Layout.getTypeSizeInBits(IndexTy) < 8)
987                                       ? 8
988                                       : Layout.getTypeSizeInBits(IndexTy),
989                                   dwarf::DW_ATE_unsigned_char)});
990 
991   for (auto *V : FrameData.getAllDefs()) {
992     if (DIVarCache.find(V) == DIVarCache.end())
993       continue;
994 
995     auto Index = FrameData.getFieldIndex(V);
996 
997     NameCache.insert({Index, DIVarCache[V]->getName()});
998     TyCache.insert({Index, DIVarCache[V]->getType()});
999   }
1000 
  // Cache from index to (Align, Offset) pair.
1002   DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
1003   // The Align and Offset of Resume function and Destroy function are fixed.
1004   OffsetCache.insert({ResumeIndex, {8, 0}});
1005   OffsetCache.insert({DestroyIndex, {8, 8}});
1006   OffsetCache.insert(
1007       {IndexIndex,
1008        {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1009 
1010   for (auto *V : FrameData.getAllDefs()) {
1011     auto Index = FrameData.getFieldIndex(V);
1012 
1013     OffsetCache.insert(
1014         {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
1015   }
1016 
1017   DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names, e.g., there may be
  // many i32 and i64 types in one coroutine, and we use i32_0 and i32_1 to
  // distinguish them, since it makes no sense for the names of the fields to
  // conflict with each other.
1022   unsigned UnknownTypeNum = 0;
1023   for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1024     if (OffsetCache.find(Index) == OffsetCache.end())
1025       continue;
1026 
1027     std::string Name;
1028     uint64_t SizeInBits;
1029     uint32_t AlignInBits;
1030     uint64_t OffsetInBits;
1031     DIType *DITy = nullptr;
1032 
1033     Type *Ty = FrameTy->getElementType(Index);
1034     assert(Ty->isSized() && "We can't handle type which is not sized.\n");
1035     SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
1036     AlignInBits = OffsetCache[Index].first * 8;
1037     OffsetInBits = OffsetCache[Index].second * 8;
1038 
1039     if (NameCache.find(Index) != NameCache.end()) {
1040       Name = NameCache[Index].str();
1041       DITy = TyCache[Index];
1042     } else {
1043       DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1044       assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1045       Name = DITy->getName().str();
1046       Name += "_" + std::to_string(UnknownTypeNum);
1047       UnknownTypeNum++;
1048     }
1049 
1050     Elements.push_back(DBuilder.createMemberType(
1051         FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1052         llvm::DINode::FlagArtificial, DITy));
1053   }
1054 
1055   DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1056 
1057   auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1058                                                  DFile, LineNum, FrameDITy,
1059                                                  true, DINode::FlagArtificial);
1060   assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));
1061 
  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame` has been optimized out.
1069   if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1070     auto RetainedNodes = SubProgram->getRetainedNodes();
1071     SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1072                                                  RetainedNodes.end());
1073     RetainedNodesVec.push_back(FrameDIVar);
1074     SubProgram->replaceOperandWith(
1075         7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1076   }
1077 
1078   DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1079                          DBuilder.createExpression(), DILoc,
1080                          Shape.FramePtr->getNextNode());
1081 }
1082 
1083 // Build a struct that will keep state for an active coroutine.
1084 //   struct f.frame {
1085 //     ResumeFnTy ResumeFnAddr;
1086 //     ResumeFnTy DestroyFnAddr;
1087 //     int ResumeIndex;
1088 //     ... promise (if present) ...
1089 //     ... spills ...
1090 //   };
1091 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1092                                   FrameDataInfo &FrameData) {
1093   LLVMContext &C = F.getContext();
1094   const DataLayout &DL = F.getParent()->getDataLayout();
1095   StructType *FrameTy = [&] {
1096     SmallString<32> Name(F.getName());
1097     Name.append(".Frame");
1098     return StructType::create(C, Name);
1099   }();
1100 
1101   // We will use this value to cap the alignment of spilled values.
1102   Optional<Align> MaxFrameAlignment;
1103   if (Shape.ABI == coro::ABI::Async)
1104     MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1105   FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1106 
1107   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1108   Optional<FieldIDType> SwitchIndexFieldId;
1109 
1110   if (Shape.ABI == coro::ABI::Switch) {
1111     auto *FramePtrTy = FrameTy->getPointerTo();
1112     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
1113                                    /*IsVarArg=*/false);
1114     auto *FnPtrTy = FnTy->getPointerTo();
1115 
1116     // Add header fields for the resume and destroy functions.
1117     // We can rely on these being perfectly packed.
1118     (void)B.addField(FnPtrTy, None, /*header*/ true);
1119     (void)B.addField(FnPtrTy, None, /*header*/ true);
1120 
1121     // PromiseAlloca field needs to be explicitly added here because it's
1122     // a header field with a fixed offset based on its alignment. Hence it
1123     // needs special handling and cannot be added to FrameData.Allocas.
1124     if (PromiseAlloca)
1125       FrameData.setFieldIndex(
1126           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1127 
1128     // Add a field to store the suspend index.  This doesn't need to
1129     // be in the header.
1130     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1131     Type *IndexType = Type::getIntNTy(C, IndexBits);
1132 
1133     SwitchIndexFieldId = B.addField(IndexType, None);
1134   } else {
1135     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1136   }
1137 
  // Because multiple allocas may share the same field slot,
  // we add the fields for the allocas here.
1140   B.addFieldForAllocas(F, FrameData, Shape);
1141   // Add PromiseAlloca to Allocas list so that
1142   // 1. updateLayoutIndex could update its index after
1143   // `performOptimizedStructLayout`
1144   // 2. it is processed in insertSpills.
1145   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
1148     FrameData.Allocas.emplace_back(
1149         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
1150   // Create an entry for every spilled value.
1151   for (auto &S : FrameData.Spills) {
1152     Type *FieldType = S.first->getType();
1153     // For byval arguments, we need to store the pointed value in the frame,
1154     // instead of the pointer itself.
1155     if (const Argument *A = dyn_cast<Argument>(S.first))
1156       if (A->hasByValAttr())
1157         FieldType = A->getParamByValType();
1158     FieldIDType Id =
1159         B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
1160     FrameData.setFieldIndex(S.first, Id);
1161   }
1162 
1163   B.finish(FrameTy);
1164   FrameData.updateLayoutIndex(B);
1165   Shape.FrameAlign = B.getStructAlign();
1166   Shape.FrameSize = B.getStructSize();
1167 
1168   switch (Shape.ABI) {
1169   case coro::ABI::Switch: {
1170     // In the switch ABI, remember the switch-index field.
1171     auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1172     Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1173     Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1174     Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1175 
1176     // Also round the frame size up to a multiple of its alignment, as is
1177     // generally expected in C/C++.
1178     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1179     break;
1180   }
1181 
1182   // In the retcon ABI, remember whether the frame is inline in the storage.
1183   case coro::ABI::Retcon:
1184   case coro::ABI::RetconOnce: {
1185     auto Id = Shape.getRetconCoroId();
1186     Shape.RetconLowering.IsFrameInlineInStorage
1187       = (B.getStructSize() <= Id->getStorageSize() &&
1188          B.getStructAlign() <= Id->getStorageAlignment());
1189     break;
1190   }
1191   case coro::ABI::Async: {
1192     Shape.AsyncLowering.FrameOffset =
1193         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1194     // Also make the final context size a multiple of the context alignment to
1195     // make allocation easier for allocators.
1196     Shape.AsyncLowering.ContextSize =
1197         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1198                 Shape.AsyncLowering.getContextAlignment());
1199     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
1200       report_fatal_error(
1201           "The alignment requirment of frame variables cannot be higher than "
1202           "the alignment of the async function context");
1203     }
1204     break;
1205   }
1206   }
1207 
1208   return FrameTy;
1209 }
1210 
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? In that case, we will need to recreate the alias
//      after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of the aliases
//      of the alloca. In the end, we check if there exist any two basic blocks
//      that cross suspension points. If so, this alloca must be put on the
//      frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some point,
//      either by storing the address somewhere, or by the address being used
//      in a function call that might capture. If it's ever escaped, this
//      alloca must be put on the frame conservatively.
//
// To answer question 2, we track this through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out; an
// ideal solution would likely require a significant redesign.
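//
// For example (an illustrative sketch):
//
//   %a = alloca [16 x i8]
//   %p = getelementptr inbounds [16 x i8], [16 x i8]* %a, i32 0, i32 8
//   %hdl = call i8* @llvm.coro.begin(...)
//   ...
//   store i8 0, i8* %p    ; alias %p was created before coro.begin but is
//                         ; used after it
//
// Here %p would be recorded with offset 8 so that it can be recreated from
// the frame address after CoroBegin.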
1236 namespace {
1237 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1238   using Base = PtrUseVisitor<AllocaUseVisitor>;
1239   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1240                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
1241                    bool ShouldUseLifetimeStartInfo)
1242       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
1243         ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1244 
1245   void visit(Instruction &I) {
1246     Users.insert(&I);
1247     Base::visit(I);
1248     // If the pointer is escaped prior to CoroBegin, we have to assume it would
1249     // be written into before CoroBegin as well.
1250     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1251       MayWriteBeforeCoroBegin = true;
1252     }
1253   }
1254   // We need to provide this overload as PtrUseVisitor uses a pointer based
1255   // visiting function.
1256   void visit(Instruction *I) { return visit(*I); }
1257 
1258   void visitPHINode(PHINode &I) {
1259     enqueueUsers(I);
1260     handleAlias(I);
1261   }
1262 
1263   void visitSelectInst(SelectInst &I) {
1264     enqueueUsers(I);
1265     handleAlias(I);
1266   }
1267 
1268   void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
1271     handleMayWrite(SI);
1272 
1273     if (SI.getValueOperand() != U->get())
1274       return;
1275 
1276     // We are storing the pointer into a memory location, potentially escaping.
1277     // As an optimization, we try to detect simple cases where it doesn't
1278     // actually escape, for example:
1279     //   %ptr = alloca ..
1280     //   %addr = alloca ..
1281     //   store %ptr, %addr
1282     //   %x = load %addr
1283     //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as having escaped.
1286     auto IsSimpleStoreThenLoad = [&]() {
1287       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1288       // If the memory location we are storing to is not an alloca, it
1289       // could be an alias of some other memory locations, which is difficult
1290       // to analyze.
1291       if (!AI)
1292         return false;
1293       // StoreAliases contains aliases of the memory location stored into.
1294       SmallVector<Instruction *, 4> StoreAliases = {AI};
1295       while (!StoreAliases.empty()) {
1296         Instruction *I = StoreAliases.pop_back_val();
1297         for (User *U : I->users()) {
1298           // If we are loading from the memory location, we are creating an
1299           // alias of the original pointer.
1300           if (auto *LI = dyn_cast<LoadInst>(U)) {
1301             enqueueUsers(*LI);
1302             handleAlias(*LI);
1303             continue;
1304           }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
1307           if (auto *S = dyn_cast<StoreInst>(U))
1308             if (S->getPointerOperand() == I)
1309               continue;
1310           if (auto *II = dyn_cast<IntrinsicInst>(U))
1311             if (II->isLifetimeStartOrEnd())
1312               continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
1315           if (auto *BI = dyn_cast<BitCastInst>(U)) {
1316             StoreAliases.push_back(BI);
1317             continue;
1318           }
1319           return false;
1320         }
1321       }
1322 
1323       return true;
1324     };
1325 
1326     if (!IsSimpleStoreThenLoad())
1327       PI.setEscaped(&SI);
1328   }
1329 
1330   // All mem intrinsics modify the data.
1331   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1332 
1333   void visitBitCastInst(BitCastInst &BC) {
1334     Base::visitBitCastInst(BC);
1335     handleAlias(BC);
1336   }
1337 
1338   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1339     Base::visitAddrSpaceCastInst(ASC);
1340     handleAlias(ASC);
1341   }
1342 
1343   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1344     // The base visitor will adjust Offset accordingly.
1345     Base::visitGetElementPtrInst(GEPI);
1346     handleAlias(GEPI);
1347   }
1348 
1349   void visitIntrinsicInst(IntrinsicInst &II) {
    // When the lifetime markers refer to a subrange of the original alloca,
    // ignore them to avoid misleading the analysis.
1353     if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1354         !Offset.isZero())
1355       return Base::visitIntrinsicInst(II);
1356     LifetimeStarts.insert(&II);
1357   }
1358 
1359   void visitCallBase(CallBase &CB) {
1360     for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1361       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1362         PI.setEscaped(&CB);
1363     handleMayWrite(CB);
1364   }
1365 
1366   bool getShouldLiveOnFrame() const {
1367     if (!ShouldLiveOnFrame)
1368       ShouldLiveOnFrame = computeShouldLiveOnFrame();
1369     return ShouldLiveOnFrame.getValue();
1370   }
1371 
1372   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1373 
1374   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1375     assert(getShouldLiveOnFrame() && "This method should only be called if the "
1376                                      "alloca needs to live on the frame.");
1377     for (const auto &P : AliasOffetMap)
1378       if (!P.second)
1379         report_fatal_error("Unable to handle an alias with unknown offset "
1380                            "created before CoroBegin.");
1381     return AliasOffetMap;
1382   }
1383 
1384 private:
1385   const DominatorTree &DT;
1386   const CoroBeginInst &CoroBegin;
1387   const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset into the original Alloca. They need to be recreated after CoroBegin
  // off the frame.
1391   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1392   SmallPtrSet<Instruction *, 4> Users{};
1393   SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1394   bool MayWriteBeforeCoroBegin{false};
1395   bool ShouldUseLifetimeStartInfo{true};
1396 
1397   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1398 
1399   bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of a lifetime.start intrinsic and
    // a use of the pointer to see whether they cross a suspension point.
    // The uses cover both direct uses as well as indirect uses.
1404     if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1405       for (auto *I : Users)
1406         for (auto *S : LifetimeStarts)
1407           if (Checker.isDefinitionAcrossSuspend(*S, I))
1408             return true;
1409       return false;
1410     }
1411     // FIXME: Ideally the isEscaped check should come at the beginning.
1412     // However there are a few loose ends that need to be fixed first before
1413     // we can do that. We need to make sure we are not over-conservative, so
1414     // that the data accessed in-between await_suspend and symmetric transfer
1415     // is always put on the stack, and also data accessed after coro.end is
1416     // always put on the stack (esp the return object). To fix that, we need
1417     // to:
1418     //  1) Potentially treat sret as nocapture in calls
1419     //  2) Special handle the return object and put it on the stack
1420     //  3) Utilize lifetime.end intrinsic
1421     if (PI.isEscaped())
1422       return true;
1423 
1424     for (auto *U1 : Users)
1425       for (auto *U2 : Users)
1426         if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1427           return true;
1428 
1429     return false;
1430   }
1431 
1432   void handleMayWrite(const Instruction &I) {
1433     if (!DT.dominates(&CoroBegin, &I))
1434       MayWriteBeforeCoroBegin = true;
1435   }
1436 
1437   bool usedAfterCoroBegin(Instruction &I) {
1438     for (auto &U : I.uses())
1439       if (DT.dominates(&CoroBegin, U))
1440         return true;
1441     return false;
1442   }
1443 
1444   void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
1448     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1449       return;
1450 
1451     if (!IsOffsetKnown) {
1452       AliasOffetMap[&I].reset();
1453     } else {
1454       auto Itr = AliasOffetMap.find(&I);
1455       if (Itr == AliasOffetMap.end()) {
1456         AliasOffetMap[&I] = Offset;
1457       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1458         // If we have seen two different possible values for this alias, we set
1459         // it to empty.
1460         AliasOffetMap[&I].reset();
1461       }
1462     }
1463   }
1464 };
1465 } // namespace
1466 
// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in a block.
1470 //
1471 // Split away catchswitch into a separate block and insert in its place:
1472 //
1473 //   cleanuppad <InsertPt> cleanupret.
1474 //
// The cleanupret instruction will act as an insert point for the spill.
1476 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1477   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1478   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1479   CurrentBlock->getTerminator()->eraseFromParent();
1480 
1481   auto *CleanupPad =
1482       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1483   auto *CleanupRet =
1484       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1485   return CleanupRet;
1486 }
1487 
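// Materialize the frame pointer by casting the handle produced by coro.begin
// to a pointer to the frame type, and record it in the Shape.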
1488 static void createFramePtr(coro::Shape &Shape) {
1489   auto *CB = Shape.CoroBegin;
1490   IRBuilder<> Builder(CB->getNextNode());
1491   StructType *FrameTy = Shape.FrameTy;
1492   PointerType *FramePtrTy = FrameTy->getPointerTo();
1493   Shape.FramePtr =
1494       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1495 }
1496 
// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPtr from the coroutine frame + loads and stores. Create an
1499 // AllocaSpillBB that will become the new entry block for the resume parts of
1500 // the coroutine:
1501 //
1502 //    %hdl = coro.begin(...)
1503 //    whatever
1504 //
1505 // becomes:
1506 //
1507 //    %hdl = coro.begin(...)
1508 //    %FramePtr = bitcast i8* hdl to %f.frame*
1509 //    br label %AllocaSpillBB
1510 //
1511 //  AllocaSpillBB:
1512 //    ; geps corresponding to allocas that were moved to coroutine frame
1513 //    br label PostSpill
1514 //
1515 //  PostSpill:
1516 //    whatever
1517 //
1518 //
1519 static Instruction *insertSpills(const FrameDataInfo &FrameData,
1520                                  coro::Shape &Shape) {
1521   auto *CB = Shape.CoroBegin;
1522   LLVMContext &C = CB->getContext();
1523   IRBuilder<> Builder(C);
1524   StructType *FrameTy = Shape.FrameTy;
1525   Instruction *FramePtr = Shape.FramePtr;
1526   DominatorTree DT(*CB->getFunction());
1527   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1528 
  // Create a GEP into the coroutine frame at the field index recorded for the
  // original value Orig. Appends an extra 0 index for array allocas,
  // preserving the original type.
1532   auto GetFramePointer = [&](Value *Orig) -> Value * {
1533     FieldIDType Index = FrameData.getFieldIndex(Orig);
1534     SmallVector<Value *, 3> Indices = {
1535         ConstantInt::get(Type::getInt32Ty(C), 0),
1536         ConstantInt::get(Type::getInt32Ty(C), Index),
1537     };
1538 
1539     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1540       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1541         auto Count = CI->getValue().getZExtValue();
1542         if (Count > 1) {
1543           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1544         }
1545       } else {
1546         report_fatal_error("Coroutines cannot handle non static allocas yet");
1547       }
1548     }
1549 
1550     auto GEP = cast<GetElementPtrInst>(
1551         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1552     if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst, it
      // implies that the frame slot is shared with another AllocaInst. Cast
      // the GEP to the AllocaInst's type here so the frame storage can be
      // reused.
      //
      // Note: if we change the strategy for dealing with alignment, we need
      // to refine this cast.
1560       if (GEP->getResultElementType() != Orig->getType())
1561         return Builder.CreateBitCast(GEP, Orig->getType(),
1562                                      Orig->getName() + Twine(".cast"));
1563     }
1564     return GEP;
1565   };
1566 
1567   for (auto const &E : FrameData.Spills) {
1568     Value *Def = E.first;
1569     auto SpillAlignment = Align(FrameData.getAlign(Def));
1570     // Create a store instruction storing the value into the
1571     // coroutine frame.
1572     Instruction *InsertPt = nullptr;
1573     bool NeedToCopyArgPtrValue = false;
1574     if (auto *Arg = dyn_cast<Argument>(Def)) {
1575       // For arguments, we will place the store instruction right after
1576       // the coroutine frame pointer instruction, i.e. bitcast of
1577       // coro.begin from i8* to %f.frame*.
1578       InsertPt = FramePtr->getNextNode();
1579 
1580       // If we're spilling an Argument, make sure we clear 'nocapture'
1581       // from the coroutine function.
1582       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1583 
1584       if (Arg->hasByValAttr())
1585         NeedToCopyArgPtrValue = true;
1586 
1587     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1588       // Don't spill immediately after a suspend; splitting assumes
1589       // that the suspend will be followed by a branch.
1590       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1591     } else {
1592       auto *I = cast<Instruction>(Def);
1593       if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the frame pointer is computed.
1596         InsertPt = FramePtr->getNextNode();
1597       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1598         // If we are spilling the result of the invoke instruction, split
1599         // the normal edge and insert the spill in the new block.
1600         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1601         InsertPt = NewBB->getTerminator();
1602       } else if (isa<PHINode>(I)) {
        // Skip over PHI nodes and EH-pad instructions.
1604         BasicBlock *DefBlock = I->getParent();
1605         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1606           InsertPt = splitBeforeCatchSwitch(CSI);
1607         else
1608           InsertPt = &*DefBlock->getFirstInsertionPt();
1609       } else {
1610         assert(!I->isTerminator() && "unexpected terminator");
1611         // For all other values, the spill is placed immediately after
1612         // the definition.
1613         InsertPt = I->getNextNode();
1614       }
1615     }
1616 
1617     auto Index = FrameData.getFieldIndex(Def);
1618     Builder.SetInsertPoint(InsertPt);
1619     auto *G = Builder.CreateConstInBoundsGEP2_32(
1620         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1621     if (NeedToCopyArgPtrValue) {
1622       // For byval arguments, we need to store the pointed value in the frame,
1623       // instead of the pointer itself.
1624       auto *Value =
1625           Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
1626       Builder.CreateAlignedStore(Value, G, SpillAlignment);
1627     } else {
1628       Builder.CreateAlignedStore(Def, G, SpillAlignment);
1629     }
1630 
1631     BasicBlock *CurrentBlock = nullptr;
1632     Value *CurrentReload = nullptr;
1633     for (auto *U : E.second) {
      // If we have not seen this use block yet, reload the spilled value from
      // the coroutine frame (or, for byval arguments, use the frame GEP
      // directly).
1637       if (CurrentBlock != U->getParent()) {
1638         CurrentBlock = U->getParent();
1639         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1640 
1641         auto *GEP = GetFramePointer(E.first);
1642         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1643         if (NeedToCopyArgPtrValue)
1644           CurrentReload = GEP;
1645         else
1646           CurrentReload = Builder.CreateAlignedLoad(
1647               FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1648               SpillAlignment, E.first->getName() + Twine(".reload"));
1649 
1650         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1651         for (DbgDeclareInst *DDI : DIs) {
1652           bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() in the CoroCloner.
1656           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1657               .insertDeclare(CurrentReload, DDI->getVariable(),
1658                              DDI->getExpression(), DDI->getDebugLoc(),
1659                              &*Builder.GetInsertPoint());
1660           // This dbg.declare is for the main function entry point.  It
1661           // will be deleted in all coro-split functions.
1662           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
1663         }
1664       }
1665 
1666       // If we have a single edge PHINode, remove it and replace it with a
1667       // reload from the coroutine frame. (We already took care of multi edge
1668       // PHINodes by rewriting them in the rewritePHIs function).
1669       if (auto *PN = dyn_cast<PHINode>(U)) {
1670         assert(PN->getNumIncomingValues() == 1 &&
1671                "unexpected number of incoming "
1672                "values in the PHINode");
1673         PN->replaceAllUsesWith(CurrentReload);
1674         PN->eraseFromParent();
1675         continue;
1676       }
1677 
      // Replace all uses of Def in the current instruction with the reload.
1680       U->replaceUsesOfWith(Def, CurrentReload);
1681     }
1682   }
1683 
1684   BasicBlock *FramePtrBB = FramePtr->getParent();
1685 
1686   auto SpillBlock =
1687       FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
1688   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1689   Shape.AllocaSpillBlock = SpillBlock;
1690 
1691   // retcon and retcon.once lowering assumes all uses have been sunk.
1692   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1693       Shape.ABI == coro::ABI::Async) {
1694     // If we found any allocas, replace all of their remaining uses with Geps.
1695     Builder.SetInsertPoint(&SpillBlock->front());
1696     for (const auto &P : FrameData.Allocas) {
1697       AllocaInst *Alloca = P.Alloca;
1698       auto *G = GetFramePointer(Alloca);
1699 
      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
1702       G->takeName(Alloca);
1703       Alloca->replaceAllUsesWith(G);
1704       Alloca->eraseFromParent();
1705     }
1706     return FramePtr;
1707   }
1708 
  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To preserve debuggability, we replace the uses of allocas
  // in dbg.declares and dbg.values with the reload from the frame.
1712   // Note: We cannot replace the alloca with GEP instructions indiscriminately,
1713   // as some of the uses may not be dominated by CoroBegin.
1714   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1715   SmallVector<Instruction *, 4> UsersToUpdate;
1716   for (const auto &A : FrameData.Allocas) {
1717     AllocaInst *Alloca = A.Alloca;
1718     UsersToUpdate.clear();
1719     for (User *U : Alloca->users()) {
1720       auto *I = cast<Instruction>(U);
1721       if (DT.dominates(CB, I))
1722         UsersToUpdate.push_back(I);
1723     }
1724     if (UsersToUpdate.empty())
1725       continue;
1726     auto *G = GetFramePointer(Alloca);
1727     G->setName(Alloca->getName() + Twine(".reload.addr"));
1728 
1729     SmallVector<DbgVariableIntrinsic *, 4> DIs;
1730     findDbgUsers(DIs, Alloca);
1731     for (auto *DVI : DIs)
1732       DVI->replaceUsesOfWith(Alloca, G);
1733 
1734     for (Instruction *I : UsersToUpdate)
1735       I->replaceUsesOfWith(Alloca, G);
1736   }
1737   Builder.SetInsertPoint(FramePtr->getNextNode());
1738   for (const auto &A : FrameData.Allocas) {
1739     AllocaInst *Alloca = A.Alloca;
1740     if (A.MayWriteBeforeCoroBegin) {
      // MayWriteBeforeCoroBegin means the alloca is potentially modified
      // before CoroBegin, so its current contents must be copied into the
      // frame.
1742       if (Alloca->isArrayAllocation())
1743         report_fatal_error(
1744             "Coroutines cannot handle copying of array allocas yet");
1745 
1746       auto *G = GetFramePointer(Alloca);
1747       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1748       Builder.CreateStore(Value, G);
1749     }
    // For each alias of Alloca created before CoroBegin but used after
    // CoroBegin, we recreate it after CoroBegin by applying the offset
    // to the pointer in the frame.
1753     for (const auto &Alias : A.Aliases) {
1754       auto *FramePtr = GetFramePointer(Alloca);
1755       auto *FramePtrRaw =
1756           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1757       auto *AliasPtr = Builder.CreateGEP(
1758           Type::getInt8Ty(C), FramePtrRaw,
1759           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1760       auto *AliasPtrTyped =
1761           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1762       Alias.first->replaceUsesWithIf(
1763           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1764     }
1765   }
1766   return FramePtr;
1767 }
1768 
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
1771 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1772                                          BasicBlock *InsertedBB,
1773                                          BasicBlock *PredBB,
1774                                          PHINode *UntilPHI = nullptr) {
1775   auto *PN = cast<PHINode>(&SuccBB->front());
1776   do {
1777     int Index = PN->getBasicBlockIndex(InsertedBB);
1778     Value *V = PN->getIncomingValue(Index);
1779     PHINode *InputV = PHINode::Create(
1780         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1781         &InsertedBB->front());
1782     InputV->addIncoming(V, PredBB);
1783     PN->setIncomingValue(Index, InputV);
1784     PN = dyn_cast<PHINode>(PN->getNextNode());
1785   } while (PN != UntilPHI);
1786 }
1787 
1788 // Rewrites the PHI Nodes in a cleanuppad.
1789 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1790                                      CleanupPadInst *CleanupPad) {
1791   // For every incoming edge to a CleanupPad we will create a new block holding
1792   // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
1794   // must be the same).
1795   //
1796   // cleanuppad:
1797   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1798   //    %3 = cleanuppad within none []
1799   //
1800   // It will create:
1801   //
1802   // cleanuppad.corodispatch
1803   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1804   //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
1806   //            [i8 0, label %cleanuppad.from.catchswitch
1807   //             i8 1, label %cleanuppad.from.catch.1]
1808   // cleanuppad.from.catchswitch:
1809   //    %4 = phi i32 [%0, %catchswitch]
1810   //    br %label cleanuppad
1811   // cleanuppad.from.catch.1:
1812   //    %6 = phi i32 [%1, %catch.1]
1813   //    br %label cleanuppad
1814   // cleanuppad:
1815   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1816   //                 [%6, %cleanuppad.from.catch.1]
1817 
  // Unreachable BB, in case the dispatcher switches on an invalid value.
1819   auto *UnreachBB = BasicBlock::Create(
1820       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1821   IRBuilder<> Builder(UnreachBB);
1822   Builder.CreateUnreachable();
1823 
1824   // Create a new cleanuppad which will be the dispatcher.
1825   auto *NewCleanupPadBB =
1826       BasicBlock::Create(CleanupPadBB->getContext(),
1827                          CleanupPadBB->getName() + Twine(".corodispatch"),
1828                          CleanupPadBB->getParent(), CleanupPadBB);
1829   Builder.SetInsertPoint(NewCleanupPadBB);
1830   auto *SwitchType = Builder.getInt8Ty();
1831   auto *SetDispatchValuePN =
1832       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1833   CleanupPad->removeFromParent();
1834   CleanupPad->insertAfter(SetDispatchValuePN);
1835   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1836                                                 pred_size(CleanupPadBB));
1837 
1838   int SwitchIndex = 0;
1839   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1840   for (BasicBlock *Pred : Preds) {
1841     // Create a new cleanuppad and move the PHI values to there.
1842     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1843                                       CleanupPadBB->getName() +
1844                                           Twine(".from.") + Pred->getName(),
1845                                       CleanupPadBB->getParent(), CleanupPadBB);
1846     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1847     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1848                     Pred->getName());
1849     Builder.SetInsertPoint(CaseBB);
1850     Builder.CreateBr(CleanupPadBB);
1851     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1852 
1853     // Update this Pred to the new unwind point.
1854     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1855 
1856     // Setup the switch in the dispatcher.
1857     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1858     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1859     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1860     SwitchIndex++;
1861   }
1862 }
1863 
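// Replace the uses of any PHI node that has a single incoming value with that
// value, so that later code does not need to reason about such trivial PHIs.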
1864 static void cleanupSinglePredPHIs(Function &F) {
1865   SmallVector<PHINode *, 32> Worklist;
1866   for (auto &BB : F) {
1867     for (auto &Phi : BB.phis()) {
1868       if (Phi.getNumIncomingValues() == 1) {
1869         Worklist.push_back(&Phi);
1870       } else
1871         break;
1872     }
1873   }
1874   while (!Worklist.empty()) {
1875     auto *Phi = Worklist.pop_back_val();
1876     auto *OriginalValue = Phi->getIncomingValue(0);
1877     Phi->replaceAllUsesWith(OriginalValue);
1878   }
1879 }
1880 
1881 static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
1884   //
1885   // loop:
1886   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1887   //
1888   // It will create:
1889   //
1890   // loop.from.entry:
1891   //    %n.loop.pre = phi i32 [%n, %entry]
1892   //    br %label loop
1893   // loop.from.loop:
1894   //    %inc.loop.pre = phi i32 [%inc, %loop]
1895   //    br %label loop
1896   //
1897   // After this rewrite, further analysis will ignore any phi nodes with more
1898   // than one incoming edge.
1899 
1900   // TODO: Simplify PHINodes in the basic block to remove duplicate
1901   // predecessors.
1902 
1903   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1904   // so we need to create an additional "dispatcher" block.
1905   if (auto *CleanupPad =
1906           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1907     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1908     for (BasicBlock *Pred : Preds) {
1909       if (CatchSwitchInst *CS =
1910               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1911         // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
1913         assert(CS->getUnwindDest() == &BB);
1914         (void)CS;
1915         rewritePHIsForCleanupPad(&BB, CleanupPad);
1916         return;
1917       }
1918     }
1919   }
1920 
1921   LandingPadInst *LandingPad = nullptr;
1922   PHINode *ReplPHI = nullptr;
1923   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1924     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1925     // We replace the original landing pad with a PHINode that will collect the
1926     // results from all of them.
1927     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1928     ReplPHI->takeName(LandingPad);
1929     LandingPad->replaceAllUsesWith(ReplPHI);
1930     // We will erase the original landing pad at the end of this function after
1931     // ehAwareSplitEdge cloned it in the transition blocks.
1932   }
1933 
1934   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1935   for (BasicBlock *Pred : Preds) {
1936     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1937     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1938 
1939     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1940     // that replaced the landing pad.
1941     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1942   }
1943 
1944   if (LandingPad) {
    // The calls to ehAwareSplitEdge above cloned the original landing pad;
    // it is no longer needed.
1947     LandingPad->eraseFromParent();
1948   }
1949 }
1950 
1951 static void rewritePHIs(Function &F) {
1952   SmallVector<BasicBlock *, 8> WorkList;
1953 
1954   for (BasicBlock &BB : F)
1955     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1956       if (PN->getNumIncomingValues() > 1)
1957         WorkList.push_back(&BB);
1958 
1959   for (BasicBlock *BB : WorkList)
1960     rewritePHIs(*BB);
1961 }
1962 
// Check for instructions that we can recreate on resume as opposed to spilling
// the result into the coroutine frame.
1965 static bool materializable(Instruction &V) {
1966   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1967          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1968 }
1969 
1970 // Check for structural coroutine intrinsics that should not be spilled into
1971 // the coroutine frame.
1972 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1973   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1974          isa<CoroSuspendInst>(&I);
1975 }
1976 
// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
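//
// For example (a rough sketch, not taken from an actual test):
//
//   %gep = getelementptr %t, %t* %p, i32 0, i32 1   ; materializable
//   ...
//   <suspend point>
//   ...
//   use of %gep
//
// conceptually becomes:
//
//   ...
//   <suspend point>
//   ...
//   %gep.remat = getelementptr %t, %t* %p, i32 0, i32 1  ; recreated in the
//   use of %gep.remat                                    ; block of the use
//
// (after which %p, rather than %gep, may need to be spilled; hence the
// repeated passes in the caller).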
1979 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1980                                               const SpillInfo &Spills) {
1981   for (const auto &E : Spills) {
1982     Value *Def = E.first;
1983     BasicBlock *CurrentBlock = nullptr;
1984     Instruction *CurrentMaterialization = nullptr;
1985     for (Instruction *U : E.second) {
1986       // If we have not seen this block, materialize the value.
1987       if (CurrentBlock != U->getParent()) {
1988 
1989         bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
1990         CurrentBlock = U->getParent();
1991         auto *InsertBlock = IsInCoroSuspendBlock
1992                                 ? CurrentBlock->getSinglePredecessor()
1993                                 : CurrentBlock;
1994         CurrentMaterialization = cast<Instruction>(Def)->clone();
1995         CurrentMaterialization->setName(Def->getName());
1996         CurrentMaterialization->insertBefore(
1997             IsInCoroSuspendBlock ? InsertBlock->getTerminator()
1998                                  : &*InsertBlock->getFirstInsertionPt());
1999       }
2000       if (auto *PN = dyn_cast<PHINode>(U)) {
2001         assert(PN->getNumIncomingValues() == 1 &&
2002                "unexpected number of incoming "
2003                "values in the PHINode");
2004         PN->replaceAllUsesWith(CurrentMaterialization);
2005         PN->eraseFromParent();
2006         continue;
2007       }
2008       // Replace all uses of Def in the current instruction with the
2009       // CurrentMaterialization for the block.
2010       U->replaceUsesOfWith(Def, CurrentMaterialization);
2011     }
2012   }
2013 }
2014 
2015 // Splits the block at a particular instruction unless it is the first
2016 // instruction in the block with a single predecessor.
2017 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2018   auto *BB = I->getParent();
2019   if (&BB->front() == I) {
2020     if (BB->getSinglePredecessor()) {
2021       BB->setName(Name);
2022       return BB;
2023     }
2024   }
2025   return BB->splitBasicBlock(I, Name);
2026 }
2027 
// Split above and below a particular instruction so that it
// ends up alone in its own block.
2030 static void splitAround(Instruction *I, const Twine &Name) {
2031   splitBlockIfNotFirst(I, Name);
2032   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2033 }
2034 
2035 static bool isSuspendBlock(BasicBlock *BB) {
2036   return isa<AnyCoroSuspendInst>(BB->front());
2037 }
2038 
2039 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
2040 
2041 /// Does control flow starting at the given block ever reach a suspend
2042 /// instruction before reaching a block in VisitedOrFreeBBs?
2043 static bool isSuspendReachableFrom(BasicBlock *From,
2044                                    VisitedBlocksSet &VisitedOrFreeBBs) {
2045   // Eagerly try to add this block to the visited set.  If it's already
2046   // there, stop recursing; this path doesn't reach a suspend before
2047   // either looping or reaching a freeing block.
2048   if (!VisitedOrFreeBBs.insert(From).second)
2049     return false;
2050 
2051   // We assume that we'll already have split suspends into their own blocks.
2052   if (isSuspendBlock(From))
2053     return true;
2054 
2055   // Recurse on the successors.
2056   for (auto Succ : successors(From)) {
2057     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2058       return true;
2059   }
2060 
2061   return false;
2062 }
2063 
2064 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2065 /// suspend point?
2066 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2067   // Seed the visited set with all the basic blocks containing a free
2068   // so that we won't pass them up.
2069   VisitedBlocksSet VisitedOrFreeBBs;
2070   for (auto User : AI->users()) {
2071     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2072       VisitedOrFreeBBs.insert(FI->getParent());
2073   }
2074 
2075   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2076 }
2077 
2078 /// After we split the coroutine, will the given basic block be along
2079 /// an obvious exit path for the resumption function?
2080 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2081                                               unsigned depth = 3) {
2082   // If we've bottomed out our depth count, stop searching and assume
2083   // that the path might loop back.
2084   if (depth == 0) return false;
2085 
2086   // If this is a suspend block, we're about to exit the resumption function.
2087   if (isSuspendBlock(BB)) return true;
2088 
2089   // Recurse into the successors.
2090   for (auto Succ : successors(BB)) {
2091     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2092       return false;
2093   }
2094 
2095   // If none of the successors leads back in a loop, we're on an exit/abort.
2096   return true;
2097 }
2098 
2099 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2100   // Look for a free that isn't sufficiently obviously followed by
2101   // either a suspend or a termination, i.e. something that will leave
2102   // the coro resumption frame.
2103   for (auto U : AI->users()) {
2104     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2105     if (!FI) continue;
2106 
2107     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2108       return true;
2109   }
2110 
2111   // If we never found one, we don't need a stack save.
2112   return false;
2113 }
2114 
2115 /// Turn each of the given local allocas into a normal (dynamic) alloca
2116 /// instruction.
2117 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2118                               SmallVectorImpl<Instruction*> &DeadInsts) {
2119   for (auto AI : LocalAllocas) {
2120     auto M = AI->getModule();
2121     IRBuilder<> Builder(AI);
2122 
2123     // Save the stack depth.  Try to avoid doing this if the stackrestore
2124     // is going to immediately precede a return or something.
2125     Value *StackSave = nullptr;
2126     if (localAllocaNeedsStackSave(AI))
2127       StackSave = Builder.CreateCall(
2128                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2129 
2130     // Allocate memory.
2131     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2132     Alloca->setAlignment(Align(AI->getAlignment()));
2133 
2134     for (auto U : AI->users()) {
2135       // Replace gets with the allocation.
2136       if (isa<CoroAllocaGetInst>(U)) {
2137         U->replaceAllUsesWith(Alloca);
2138 
2139       // Replace frees with stackrestores.  This is safe because
2140       // alloca.alloc is required to obey a stack discipline, although we
2141       // don't enforce that structurally.
2142       } else {
2143         auto FI = cast<CoroAllocaFreeInst>(U);
2144         if (StackSave) {
2145           Builder.SetInsertPoint(FI);
2146           Builder.CreateCall(
2147                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2148                              StackSave);
2149         }
2150       }
2151       DeadInsts.push_back(cast<Instruction>(U));
2152     }
2153 
2154     DeadInsts.push_back(AI);
2155   }
2156 }
2157 
2158 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
2159 /// This happens during the all-instructions iteration, so it must not
2160 /// delete the call.
2161 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2162                                         coro::Shape &Shape,
2163                                    SmallVectorImpl<Instruction*> &DeadInsts) {
2164   IRBuilder<> Builder(AI);
2165   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2166 
2167   for (User *U : AI->users()) {
2168     if (isa<CoroAllocaGetInst>(U)) {
2169       U->replaceAllUsesWith(Alloc);
2170     } else {
2171       auto FI = cast<CoroAllocaFreeInst>(U);
2172       Builder.SetInsertPoint(FI);
2173       Shape.emitDealloc(Builder, Alloc, nullptr);
2174     }
2175     DeadInsts.push_back(cast<Instruction>(U));
2176   }
2177 
2178   // Push this on last so that it gets deleted after all the others.
2179   DeadInsts.push_back(AI);
2180 
2181   // Return the new allocation value so that we can check for needed spills.
2182   return cast<Instruction>(Alloc);
2183 }
2184 
2185 /// Get the current swifterror value.
2186 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2187                                      coro::Shape &Shape) {
2188   // Make a fake function pointer as a sort of intrinsic.
2189   auto FnTy = FunctionType::get(ValueTy, {}, false);
2190   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2191 
2192   auto Call = Builder.CreateCall(FnTy, Fn, {});
2193   Shape.SwiftErrorOps.push_back(Call);
2194 
2195   return Call;
2196 }
2197 
2198 /// Set the given value as the current swifterror value.
2199 ///
2200 /// Returns a slot that can be used as a swifterror slot.
2201 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2202                                      coro::Shape &Shape) {
2203   // Make a fake function pointer as a sort of intrinsic.
2204   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2205                                 {V->getType()}, false);
2206   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2207 
2208   auto Call = Builder.CreateCall(FnTy, Fn, { V });
2209   Shape.SwiftErrorOps.push_back(Call);
2210 
2211   return Call;
2212 }
2213 
2214 /// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
2216 ///
2217 /// Returns an address that will stand in for the swifterror slot
2218 /// until splitting.
2219 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2220                                                  AllocaInst *Alloca,
2221                                                  coro::Shape &Shape) {
2222   auto ValueTy = Alloca->getAllocatedType();
2223   IRBuilder<> Builder(Call);
2224 
2225   // Load the current value from the alloca and set it as the
2226   // swifterror value.
2227   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2228   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2229 
2230   // Move to after the call.  Since swifterror only has a guaranteed
2231   // value on normal exits, we can ignore implicit and explicit unwind
2232   // edges.
2233   if (isa<CallInst>(Call)) {
2234     Builder.SetInsertPoint(Call->getNextNode());
2235   } else {
2236     auto Invoke = cast<InvokeInst>(Call);
2237     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2238   }
2239 
2240   // Get the current swifterror value and store it to the alloca.
2241   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2242   Builder.CreateStore(ValueAfterCall, Alloca);
2243 
2244   return Addr;
2245 }
2246 
2247 /// Eliminate a formerly-swifterror alloca by inserting the get/set
2248 /// intrinsics and attempting to MemToReg the alloca away.
2249 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2250                                       coro::Shape &Shape) {
2251   for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2252     // swifterror values can only be used in very specific ways.
2253     // We take advantage of that here.
2254     auto User = Use.getUser();
2255     if (isa<LoadInst>(User) || isa<StoreInst>(User))
2256       continue;
2257 
2258     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2259     auto Call = cast<Instruction>(User);
2260 
2261     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2262 
2263     // Use the returned slot address as the call argument.
2264     Use.set(Addr);
2265   }
2266 
2267   // All the uses should be loads and stores now.
2268   assert(isAllocaPromotable(Alloca));
2269 }
2270 
2271 /// "Eliminate" a swifterror argument by reducing it to the alloca case
2272 /// and then loading and storing in the prologue and epilog.
2273 ///
2274 /// The argument keeps the swifterror flag.
2275 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2276                                         coro::Shape &Shape,
2277                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2278   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2279 
2280   auto ArgTy = cast<PointerType>(Arg.getType());
2281   auto ValueTy = ArgTy->getElementType();
2282 
2283   // Reduce to the alloca case:
2284 
2285   // Create an alloca and replace all uses of the arg with it.
2286   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2287   Arg.replaceAllUsesWith(Alloca);
2288 
2289   // Set an initial value in the alloca.  swifterror is always null on entry.
2290   auto InitialValue = Constant::getNullValue(ValueTy);
2291   Builder.CreateStore(InitialValue, Alloca);
2292 
2293   // Find all the suspends in the function and save and restore around them.
2294   for (auto Suspend : Shape.CoroSuspends) {
2295     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2296   }
2297 
2298   // Find all the coro.ends in the function and restore the error value.
2299   for (auto End : Shape.CoroEnds) {
2300     Builder.SetInsertPoint(End);
2301     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2302     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2303   }
2304 
2305   // Now we can use the alloca logic.
2306   AllocasToPromote.push_back(Alloca);
2307   eliminateSwiftErrorAlloca(F, Alloca, Shape);
2308 }
2309 
2310 /// Eliminate all problematic uses of swifterror arguments and allocas
2311 /// from the function.  We'll fix them up later when splitting the function.
2312 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2313   SmallVector<AllocaInst*, 4> AllocasToPromote;
2314 
2315   // Look for a swifterror argument.
2316   for (auto &Arg : F.args()) {
2317     if (!Arg.hasSwiftErrorAttr()) continue;
2318 
2319     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2320     break;
2321   }
2322 
2323   // Look for swifterror allocas.
2324   for (auto &Inst : F.getEntryBlock()) {
2325     auto Alloca = dyn_cast<AllocaInst>(&Inst);
2326     if (!Alloca || !Alloca->isSwiftError()) continue;
2327 
2328     // Clear the swifterror flag.
2329     Alloca->setSwiftError(false);
2330 
2331     AllocasToPromote.push_back(Alloca);
2332     eliminateSwiftErrorAlloca(F, Alloca, Shape);
2333   }
2334 
2335   // If we have any allocas to promote, compute a dominator tree and
2336   // promote them en masse.
2337   if (!AllocasToPromote.empty()) {
2338     DominatorTree DT(F);
2339     PromoteMemToReg(AllocasToPromote, DT);
2340   }
2341 }
2342 
2343 /// retcon and retcon.once conventions assume that all spill uses can be sunk
2344 /// after the coro.begin intrinsic.
2345 static void sinkSpillUsesAfterCoroBegin(Function &F,
2346                                         const FrameDataInfo &FrameData,
2347                                         CoroBeginInst *CoroBegin) {
2348   DominatorTree Dom(F);
2349 
2350   SmallSetVector<Instruction *, 32> ToMove;
2351   SmallVector<Instruction *, 32> Worklist;
2352 
2353   // Collect all users that precede coro.begin.
2354   for (auto *Def : FrameData.getAllDefs()) {
2355     for (User *U : Def->users()) {
2356       auto Inst = cast<Instruction>(U);
2357       if (Inst->getParent() != CoroBegin->getParent() ||
2358           Dom.dominates(CoroBegin, Inst))
2359         continue;
2360       if (ToMove.insert(Inst))
2361         Worklist.push_back(Inst);
2362     }
2363   }
2364   // Recursively collect users before coro.begin.
2365   while (!Worklist.empty()) {
2366     auto *Def = Worklist.pop_back_val();
2367     for (User *U : Def->users()) {
2368       auto Inst = cast<Instruction>(U);
2369       if (Dom.dominates(CoroBegin, Inst))
2370         continue;
2371       if (ToMove.insert(Inst))
2372         Worklist.push_back(Inst);
2373     }
2374   }
2375 
2376   // Sort by dominance.
2377   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2378   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
2380     return Dom.dominates(A, B);
2381   });
2382 
2383   Instruction *InsertPt = CoroBegin->getNextNode();
2384   for (Instruction *Inst : InsertionList)
2385     Inst->moveBefore(InsertPt);
2386 }
2387 
/// For each local variable whose users all live inside a single suspended
/// region, sink its lifetime.start marker to a point just after the suspend
/// block. Doing so minimizes the lifetime of each variable, hence minimizing
/// the amount of data we end up putting on the frame.
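///
/// A rough sketch (not taken from an actual test; names are illustrative): if
/// all uses of %x sit in the block that follows a suspend, its lifetime.start
/// is sunk into that block:
///
///   entry:
///     %x = alloca i32
///     call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast)
///     ...
///   await.ready:                ; single successor of the suspend block
///     ; all uses of %x
///
/// becomes
///
///   entry:
///     %x = alloca i32
///     ...
///   await.ready:
///     call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast2)
///     ; all uses of %x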
2392 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2393                                      SuspendCrossingInfo &Checker) {
2394   DominatorTree DT(F);
2395 
2396   // Collect all possible basic blocks which may dominate all uses of allocas.
2397   SmallPtrSet<BasicBlock *, 4> DomSet;
2398   DomSet.insert(&F.getEntryBlock());
2399   for (auto *CSI : Shape.CoroSuspends) {
2400     BasicBlock *SuspendBlock = CSI->getParent();
2401     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2402            "should have split coro.suspend into its own block");
2403     DomSet.insert(SuspendBlock->getSingleSuccessor());
2404   }
2405 
2406   for (Instruction &I : instructions(F)) {
2407     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2408     if (!AI)
2409       continue;
2410 
2411     for (BasicBlock *DomBB : DomSet) {
2412       bool Valid = true;
2413       SmallVector<Instruction *, 1> Lifetimes;
2414 
2415       auto isLifetimeStart = [](Instruction* I) {
2416         if (auto* II = dyn_cast<IntrinsicInst>(I))
2417           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2418         return false;
2419       };
2420 
2421       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2422         if (isLifetimeStart(U)) {
2423           Lifetimes.push_back(U);
2424           return true;
2425         }
2426         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2427           return false;
2428         if (isLifetimeStart(U->user_back())) {
2429           Lifetimes.push_back(U->user_back());
2430           return true;
2431         }
2432         return false;
2433       };
2434 
2435       for (User *U : AI->users()) {
2436         Instruction *UI = cast<Instruction>(U);
2437         // For all users except lifetime.start markers, if they are all
2438         // dominated by one of the basic blocks and do not cross
2439         // suspend points as well, then there is no need to spill the
2440         // instruction.
2441         if (!DT.dominates(DomBB, UI->getParent()) ||
2442             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2443           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2444           // markers.
2445           if (collectLifetimeStart(UI, AI))
2446             continue;
2447           Valid = false;
2448           break;
2449         }
2450       }
      // Sink the lifetime.start markers into the dominating block when they
      // are the only uses that live outside of it.
2453       if (Valid && Lifetimes.size() != 0) {
        // This may be AI itself, when the type of AI is already i8*.
2455         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2456           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2457             return AI;
2458           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2459           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2460                                   DomBB->getTerminator());
2461         }(AI);
2462 
2463         auto *NewLifetime = Lifetimes[0]->clone();
2464         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2465         NewLifetime->insertBefore(DomBB->getTerminator());
2466 
        // The original lifetime.start markers are no longer necessary.
2468         for (Instruction *S : Lifetimes)
2469           S->eraseFromParent();
2470 
2471         break;
2472       }
2473     }
2474   }
2475 }
2476 
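// Collect the allocas that need to live on the coroutine frame, i.e. those
// whose lifetime crosses a suspend point. The promise alloca is skipped here
// because it is handled separately.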
2477 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2478                                 const SuspendCrossingInfo &Checker,
2479                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2480   for (Instruction &I : instructions(F)) {
2481     auto *AI = dyn_cast<AllocaInst>(&I);
2482     if (!AI)
2483       continue;
2484     // The PromiseAlloca will be specially handled since it needs to be in a
2485     // fixed position in the frame.
2486     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2487       continue;
2488     }
2489     DominatorTree DT(F);
    // The code that uses the lifetime.start intrinsic does not work for
    // functions with loops that have no exit. Disable it on ABIs we know may
    // generate such code.
2493     bool ShouldUseLifetimeStartInfo =
2494         (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2495          Shape.ABI != coro::ABI::RetconOnce);
2496     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2497                              *Shape.CoroBegin, Checker,
2498                              ShouldUseLifetimeStartInfo};
2499     Visitor.visitPtr(*AI);
2500     if (!Visitor.getShouldLiveOnFrame())
2501       continue;
2502     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2503                          Visitor.getMayWriteBeforeCoroBegin());
2504   }
2505 }
2506 
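// Salvage the debug information attached to DVI: follow the pointer
// arithmetic in its location operand back through loads, stores and other
// salvageable instructions, folding the operations into the DIExpression.
// For function arguments, the pointer may additionally be stashed in a
// dedicated ".debug" alloca so it stays available in unoptimized code.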
2507 void coro::salvageDebugInfo(
2508     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2509     DbgVariableIntrinsic *DVI, bool ReuseFrameSlot) {
2510   Function *F = DVI->getFunction();
2511   IRBuilder<> Builder(F->getContext());
2512   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2513   while (isa<IntrinsicInst>(InsertPt))
2514     ++InsertPt;
2515   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2516   DIExpression *Expr = DVI->getExpression();
2517   // Follow the pointer arithmetic all the way to the incoming
2518   // function argument and convert into a DIExpression.
2519   bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2520   Value *Storage = DVI->getVariableLocationOp(0);
2521   Value *OriginalStorage = Storage;
2522   while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2523     if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2524       Storage = LdInst->getOperand(0);
2525       // FIXME: This is a heuristic that works around the fact that
2526       // LLVM IR debug intrinsics cannot yet distinguish between
2527       // memory and value locations: Because a dbg.declare(alloca) is
2528       // implicitly a memory location no DW_OP_deref operation for the
2529       // last direct load from an alloca is necessary.  This condition
2530       // effectively drops the *last* DW_OP_deref in the expression.
2531       if (!SkipOutermostLoad)
2532         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2533     } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2534       Storage = StInst->getOperand(0);
2535     } else {
2536       SmallVector<uint64_t, 16> Ops;
2537       SmallVector<Value *, 0> AdditionalValues;
2538       Value *Op = llvm::salvageDebugInfoImpl(
2539           *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2540           AdditionalValues);
2541       if (!Op || !AdditionalValues.empty()) {
2542         // If salvaging failed or salvaging produced more than one location
2543         // operand, give up.
2544         break;
2545       }
2546       Storage = Op;
2547       Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2548     }
2549     SkipOutermostLoad = false;
2550   }
2551   if (!Storage)
2552     return;
2553 
2554   // Store a pointer to the coroutine frame object in an alloca so it
2555   // is available throughout the function when producing unoptimized
2556   // code. Extending the lifetime this way is correct because the
2557   // variable has been declared by a dbg.declare intrinsic.
2558   //
  // Avoid creating an alloca that would be eliminated by optimization passes,
  // which would leave the corresponding dbg.declares invalid.
2561   if (!ReuseFrameSlot && !EnableReuseStorageInFrame)
2562     if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2563       auto &Cached = DbgPtrAllocaCache[Storage];
2564       if (!Cached) {
2565         Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2566                                       Arg->getName() + ".debug");
2567         Builder.CreateStore(Storage, Cached);
2568       }
2569       Storage = Cached;
2570       // FIXME: LLVM lacks nuanced semantics to differentiate between
2571       // memory and direct locations at the IR level. The backend will
2572       // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2573       // location. Thus, if there are deref and offset operations in the
2574       // expression, we need to add a DW_OP_deref at the *start* of the
2575       // expression to first load the contents of the alloca before
2576       // adjusting it with the expression.
2577       if (Expr && Expr->isComplex())
2578         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2579     }
2580 
2581   DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2582   DVI->setExpression(Expr);
  // It makes no sense to move the dbg.value intrinsic.
2584   if (!isa<DbgValueInst>(DVI)) {
2585     if (auto *II = dyn_cast<InvokeInst>(Storage))
2586       DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
2587     else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
2588       DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
2589     else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
2590       assert(!InsertPt->isTerminator() &&
2591              "Unimaged terminator that could return a storage.");
2592       DVI->moveAfter(InsertPt);
2593     } else if (isa<Argument>(Storage))
2594       DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2595   }
2596 }
2597 
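// Build the coroutine frame for F: put coro.save/suspend/end intrinsics into
// their own blocks, rewrite PHIs so that no value feeding a PHI has its
// definition separated from the PHI by a suspend point, rematerialize cheap
// instructions at their uses, and collect the allocas and other values that
// must be spilled to the coroutine frame.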
2598 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2599   // Don't eliminate swifterror in async functions that won't be split.
2600   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2601     eliminateSwiftError(F, Shape);
2602 
2603   if (Shape.ABI == coro::ABI::Switch &&
2604       Shape.SwitchLowering.PromiseAlloca) {
2605     Shape.getSwitchCoroId()->clearPromise();
2606   }
2607 
2608   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2609   // intrinsics are in their own blocks to simplify the logic of building up
2610   // SuspendCrossing data.
2611   for (auto *CSI : Shape.CoroSuspends) {
2612     if (auto *Save = CSI->getCoroSave())
2613       splitAround(Save, "CoroSave");
2614     splitAround(CSI, "CoroSuspend");
2615   }
2616 
2617   // Put CoroEnds into their own blocks.
2618   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2619     splitAround(CE, "CoroEnd");
2620 
    // Emit the musttail call in a new block before the CoroEnd.
    // We do this here so that the right suspend-crossing info is computed for
    // the uses of the musttail call. (Arguments to the coro.end instruction
    // would otherwise be ignored.)
2625     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2626       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2627       if (!MustTailCallFn)
2628         continue;
2629       IRBuilder<> Builder(AsyncEnd);
2630       SmallVector<Value *, 8> Args(AsyncEnd->args());
2631       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2632       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2633                                       Arguments, Builder);
2634       splitAround(Call, "MustTailCall.Before.CoroEnd");
2635     }
2636   }
2637 
  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
2640   cleanupSinglePredPHIs(F);
2641 
  // Transform multi-edge PHI nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
2644   rewritePHIs(F);
2645 
2646   // Build suspend crossing info.
2647   SuspendCrossingInfo Checker(F, Shape);
2648 
2649   IRBuilder<> Builder(F.getContext());
2650   FrameDataInfo FrameData;
2651   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2652   SmallVector<Instruction*, 4> DeadInstructions;
2653 
2654   {
2655     SpillInfo Spills;
2656     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2657       // See if there are materializable instructions across suspend points.
2658       for (Instruction &I : instructions(F))
2659         if (materializable(I)) {
2660           for (User *U : I.users())
2661             if (Checker.isDefinitionAcrossSuspend(I, U))
2662               Spills[&I].push_back(cast<Instruction>(U));
2663 
2664           // Manually add dbg.value metadata uses of I.
2665           SmallVector<DbgValueInst *, 16> DVIs;
2666           findDbgValues(DVIs, &I);
2667           for (auto *DVI : DVIs)
2668             if (Checker.isDefinitionAcrossSuspend(I, DVI))
2669               Spills[&I].push_back(DVI);
2670         }
2671 
2672       if (Spills.empty())
2673         break;
2674 
2675       // Rewrite materializable instructions to be materialized at the use
2676       // point.
2677       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2678       rewriteMaterializableInstructions(Builder, Spills);
2679       Spills.clear();
2680     }
2681   }
2682 
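  // Sinking lifetime.start markers closer to the first use of each alloca
  // gives a tighter live range, which helps keep allocas that do not really
  // live across a suspend point out of the frame.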
2683   if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2684       Shape.ABI != coro::ABI::RetconOnce)
2685     sinkLifetimeStartMarkers(F, Shape, Checker);
2686 
2687   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2688     collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
2689   LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
2690 
  // Collect the spills for arguments and other non-materializable values.
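  // Arguments are defined on entry, before any suspend point, so every
  // argument use that is reached across a suspend must be reloaded from the
  // frame.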
2692   for (Argument &A : F.args())
2693     for (User *U : A.users())
2694       if (Checker.isDefinitionAcrossSuspend(A, U))
2695         FrameData.Spills[&A].push_back(cast<Instruction>(U));
2696 
2697   for (Instruction &I : instructions(F)) {
2698     // Values returned from coroutine structure intrinsics should not be part
2699     // of the Coroutine Frame.
2700     if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
2701       continue;
2702 
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
2705     if (Shape.ABI == coro::ABI::Switch &&
2706         Shape.SwitchLowering.PromiseAlloca == &I)
2707       continue;
2708 
    // Handle coro.alloca.alloc specially here.
2710     if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
2711       // Check whether the alloca's lifetime is bounded by suspend points.
2712       if (isLocalAlloca(AI)) {
2713         LocalAllocas.push_back(AI);
2714         continue;
2715       }
2716 
2717       // If not, do a quick rewrite of the alloca and then add spills of
2718       // the rewritten value.  The rewrite doesn't invalidate anything in
2719       // Spills because the other alloca intrinsics have no other operands
2720       // besides AI, and it doesn't invalidate the iteration because we delay
2721       // erasing AI.
2722       auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
2723 
2724       for (User *U : Alloc->users()) {
2725         if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
2726           FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
2727       }
2728       continue;
2729     }
2730 
    // Ignore coro.alloca.get; we process it as part of coro.alloca.alloc.
2732     if (isa<CoroAllocaGetInst>(I))
2733       continue;
2734 
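    // Ordinary allocas that need to live in the frame were already collected
    // into FrameData.Allocas by collectFrameAllocas above, so skip them here.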
2735     if (isa<AllocaInst>(I))
2736       continue;
2737 
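    // Note that a token value cannot be stored to memory, so there is no way
    // to spill it into the frame; report an error instead.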
2738     for (User *U : I.users())
2739       if (Checker.isDefinitionAcrossSuspend(I, U)) {
2740         // We cannot spill a token.
2741         if (I.getType()->isTokenTy())
2742           report_fatal_error(
2743               "token definition is separated from the use by a suspend point");
2744         FrameData.Spills[&I].push_back(cast<Instruction>(U));
2745       }
2746   }
2747 
  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage dbg.value intrinsics whose values are
  // already spilled into the frame.
  // The dbg.values for allocas are handled separately.
2752   for (auto &Iter : FrameData.Spills) {
2753     auto *V = Iter.first;
2754     SmallVector<DbgValueInst *, 16> DVIs;
2755     findDbgValues(DVIs, V);
2756     llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
2757       if (Checker.isDefinitionAcrossSuspend(*V, DVI))
2758         FrameData.Spills[V].push_back(DVI);
2759     });
2760   }
2761 
2762   LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
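  // For the retcon and async ABIs the frame is only available after
  // coro.begin executes, so sink any uses of values that will be spilled to
  // a point after coro.begin.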
2763   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
2764       Shape.ABI == coro::ABI::Async)
2765     sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
2766   Shape.FrameTy = buildFrameType(F, Shape, FrameData);
2767   createFramePtr(Shape);
2768   // For now, this works for C++ programs only.
2769   buildFrameDebugInfo(F, Shape, FrameData);
2770   insertSpills(FrameData, Shape);
2771   lowerLocalAllocas(LocalAllocas, DeadInstructions);
2772 
  for (auto *I : DeadInstructions)
2774     I->eraseFromParent();
2775 }
2776