1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/ScopeExit.h"
20 #include "llvm/ADT/SmallString.h"
21 #include "llvm/Analysis/PtrUseVisitor.h"
22 #include "llvm/Analysis/StackLifetime.h"
23 #include "llvm/Config/llvm-config.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/DIBuilder.h"
26 #include "llvm/IR/DebugInfo.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstIterator.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/MathExtras.h"
33 #include "llvm/Support/OptimizedStructLayout.h"
34 #include "llvm/Support/circular_raw_ostream.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
37 #include "llvm/Transforms/Utils/Local.h"
38 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
39 #include <algorithm>
40 
41 using namespace llvm;
42 
43 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
44 // "coro-frame", which results in leaner debug spew.
45 #define DEBUG_TYPE "coro-suspend-crossing"
46 
47 static cl::opt<bool> EnableReuseStorageInFrame(
48     "reuse-storage-in-coroutine-frame", cl::Hidden,
49     cl::desc(
50         "Enable the optimization which would reuse the storage in the coroutine \
51          frame for allocas whose liferanges are not overlapped, for testing purposes"),
52     llvm::cl::init(false));
53 
54 enum { SmallVectorThreshold = 32 };
55 
56 // Provides two way mapping between the blocks and numbers.
57 namespace {
58 class BlockToIndexMapping {
59   SmallVector<BasicBlock *, SmallVectorThreshold> V;
60 
61 public:
62   size_t size() const { return V.size(); }
63 
64   BlockToIndexMapping(Function &F) {
65     for (BasicBlock &BB : F)
66       V.push_back(&BB);
67     llvm::sort(V);
68   }
69 
70   size_t blockToIndex(BasicBlock *BB) const {
71     auto *I = llvm::lower_bound(V, BB);
72     assert(I != V.end() && *I == BB && "BasicBlockNumberng: Unknown block");
73     return I - V.begin();
74   }
75 
76   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
77 };
78 } // end anonymous namespace
79 
80 // The SuspendCrossingInfo maintains data that allows to answer a question
81 // whether given two BasicBlocks A and B there is a path from A to B that
82 // passes through a suspend point.
83 //
84 // For every basic block 'i' it maintains a BlockData that consists of:
85 //   Consumes:  a bit vector which contains a set of indices of blocks that can
86 //              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of those paths crosses a suspend
//          point
89 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
90 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
91 //
namespace {
// Answers the query described above: is there a path from a value's defining
// block to one of its uses that crosses a suspend point?
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  // Per-block dataflow state; see the comment block above for the meaning of
  // each member.
  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  // Successors of the block owning BD. The block is recovered from BD's
  // position within the Block vector (pointer arithmetic against Block[0]).
  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  // Dataflow state for BB.
  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  // True if some path from DefBB to UseBB crosses a suspend point, i.e.
  // DefBB's bit is set in UseBB's Kills set.
  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  // Does the use U of a value defined in DefBB cross a suspend point?
  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  // Arguments are conceptually defined in the entry block.
  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  // Dispatch on the kind of definition; only arguments and instructions are
  // ever collected for spilling.
  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutine could only collect Argument and Instruction now.");
  }
};
} // end anonymous namespace
179 
180 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
181 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
182                                                 BitVector const &BV) const {
183   dbgs() << Label << ":";
184   for (size_t I = 0, N = BV.size(); I < N; ++I)
185     if (BV[I])
186       dbgs() << " " << Mapping.indexToBlock(I)->getName();
187   dbgs() << "\n";
188 }
189 
190 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
191   for (size_t I = 0, N = Block.size(); I < N; ++I) {
192     BasicBlock *const B = Mapping.indexToBlock(I);
193     dbgs() << B->getName() << ":\n";
194     dump("   Consumes", Block[I].Consumes);
195     dump("      Kills", Block[I].Kills);
196   }
197   dbgs() << "\n";
198 }
199 #endif
200 
201 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
202     : Mapping(F) {
203   const size_t N = Mapping.size();
204   Block.resize(N);
205 
206   // Initialize every block so that it consumes itself
207   for (size_t I = 0; I < N; ++I) {
208     auto &B = Block[I];
209     B.Consumes.resize(N);
210     B.Kills.resize(N);
211     B.Consumes.set(I);
212   }
213 
214   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
215   // the code beyond coro.end is reachable during initial invocation of the
216   // coroutine.
217   for (auto *CE : Shape.CoroEnds)
218     getBlockData(CE->getParent()).End = true;
219 
220   // Mark all suspend blocks and indicate that they kill everything they
221   // consume. Note, that crossing coro.save also requires a spill, as any code
222   // between coro.save and coro.suspend may resume the coroutine and all of the
223   // state needs to be saved by that time.
224   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
225     BasicBlock *SuspendBlock = BarrierInst->getParent();
226     auto &B = getBlockData(SuspendBlock);
227     B.Suspend = true;
228     B.Kills |= B.Consumes;
229   };
230   for (auto *CSI : Shape.CoroSuspends) {
231     markSuspendBlock(CSI);
232     if (auto *Save = CSI->getCoroSave())
233       markSuspendBlock(Save);
234   }
235 
236   // Iterate propagating consumes and kills until they stop changing.
237   int Iteration = 0;
238   (void)Iteration;
239 
240   bool Changed;
241   do {
242     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
243     LLVM_DEBUG(dbgs() << "==============\n");
244 
245     Changed = false;
246     for (size_t I = 0; I < N; ++I) {
247       auto &B = Block[I];
248       for (BasicBlock *SI : successors(B)) {
249 
250         auto SuccNo = Mapping.blockToIndex(SI);
251 
252         // Saved Consumes and Kills bitsets so that it is easy to see
253         // if anything changed after propagation.
254         auto &S = Block[SuccNo];
255         auto SavedConsumes = S.Consumes;
256         auto SavedKills = S.Kills;
257 
258         // Propagate Kills and Consumes from block B into its successor S.
259         S.Consumes |= B.Consumes;
260         S.Kills |= B.Kills;
261 
262         // If block B is a suspend block, it should propagate kills into the
263         // its successor for every block B consumes.
264         if (B.Suspend) {
265           S.Kills |= B.Consumes;
266         }
267         if (S.Suspend) {
268           // If block S is a suspend block, it should kill all of the blocks it
269           // consumes.
270           S.Kills |= S.Consumes;
271         } else if (S.End) {
272           // If block S is an end block, it should not propagate kills as the
273           // blocks following coro.end() are reached during initial invocation
274           // of the coroutine while all the data are still available on the
275           // stack or in the registers.
276           S.Kills.reset();
277         } else {
278           // This is reached when S block it not Suspend nor coro.end and it
279           // need to make sure that it is not in the kill set.
280           S.Kills.reset(SuccNo);
281         }
282 
283         // See if anything changed.
284         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
285 
286         if (S.Kills != SavedKills) {
287           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
288                             << "\n");
289           LLVM_DEBUG(dump("S.Kills", S.Kills));
290           LLVM_DEBUG(dump("SavedKills", SavedKills));
291         }
292         if (S.Consumes != SavedConsumes) {
293           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
294           LLVM_DEBUG(dump("S.Consume", S.Consumes));
295           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
296         }
297       }
298     }
299   } while (Changed);
300   LLVM_DEBUG(dump());
301 }
302 
303 #undef DEBUG_TYPE // "coro-suspend-crossing"
304 #define DEBUG_TYPE "coro-frame"
305 
namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
// Book-keeping for one alloca that must live on the coroutine frame.
struct AllocaInfo {
  AllocaInst *Alloca;
  // Instructions that alias the alloca. Presumably the Optional<APInt> is the
  // alias's constant offset into the alloca (None when not statically known);
  // populated outside this chunk — confirm against the collection code.
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  // Whether the alloca may be written before coro.begin executes; set by the
  // analysis that builds these records (outside this chunk).
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
// Everything destined for the coroutine frame (spills and allocas) plus the
// per-value layout data (field index, alignment, offset) recorded while the
// frame type is built.
struct FrameDataInfo {
  // All the values (that are not allocas) that needs to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  // Every value (spill or alloca) that gets a slot on the frame.
  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  // Frame field index of V; V must have been registered via setFieldIndex.
  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  // Record V's field index. Re-setting is only legal while updateLayoutIndex
  // is remapping insertion indexes to final layout indexes.
  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  uint64_t getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, uint64_t Align) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will be first set
  // with their original insertion field index. After the frame is built, their
  // indexes will be updated into the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace
391 
392 #ifndef NDEBUG
393 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
394   dbgs() << "------------- " << Title << "--------------\n";
395   for (const auto &E : Spills) {
396     E.first->dump();
397     dbgs() << "   user: ";
398     for (auto *I : E.second)
399       I->dump();
400   }
401 }
402 
403 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
404   dbgs() << "------------- Allocas --------------\n";
405   for (const auto &A : Allocas) {
406     A.Alloca->dump();
407   }
408 }
409 #endif
410 
namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame and if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type we will need to insert
// padding.
class FrameTypeBuilder {
private:
  // One prospective member of the frame struct.
  struct Field {
    uint64_t Size;
    // Byte offset within the frame; FlexibleOffset until layout assigns one.
    uint64_t Offset;
    Type *Ty;
    // Index of this field in the final struct body; set by finish().
    FieldIDType LayoutFieldIndex;
    // Alignment the field must actually receive in the frame.
    Align Alignment;
    // ABI alignment of Ty (possibly clamped for value spills); used to decide
    // whether the struct must be packed and where padding is required.
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  // Final size/alignment of the frame; only valid once IsFinished is true.
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  // Optional cap applied to the alignment of value spills.
  Optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   Optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put the allocas whose lifetime-ranges are not overlapped
  /// into one slot of coroutine frame.
  /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of coroutine frame.
  ///
  /// This function use StackLifetime algorithm to partition the AllocaInsts in
  /// Spills to non-overlapped sets in order to put Alloca in the same
  /// non-overlapped set into the same slot in the Coroutine Frame. Then add
  /// field for the allocas in the same non-overlapped set by using the largest
  /// type as the field type.
  ///
  /// Side Effects: Because We sort the allocas, the order of allocas in the
  /// frame may be different with the order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false,
                                      bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size=0, we don't need to add a field and they
    // can just point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    // If we are spilling values we don't need to worry about ABI alignment
    // concerns.
    auto ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment =
        (IsSpillOfValue && MaxFrameAlignment)
            ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
            : ABIAlign;
    if (!FieldAlignment) {
      FieldAlignment = TyAlignment;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace
559 
560 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
561   auto Updater = [&](Value *I) {
562     auto Field = B.getLayoutField(getFieldIndex(I));
563     setFieldIndex(I, Field.LayoutFieldIndex);
564     setAlign(I, Field.Alignment.value());
565     setOffset(I, Field.Offset);
566   };
567   LayoutIndexUpdateStarted = true;
568   for (auto &S : Spills)
569     Updater(S.first);
570   for (const auto &A : Allocas)
571     Updater(A.Alloca);
572   LayoutIndexUpdateStarted = false;
573 }
574 
// Partition FrameData.Allocas into sets with non-overlapping life ranges and
// create one frame field per set (sized by the set's largest alloca). See the
// declaration comment for the motivating example.
void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlapedAllocas;

  // We need to add field for allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  // Without frame optimization / the testing flag, every alloca gets its own
  // singleton set (and thus its own frame field).
  if (!Shape.OptimizeFrame && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the life ranges for every alloca overlap
  // in the blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the life range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas to live in the same
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        // Temporarily retarget the default (coro.end) destination so the
        // liveness analysis below ignores the post-coro.end blocks.
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  // True when the two allocas' live ranges overlap (interfere).
  auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front. So the larger allocas have higher
  // priority to merge, which can save more space potentially. Also each
  // AllocaSet would be ordered. So we can get the largest Alloca in one
  // AllocaSet easily.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find if the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If it is true, insert the alloca to that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInference = none_of(AllocaSet, [&](auto Iter) {
        return IsAllocaInferenre(Alloca, Iter);
      });
      // If the alignment of A is multiple of the alignment of B, the address
      // of A should satisfy the requirement for aligning for B.
      //
      // There may be other more fine-grained strategies to handle the alignment
      // information during the merging process. But it seems hard to handle
      // these strategies and benefit little.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Recover the default target destination for each Switch statement
  // reserved.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This Debug Info could tell us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlapedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}
702 
// Run the optimized struct layout over the collected fields, insert padding
// where required, and install the resulting body on Ty.
void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  // Recover our Field record from the layout field's opaque Id.
  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    // Record the final offset and struct-body index on our Field record so
    // callers (e.g. updateLayoutIndex) can query them.
    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}
774 
775 static void cacheDIVar(FrameDataInfo &FrameData,
776                        DenseMap<Value *, DILocalVariable *> &DIVarCache) {
777   for (auto *V : FrameData.getAllDefs()) {
778     if (DIVarCache.find(V) != DIVarCache.end())
779       continue;
780 
781     auto DDIs = FindDbgDeclareUses(V);
782     auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
783       return DDI->getExpression()->getNumElements() == 0;
784     });
785     if (I != DDIs.end())
786       DIVarCache.insert({V, (*I)->getVariable()});
787   }
788 }
789 
790 /// Create name for Type. It uses MDString to store new created string to
791 /// avoid memory leak.
792 static StringRef solveTypeName(Type *Ty) {
793   if (Ty->isIntegerTy()) {
794     // The longest name in common may be '__int_128', which has 9 bits.
795     SmallString<16> Buffer;
796     raw_svector_ostream OS(Buffer);
797     OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
798     auto *MDName = MDString::get(Ty->getContext(), OS.str());
799     return MDName->getString();
800   }
801 
802   if (Ty->isFloatingPointTy()) {
803     if (Ty->isFloatTy())
804       return "__float_";
805     if (Ty->isDoubleTy())
806       return "__double_";
807     return "__floating_type_";
808   }
809 
810   if (Ty->isPointerTy()) {
811     auto *PtrTy = cast<PointerType>(Ty);
812     Type *PointeeTy = PtrTy->getPointerElementType();
813     auto Name = solveTypeName(PointeeTy);
814     if (Name == "UnknownType")
815       return "PointerType";
816     SmallString<16> Buffer;
817     Twine(Name + "_Ptr").toStringRef(Buffer);
818     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
819     return MDName->getString();
820   }
821 
822   if (Ty->isStructTy()) {
823     if (!cast<StructType>(Ty)->hasName())
824       return "__LiteralStructType_";
825 
826     auto Name = Ty->getStructName();
827 
828     SmallString<16> Buffer(Name);
829     for_each(Buffer, [](auto &Iter) {
830       if (Iter == '.' || Iter == ':')
831         Iter = '_';
832     });
833     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
834     return MDName->getString();
835   }
836 
837   return "UnknownType";
838 }
839 
840 static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
841                            const DataLayout &Layout, DIScope *Scope,
842                            unsigned LineNum,
843                            DenseMap<Type *, DIType *> &DITypeCache) {
844   if (DIType *DT = DITypeCache.lookup(Ty))
845     return DT;
846 
847   StringRef Name = solveTypeName(Ty);
848 
849   DIType *RetType = nullptr;
850 
851   if (Ty->isIntegerTy()) {
852     auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
853     RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
854                                       llvm::DINode::FlagArtificial);
855   } else if (Ty->isFloatingPointTy()) {
856     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
857                                       dwarf::DW_ATE_float,
858                                       llvm::DINode::FlagArtificial);
859   } else if (Ty->isPointerTy()) {
860     // Construct BasicType instead of PointerType to avoid infinite
861     // search problem.
862     // For example, we would be in trouble if we traverse recursively:
863     //
864     //  struct Node {
865     //      Node* ptr;
866     //  };
867     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
868                                       dwarf::DW_ATE_address,
869                                       llvm::DINode::FlagArtificial);
870   } else if (Ty->isStructTy()) {
871     auto *DIStruct = Builder.createStructType(
872         Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
873         Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
874         llvm::DINodeArray());
875 
876     auto *StructTy = cast<StructType>(Ty);
877     SmallVector<Metadata *, 16> Elements;
878     for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
879       DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
880                                  Scope, LineNum, DITypeCache);
881       assert(DITy);
882       Elements.push_back(Builder.createMemberType(
883           Scope, DITy->getName(), Scope->getFile(), LineNum,
884           DITy->getSizeInBits(), DITy->getAlignInBits(),
885           Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
886           llvm::DINode::FlagArtificial, DITy));
887     }
888 
889     Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
890 
891     RetType = DIStruct;
892   } else {
893     LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
894     SmallString<32> Buffer;
895     raw_svector_ostream OS(Buffer);
896     OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
897     RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
898                                       dwarf::DW_ATE_address,
899                                       llvm::DINode::FlagArtificial);
900   }
901 
902   DITypeCache.insert({Ty, RetType});
903   return RetType;
904 }
905 
/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
/// 1. For each value in the frame, we search its dbg.declare uses to find
///    the corresponding debug variable for the value. If we can find the
///    debug variable, we get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we can only try to build
///    the DIType from the LLVM type. We do this in solveDIType. We only
///    handle integer, floating-point, pointer and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function is not
  // compiled with debug info, so we don't need to generate debug info for
  // the frame either. We also bail out for non-C++ source languages since
  // the layout emitted below is modeled on the C++ switch-ABI coroutine.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We could only build debug infomation for C++ coroutine now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  // The promise's dbg.declare anchors the new debug info: its variable gives
  // us a scope, file and line to attach the frame type to. Without one we
  // cannot place the frame variable, so give up.
  TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
  if (DIs.empty())
    return;

  DbgDeclareInst *PromiseDDI = DIs.front();
  DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  DILocation *DILoc = PromiseDDI->getDebugLoc().get();
  unsigned LineNum = PromiseDIVariable->getLine();

  // Debug info expresses sizes and alignments in bits, hence the "* 8".
  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  // Step 1: map each spilled value to the DILocalVariable of its
  // dbg.declare, when one exists.
  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  // The three switch-ABI header fields get fixed, well-known names...
  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  // ... and fixed, artificial basic types.
  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert({ResumeIndex,
                  DBuilder.createBasicType("__resume_fn",
                                           Layout.getTypeSizeInBits(ResumeFnTy),
                                           dwarf::DW_ATE_address)});
  TyCache.insert(
      {DestroyIndex, DBuilder.createBasicType(
                         "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
                         dwarf::DW_ATE_address)});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  // For values that had a dbg.declare, reuse the user-visible variable name
  // and type instead of synthesizing one from the LLVM type.
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) == DIVarCache.end())
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from field index to an (Align, Offset) pair, both in bytes.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The Align and Offset of Resume function and Destroy function are fixed.
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid same type names. e.g., there would be
  // many i32 and i64 types in one coroutine. And we would use i32_0 and
  // i32_1 to avoid the same type. Since it makes no sense the name of the
  // fields conflicts with each other.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    // Fields with no recorded layout info (e.g. padding) are skipped.
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle type which is not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      // Known field (header field or a value with a dbg.declare).
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      // No debug variable available: synthesize a type and a uniquified name.
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  // Install the collected members into the (initially empty) frame type.
  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to the
  // RetainedNodes of it.
  //
  // If we don't add __coro_frame to the RetainedNodes, user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame`
  // is optimized out, which is more precise.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    // Replace the retainedNodes operand (operand 7) with the extended tuple.
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  // Finally, declare __coro_frame at the frame pointer, right after the
  // bitcast that materializes it.
  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.FramePtr->getNextNode());
}
1083 
// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
//
// Returns the new frame type and records the frame's size and alignment —
// plus ABI-specific layout facts — in Shape.
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  // Create an opaque named struct "<function-name>.Frame"; its body is
  // installed later by B.finish().
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  // We will use this value to cap the alignment of spilled values.
  Optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index.  This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may own the same field slot,
  // we add allocas to field here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to Allocas list so that
  // 1. updateLayoutIndex could update its index after
  // `performOptimizedStructLayout`
  // 2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed value in the frame,
    // instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id =
        B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  // Run the optimized struct layout and publish the results into Shape and
  // FrameData.
  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    // The frame lives in the async context right after the context header,
    // aligned to the frame's own alignment.
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirment of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}
1211 
1212 // We use a pointer use visitor to track how an alloca is being used.
1213 // The goal is to be able to answer the following three questions:
1214 // 1. Should this alloca be allocated on the frame instead.
// 2. Could the content of the alloca be modified prior to CoroBegin, which would
1216 // require copying the data from alloca to the frame after CoroBegin.
// 3. Is there any alias created for this alloca prior to CoroBegin, but used
// after CoroBegin. In that case, we will need to recreate the alias after
// CoroBegin based off the frame.
// To answer question 1, we track two things:
//   a. List of all BasicBlocks that use this alloca or any of the aliases of
//   the alloca. In the end, we check if there exists any two basic blocks that
//   cross suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some point,
//   either by storing the address somewhere, or the address is used in a
//   function call that might capture. If it's ever escaped, this alloca must
//   be put on the frame conservatively.
// To answer question 2, we track through the variable MayWriteBeforeCoroBegin.
1228 // Whenever a potential write happens, either through a store instruction, a
1229 // function call or any of the memory intrinsics, we check whether this
1230 // instruction is prior to CoroBegin. To answer question 3, we track the offsets
1231 // of all aliases created for the alloca prior to CoroBegin but used after
1232 // CoroBegin. llvm::Optional is used to be able to represent the case when the
1233 // offset is unknown (e.g. when you have a PHINode that takes in different
1234 // offset values). We cannot handle unknown offsets and will assert. This is the
1235 // potential issue left out. An ideal solution would likely require a
1236 // significant redesign.
1237 namespace {
1238 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1239   using Base = PtrUseVisitor<AllocaUseVisitor>;
1240   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1241                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
1242                    bool ShouldUseLifetimeStartInfo)
1243       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
1244         ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1245 
1246   void visit(Instruction &I) {
1247     Users.insert(&I);
1248     Base::visit(I);
1249     // If the pointer is escaped prior to CoroBegin, we have to assume it would
1250     // be written into before CoroBegin as well.
1251     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1252       MayWriteBeforeCoroBegin = true;
1253     }
1254   }
1255   // We need to provide this overload as PtrUseVisitor uses a pointer based
1256   // visiting function.
1257   void visit(Instruction *I) { return visit(*I); }
1258 
1259   void visitPHINode(PHINode &I) {
1260     enqueueUsers(I);
1261     handleAlias(I);
1262   }
1263 
1264   void visitSelectInst(SelectInst &I) {
1265     enqueueUsers(I);
1266     handleAlias(I);
1267   }
1268 
1269   void visitStoreInst(StoreInst &SI) {
1270     // Regardless whether the alias of the alloca is the value operand or the
1271     // pointer operand, we need to assume the alloca is been written.
1272     handleMayWrite(SI);
1273 
1274     if (SI.getValueOperand() != U->get())
1275       return;
1276 
1277     // We are storing the pointer into a memory location, potentially escaping.
1278     // As an optimization, we try to detect simple cases where it doesn't
1279     // actually escape, for example:
1280     //   %ptr = alloca ..
1281     //   %addr = alloca ..
1282     //   store %ptr, %addr
1283     //   %x = load %addr
1284     //   ..
1285     // If %addr is only used by loading from it, we could simply treat %x as
1286     // another alias of %ptr, and not considering %ptr being escaped.
1287     auto IsSimpleStoreThenLoad = [&]() {
1288       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1289       // If the memory location we are storing to is not an alloca, it
1290       // could be an alias of some other memory locations, which is difficult
1291       // to analyze.
1292       if (!AI)
1293         return false;
1294       // StoreAliases contains aliases of the memory location stored into.
1295       SmallVector<Instruction *, 4> StoreAliases = {AI};
1296       while (!StoreAliases.empty()) {
1297         Instruction *I = StoreAliases.pop_back_val();
1298         for (User *U : I->users()) {
1299           // If we are loading from the memory location, we are creating an
1300           // alias of the original pointer.
1301           if (auto *LI = dyn_cast<LoadInst>(U)) {
1302             enqueueUsers(*LI);
1303             handleAlias(*LI);
1304             continue;
1305           }
1306           // If we are overriding the memory location, the pointer certainly
1307           // won't escape.
1308           if (auto *S = dyn_cast<StoreInst>(U))
1309             if (S->getPointerOperand() == I)
1310               continue;
1311           if (auto *II = dyn_cast<IntrinsicInst>(U))
1312             if (II->isLifetimeStartOrEnd())
1313               continue;
1314           // BitCastInst creats aliases of the memory location being stored
1315           // into.
1316           if (auto *BI = dyn_cast<BitCastInst>(U)) {
1317             StoreAliases.push_back(BI);
1318             continue;
1319           }
1320           return false;
1321         }
1322       }
1323 
1324       return true;
1325     };
1326 
1327     if (!IsSimpleStoreThenLoad())
1328       PI.setEscaped(&SI);
1329   }
1330 
1331   // All mem intrinsics modify the data.
1332   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1333 
1334   void visitBitCastInst(BitCastInst &BC) {
1335     Base::visitBitCastInst(BC);
1336     handleAlias(BC);
1337   }
1338 
1339   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1340     Base::visitAddrSpaceCastInst(ASC);
1341     handleAlias(ASC);
1342   }
1343 
1344   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1345     // The base visitor will adjust Offset accordingly.
1346     Base::visitGetElementPtrInst(GEPI);
1347     handleAlias(GEPI);
1348   }
1349 
1350   void visitIntrinsicInst(IntrinsicInst &II) {
1351     // When we found the lifetime markers refers to a
1352     // subrange of the original alloca, ignore the lifetime
1353     // markers to avoid misleading the analysis.
1354     if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1355         !Offset.isZero())
1356       return Base::visitIntrinsicInst(II);
1357     LifetimeStarts.insert(&II);
1358   }
1359 
1360   void visitCallBase(CallBase &CB) {
1361     for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1362       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1363         PI.setEscaped(&CB);
1364     handleMayWrite(CB);
1365   }
1366 
1367   bool getShouldLiveOnFrame() const {
1368     if (!ShouldLiveOnFrame)
1369       ShouldLiveOnFrame = computeShouldLiveOnFrame();
1370     return ShouldLiveOnFrame.getValue();
1371   }
1372 
1373   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1374 
1375   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1376     assert(getShouldLiveOnFrame() && "This method should only be called if the "
1377                                      "alloca needs to live on the frame.");
1378     for (const auto &P : AliasOffetMap)
1379       if (!P.second)
1380         report_fatal_error("Unable to handle an alias with unknown offset "
1381                            "created before CoroBegin.");
1382     return AliasOffetMap;
1383   }
1384 
1385 private:
1386   const DominatorTree &DT;
1387   const CoroBeginInst &CoroBegin;
1388   const SuspendCrossingInfo &Checker;
1389   // All alias to the original AllocaInst, created before CoroBegin and used
1390   // after CoroBegin. Each entry contains the instruction and the offset in the
1391   // original Alloca. They need to be recreated after CoroBegin off the frame.
1392   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1393   SmallPtrSet<Instruction *, 4> Users{};
1394   SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1395   bool MayWriteBeforeCoroBegin{false};
1396   bool ShouldUseLifetimeStartInfo{true};
1397 
1398   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1399 
1400   bool computeShouldLiveOnFrame() const {
1401     // If lifetime information is available, we check it first since it's
1402     // more precise. We look at every pair of lifetime.start intrinsic and
1403     // every basic block that uses the pointer to see if they cross suspension
1404     // points. The uses cover both direct uses as well as indirect uses.
1405     if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1406       for (auto *I : Users)
1407         for (auto *S : LifetimeStarts)
1408           if (Checker.isDefinitionAcrossSuspend(*S, I))
1409             return true;
1410       return false;
1411     }
1412     // FIXME: Ideally the isEscaped check should come at the beginning.
1413     // However there are a few loose ends that need to be fixed first before
1414     // we can do that. We need to make sure we are not over-conservative, so
1415     // that the data accessed in-between await_suspend and symmetric transfer
1416     // is always put on the stack, and also data accessed after coro.end is
1417     // always put on the stack (esp the return object). To fix that, we need
1418     // to:
1419     //  1) Potentially treat sret as nocapture in calls
1420     //  2) Special handle the return object and put it on the stack
1421     //  3) Utilize lifetime.end intrinsic
1422     if (PI.isEscaped())
1423       return true;
1424 
1425     for (auto *U1 : Users)
1426       for (auto *U2 : Users)
1427         if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1428           return true;
1429 
1430     return false;
1431   }
1432 
1433   void handleMayWrite(const Instruction &I) {
1434     if (!DT.dominates(&CoroBegin, &I))
1435       MayWriteBeforeCoroBegin = true;
1436   }
1437 
1438   bool usedAfterCoroBegin(Instruction &I) {
1439     for (auto &U : I.uses())
1440       if (DT.dominates(&CoroBegin, U))
1441         return true;
1442     return false;
1443   }
1444 
1445   void handleAlias(Instruction &I) {
1446     // We track all aliases created prior to CoroBegin but used after.
1447     // These aliases may need to be recreated after CoroBegin if the alloca
1448     // need to live on the frame.
1449     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1450       return;
1451 
1452     if (!IsOffsetKnown) {
1453       AliasOffetMap[&I].reset();
1454     } else {
1455       auto Itr = AliasOffetMap.find(&I);
1456       if (Itr == AliasOffetMap.end()) {
1457         AliasOffetMap[&I] = Offset;
1458       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1459         // If we have seen two different possible values for this alias, we set
1460         // it to empty.
1461         AliasOffetMap[&I].reset();
1462       }
1463     }
1464   }
1465 };
1466 } // namespace
1467 
1468 // We need to make room to insert a spill after initial PHIs, but before
1469 // catchswitch instruction. Placing it before violates the requirement that
1470 // catchswitch, like all other EHPads must be the first nonPHI in a block.
1471 //
1472 // Split away catchswitch into a separate block and insert in its place:
1473 //
1474 //   cleanuppad <InsertPt> cleanupret.
1475 //
1476 // cleanupret instruction will act as an insert point for the spill.
1477 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1478   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1479   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1480   CurrentBlock->getTerminator()->eraseFromParent();
1481 
1482   auto *CleanupPad =
1483       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1484   auto *CleanupRet =
1485       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1486   return CleanupRet;
1487 }
1488 
1489 static void createFramePtr(coro::Shape &Shape) {
1490   auto *CB = Shape.CoroBegin;
1491   IRBuilder<> Builder(CB->getNextNode());
1492   StructType *FrameTy = Shape.FrameTy;
1493   PointerType *FramePtrTy = FrameTy->getPointerTo();
1494   Shape.FramePtr =
1495       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1496 }
1497 
1498 // Replace all alloca and SSA values that are accessed across suspend points
1499 // with GetElementPointer from coroutine frame + loads and stores. Create an
1500 // AllocaSpillBB that will become the new entry block for the resume parts of
1501 // the coroutine:
1502 //
1503 //    %hdl = coro.begin(...)
1504 //    whatever
1505 //
1506 // becomes:
1507 //
1508 //    %hdl = coro.begin(...)
1509 //    %FramePtr = bitcast i8* hdl to %f.frame*
1510 //    br label %AllocaSpillBB
1511 //
1512 //  AllocaSpillBB:
1513 //    ; geps corresponding to allocas that were moved to coroutine frame
1514 //    br label PostSpill
1515 //
1516 //  PostSpill:
1517 //    whatever
1518 //
1519 //
static Instruction *insertSpills(const FrameDataInfo &FrameData,
                                 coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Instruction *FramePtr = Shape.FramePtr;
  // The dominator tree decides whether a use is reachable only after
  // coro.begin and must therefore be rewritten to go through the frame.
  DominatorTree DT(*CB->getFunction());
  // Cache used by coro::salvageDebugInfo below to avoid creating duplicate
  // debug allocas for the same value.
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;

  // Create a GEP with the given index into the coroutine frame for the original
  // value Orig. Appends an extra 0 index for array-allocas, preserving the
  // original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          // Array alloca: add a trailing zero index so the GEP points at the
          // first element and keeps the element type.
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        // Dynamically-sized allocas cannot be given a frame slot.
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of GEP is not equal to the type of AllocaInst, it implies
      // that the AllocaInst may be reused in the Frame slot of other
      // AllocaInst. So We cast GEP to the AllocaInst here to re-use
      // the Frame storage.
      //
      // Note: If we change the strategy dealing with alignment, we need to
      // refine this casting.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  // For each spilled value: emit one store into its frame field at (or near)
  // the definition, and one reload per using basic block.
  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    bool NeedToCopyArgPtrValue = false;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = FramePtr->getNextNode();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

      // byval arguments are spilled by value, not by pointer (see below).
      if (Arg->hasByValAttr())
        NeedToCopyArgPtrValue = true;

    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then spill should be
        // inserted immediately after CoroFrame is computed.
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip the PHINodes and EH pads instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (NeedToCopyArgPtrValue) {
      // For byval arguments, we need to store the pointed value in the frame,
      // instead of the pointer itself.
      auto *Value =
          Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to reload
      // the spilled value from the coroutine frame. Populates the Value pointer
      // reference provided with the frame GEP.
      if (CurrentBlock != U->getParent()) {
        // One reload per block; subsequent uses in the same block share it.
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        // For byval arguments the frame field *is* the value, so the address
        // itself stands in for the original pointer argument.
        if (NeedToCopyArgPtrValue)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() by CoroCloner.
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point.  It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
        }
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by rewriting them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  // Split twice so AllocaSpillBB contains only the frame GEPs for relocated
  // allocas (see the function-level comment above).
  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with Geps.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any alloca, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas for
  // dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      // Only rewrite uses that run after coro.begin; earlier uses keep the
      // original alloca.
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    // Point debug intrinsics at the frame slot rather than the dead alloca.
    SmallVector<DbgVariableIntrinsic *, 4> DIs;
    findDbgUsers(DIs, Alloca);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  // Emit copies/alias fixups immediately after the frame pointer is computed.
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      // Copy the pre-coro.begin contents of the alloca into its frame slot.
      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          Type::getInt8Ty(C), FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      // Only rewrite uses reachable from coro.begin; earlier uses still refer
      // to the original (pre-frame) alias.
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}
1769 
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  // Precondition: SuccBB starts with at least one PHI, and InsertedBB has
  // already been spliced onto the PredBB -> SuccBB edge, so every PHI in
  // SuccBB has an incoming entry for InsertedBB.
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    // Create a single-entry PHI in InsertedBB that carries the value which
    // used to flow in from PredBB, and route SuccBB's PHI through it.
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    // Walk the PHI prefix of SuccBB. dyn_cast yields null past the last PHI,
    // which terminates the loop when UntilPHI is null; otherwise we stop at
    // the caller-specified PHI.
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}
1788 
1789 // Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  // cleanuppad:
  //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
  //    %3 = cleanuppad within none []
  //
  // It will create:
  //
  // cleanuppad.corodispatch
  //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
  //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
  //            [i8 0, label %cleanuppad.from.catchswitch
  //             i8 1, label %cleanuppad.from.catch.1]
  // cleanuppad.from.catchswitch:
  //    %4 = phi i32 [%0, %catchswitch]
  //    br %label cleanuppad
  // cleanuppad.from.catch.1:
  //    %6 = phi i32 [%1, %catch.1]
  //    br %label cleanuppad
  // cleanuppad:
  //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                 [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  // PHI selecting the per-predecessor dispatch index (i8 constant per edge).
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  // The cleanuppad instruction itself must live in the dispatcher block,
  // right after the dispatch PHI.
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values to there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Setup the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}
1864 
1865 static void cleanupSinglePredPHIs(Function &F) {
1866   SmallVector<PHINode *, 32> Worklist;
1867   for (auto &BB : F) {
1868     for (auto &Phi : BB.phis()) {
1869       if (Phi.getNumIncomingValues() == 1) {
1870         Worklist.push_back(&Phi);
1871       } else
1872         break;
1873     }
1874   }
1875   while (!Worklist.empty()) {
1876     auto *Phi = Worklist.pop_back_val();
1877     auto *OriginalValue = Phi->getIncomingValue(0);
1878     Phi->replaceAllUsesWith(OriginalValue);
1879   }
1880 }
1881 
static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI nodes.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br %label loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br %label loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  // Snapshot the predecessor list: splitting edges below mutates the CFG.
  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge function cloned the original landing pad.
    // No longer need it.
    LandingPad->eraseFromParent();
  }
}
1951 
1952 static void rewritePHIs(Function &F) {
1953   SmallVector<BasicBlock *, 8> WorkList;
1954 
1955   for (BasicBlock &BB : F)
1956     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1957       if (PN->getNumIncomingValues() > 1)
1958         WorkList.push_back(&BB);
1959 
1960   for (BasicBlock *BB : WorkList)
1961     rewritePHIs(*BB);
1962 }
1963 
1964 // Check for instructions that we can recreate on resume as opposed to spill
1965 // the result into a coroutine frame.
1966 static bool materializable(Instruction &V) {
1967   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1968          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1969 }
1970 
1971 // Check for structural coroutine intrinsics that should not be spilled into
1972 // the coroutine frame.
1973 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1974   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1975          isa<CoroSuspendInst>(&I);
1976 }
1977 
1978 // For every use of the value that is across suspend point, recreate that value
1979 // after a suspend point.
// Note: the IRB parameter is unused in this function; clones are positioned
// explicitly via insertBefore below.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    // One clone of Def per using block; all uses in that block share it.
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {

        bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
        CurrentBlock = U->getParent();
        // A suspend block must start with the suspend instruction, so the
        // clone goes at the end of the single predecessor instead.
        auto *InsertBlock = IsInCoroSuspendBlock
                                ? CurrentBlock->getSinglePredecessor()
                                : CurrentBlock;
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            IsInCoroSuspendBlock ? InsertBlock->getTerminator()
                                 : &*InsertBlock->getFirstInsertionPt());
      }
      // Single-incoming PHIs (created by rewritePHIs) are replaced outright.
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}
2015 
2016 // Splits the block at a particular instruction unless it is the first
2017 // instruction in the block with a single predecessor.
2018 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2019   auto *BB = I->getParent();
2020   if (&BB->front() == I) {
2021     if (BB->getSinglePredecessor()) {
2022       BB->setName(Name);
2023       return BB;
2024     }
2025   }
2026   return BB->splitBasicBlock(I, Name);
2027 }
2028 
2029 // Split above and below a particular instruction so that it
2030 // will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  // First make I the head of its own block, then split again immediately
  // after it, leaving I alone in a block named Name.
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}
2035 
2036 static bool isSuspendBlock(BasicBlock *BB) {
2037   return isa<AnyCoroSuspendInst>(BB->front());
2038 }
2039 
2040 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
2041 
2042 /// Does control flow starting at the given block ever reach a suspend
2043 /// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set.  If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.  VisitedOrFreeBBs accumulates every block
  // reached, so each block is examined at most once.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}
2064 
2065 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2066 /// suspend point?
2067 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2068   // Seed the visited set with all the basic blocks containing a free
2069   // so that we won't pass them up.
2070   VisitedBlocksSet VisitedOrFreeBBs;
2071   for (auto User : AI->users()) {
2072     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2073       VisitedOrFreeBBs.insert(FI->getParent());
2074   }
2075 
2076   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2077 }
2078 
2079 /// After we split the coroutine, will the given basic block be along
2080 /// an obvious exit path for the resumption function?
2081 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2082                                               unsigned depth = 3) {
2083   // If we've bottomed out our depth count, stop searching and assume
2084   // that the path might loop back.
2085   if (depth == 0) return false;
2086 
2087   // If this is a suspend block, we're about to exit the resumption function.
2088   if (isSuspendBlock(BB)) return true;
2089 
2090   // Recurse into the successors.
2091   for (auto Succ : successors(BB)) {
2092     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2093       return false;
2094   }
2095 
2096   // If none of the successors leads back in a loop, we're on an exit/abort.
2097   return true;
2098 }
2099 
2100 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2101   // Look for a free that isn't sufficiently obviously followed by
2102   // either a suspend or a termination, i.e. something that will leave
2103   // the coro resumption frame.
2104   for (auto U : AI->users()) {
2105     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2106     if (!FI) continue;
2107 
2108     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2109       return true;
2110   }
2111 
2112   // If we never found one, we don't need a stack save.
2113   return false;
2114 }
2115 
2116 /// Turn each of the given local allocas into a normal (dynamic) alloca
2117 /// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    // Builder starts positioned at the coro.alloca.alloc being lowered.
    IRBuilder<> Builder(AI);

    // Save the stack depth.  Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
                            Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores.  This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          // Emit the restore at the free site, not at the alloc site.
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
                    Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
                             StackSave);
        }
      }
      // Gets and frees are both dead once rewritten; queue them rather than
      // erasing mid-iteration over AI's users.
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}
2158 
2159 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
2160 /// This happens during the all-instructions iteration, so it must not
2161 /// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                   SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  // Emit the ABI-appropriate dynamic allocation in place of the intrinsic.
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      // Gets simply forward to the new allocation.
      U->replaceAllUsesWith(Alloc);
    } else {
      // Frees become ABI-appropriate deallocations at the free site.
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}
2185 
2186 /// Get the current swifterror value.
2187 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2188                                      coro::Shape &Shape) {
2189   // Make a fake function pointer as a sort of intrinsic.
2190   auto FnTy = FunctionType::get(ValueTy, {}, false);
2191   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2192 
2193   auto Call = Builder.CreateCall(FnTy, Fn, {});
2194   Shape.SwiftErrorOps.push_back(Call);
2195 
2196   return Call;
2197 }
2198 
2199 /// Set the given value as the current swifterror value.
2200 ///
2201 /// Returns a slot that can be used as a swifterror slot.
2202 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2203                                      coro::Shape &Shape) {
2204   // Make a fake function pointer as a sort of intrinsic.
2205   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2206                                 {V->getType()}, false);
2207   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2208 
2209   auto Call = Builder.CreateCall(FnTy, Fn, { V });
2210   Shape.SwiftErrorOps.push_back(Call);
2211 
2212   return Call;
2213 }
2214 
2215 /// Set the swifterror value from the given alloca before a call,
2216 /// then put in back in the alloca afterwards.
2217 ///
2218 /// Returns an address that will stand in for the swifterror slot
2219 /// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call.  Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    // For invokes, the "after" point is the start of the normal destination.
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  // Addr (from the fake "set") stands in for the swifterror slot until
  // splitting rewrites these operations.
  return Addr;
}
2247 
2248 /// Eliminate a formerly-swifterror alloca by inserting the get/set
2249 /// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  // early_inc_range: Use.set() below mutates the use list while iterating.
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    // Loads and stores are promotable as-is; leave them for MemToReg.
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    // Bracket the call with set/get of the swifterror value, redirecting
    // this use to the stand-in slot address.
    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}
2271 
2272 /// "Eliminate" a swifterror argument by reducing it to the alloca case
2273 /// and then loading and storing in the prologue and epilog.
2274 ///
2275 /// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                             SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  // Build the prologue code at the top of the entry block.
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getPointerElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca.  swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}
2310 
2311 /// Eliminate all problematic uses of swifterror arguments and allocas
2312 /// from the function.  We'll fix them up later when splitting the function.
2313 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2314   SmallVector<AllocaInst*, 4> AllocasToPromote;
2315 
2316   // Look for a swifterror argument.
2317   for (auto &Arg : F.args()) {
2318     if (!Arg.hasSwiftErrorAttr()) continue;
2319 
2320     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2321     break;
2322   }
2323 
2324   // Look for swifterror allocas.
2325   for (auto &Inst : F.getEntryBlock()) {
2326     auto Alloca = dyn_cast<AllocaInst>(&Inst);
2327     if (!Alloca || !Alloca->isSwiftError()) continue;
2328 
2329     // Clear the swifterror flag.
2330     Alloca->setSwiftError(false);
2331 
2332     AllocasToPromote.push_back(Alloca);
2333     eliminateSwiftErrorAlloca(F, Alloca, Shape);
2334   }
2335 
2336   // If we have any allocas to promote, compute a dominator tree and
2337   // promote them en masse.
2338   if (!AllocasToPromote.empty()) {
2339     DominatorTree DT(F);
2340     PromoteMemToReg(AllocasToPromote, DT);
2341   }
2342 }
2343 
/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
///
/// To make that assumption hold, gather every user of a frame definition
/// that is not already dominated by coro.begin -- transitively, since such a
/// user may itself feed further users that also precede coro.begin -- and
/// move them all to just after coro.begin, ordered by dominance.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  // ToMove is a SetVector so each instruction is moved exactly once while
  // preserving discovery order for the worklist.
  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  // NOTE(review): this seed loop only admits users in coro.begin's own
  // block (and not dominated by it), while the recursive loop below admits
  // users from any block not dominated by coro.begin -- confirm the
  // asymmetry is intentional.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance so that each moved instruction still precedes its
  // users after the move.
  // NOTE(review): instruction dominance is a partial order, not a strict
  // weak ordering as llvm::sort requires; this appears to rely on all
  // collected instructions being mutually comparable -- worth confirming.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If a dominates b it should precede (<) b.
    return Dom.dominates(A, B);
  });

  // Re-insert everything immediately after coro.begin, in sorted order.
  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}
2388 
/// For each alloca whose users are all dominated by a single candidate block
/// (the entry block or the block immediately after a suspend point) without
/// crossing a suspend, sink its lifetime.start marker into that block.
/// Doing so minimizes the lifetime of each variable, hence minimizing the
/// amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas:
  // the entry block plus the single successor of each suspend block (the
  // block control reaches right after resuming).
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    // Try each candidate block in turn; stop at the first one that works.
    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      // lifetime.start markers found outside the candidate region (reached
      // either directly or through a single-use cast/GEP of the alloca).
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      // Returns true if U is a lifetime.start of AI, or a single-use
      // pointer cast of AI whose only user is a lifetime.start; in either
      // case the marker itself is recorded in Lifetimes.
      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers, if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start, GEP and bitcast used by lifetime.start
          // markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to dominate block when they are
      // only used outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // Compute the pointer operand for the sunk marker; may be AI
        // itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        // Clone one of the collected markers into DomBB, pointing at the
        // (possibly re-cast) alloca.
        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // The original out-of-region lifetime.start markers are no longer
        // necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}
2477 
2478 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2479                                 const SuspendCrossingInfo &Checker,
2480                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2481   for (Instruction &I : instructions(F)) {
2482     auto *AI = dyn_cast<AllocaInst>(&I);
2483     if (!AI)
2484       continue;
2485     // The PromiseAlloca will be specially handled since it needs to be in a
2486     // fixed position in the frame.
2487     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2488       continue;
2489     }
2490     DominatorTree DT(F);
2491     // The code that uses lifetime.start intrinsic does not work for functions
2492     // with loops without exit. Disable it on ABIs we know to generate such
2493     // code.
2494     bool ShouldUseLifetimeStartInfo =
2495         (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2496          Shape.ABI != coro::ABI::RetconOnce);
2497     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2498                              *Shape.CoroBegin, Checker,
2499                              ShouldUseLifetimeStartInfo};
2500     Visitor.visitPtr(*AI);
2501     if (!Visitor.getShouldLiveOnFrame())
2502       continue;
2503     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2504                          Visitor.getMayWriteBeforeCoroBegin());
2505   }
2506 }
2507 
/// Rewrite the location operand of a debug intrinsic so it stays valid when
/// the coroutine is split: walk loads/stores/pointer arithmetic back toward
/// a stable base value, folding each walked operation into the DIExpression.
void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
  Function *F = DVI->getFunction();
  IRBuilder<> Builder(F->getContext());
  // Position the builder at the start of the entry block, after any leading
  // intrinsic calls, in case we need to materialize an alloca below.
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert into a DIExpression.
  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
  Value *Storage = DVI->getVariableLocationOp(0);
  Value *OriginalStorage = Storage;
  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location no DW_OP_deref operation for the
      // last direct load from an alloca is necessary.  This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!SkipOutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
      // Walk through the store to the value that was stored.
      Storage = StInst->getOperand(0);
    } else {
      // For any other instruction, let the generic salvage machinery fold
      // its effect into the expression.
      SmallVector<uint64_t, 16> Ops;
      SmallVector<Value *, 0> AdditionalValues;
      Value *Op = llvm::salvageDebugInfoImpl(
          *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
          AdditionalValues);
      if (!Op || !AdditionalValues.empty()) {
        // If salvaging failed or salvaging produced more than one location
        // operand, give up.
        break;
      }
      Storage = Op;
      Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
    }
    // Only the very first load may be skipped (see FIXME above).
    SkipOutermostLoad = false;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  //
  // Avoid creating the alloca when optimizing: it would be eliminated by
  // later passes, leaving the corresponding dbg.declares invalid.
  if (!OptimizeFrame && !EnableReuseStorageInFrame)
    if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
      // One shadow alloca per salvaged argument, shared across intrinsics.
      auto &Cached = DbgPtrAllocaCache[Storage];
      if (!Cached) {
        Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                      Arg->getName() + ".debug");
        Builder.CreateStore(Storage, Cached);
      }
      Storage = Cached;
      // FIXME: LLVM lacks nuanced semantics to differentiate between
      // memory and direct locations at the IR level. The backend will
      // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
      // location. Thus, if there are deref and offset operations in the
      // expression, we need to add a DW_OP_deref at the *start* of the
      // expression to first load the contents of the alloca before
      // adjusting it with the expression.
      if (Expr && Expr->isComplex())
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }

  DVI->replaceVariableLocationOp(OriginalStorage, Storage);
  DVI->setExpression(Expr);
  /// It makes no sense to move the dbg.value intrinsic.
  if (!isa<DbgValueInst>(DVI)) {
    // Move the dbg.declare to a point where the new storage is in scope:
    // just after the defining instruction (or, for invoke/callbr storage,
    // to the head of the normal/default destination).
    if (auto *II = dyn_cast<InvokeInst>(Storage))
      DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
    else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
      DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
    else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
      assert(!InsertPt->isTerminator() &&
             "Unimaged terminator that could return a storage.");
      DVI->moveAfter(InsertPt);
    } else if (isa<Argument>(Storage))
      DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
  }
}
2598 
/// Top-level driver for frame construction: normalize the CFG around
/// suspend points, rematerialize cheap values, decide which allocas and
/// values must be spilled to the frame, then build the frame type and
/// insert the spill/reload code.
void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call function call. (Arguments to the coro.end
    // instructions would be ignored)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      // The first three coro.end.async operands are bookkeeping; the rest
      // are forwarded to the musttail callee.
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Later code makes structural assumptions about single-predecessor phis,
  // e.g. that they are not live across a suspend point.
  cleanupSinglePredPHIs(F);

  // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI
  // will never have its definition separated from the PHI by the suspend
  // point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    // Rematerialize cheap ("materializable") instructions at their use
    // points instead of spilling them.  Each rewrite round can expose new
    // suspend-crossing uses, so iterate a bounded number of times.
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I)) {
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

          // Manually add dbg.value metadata uses of I.
          SmallVector<DbgValueInst *, 16> DVIs;
          findDbgValues(DVIs, &I);
          for (auto *DVI : DVIs)
            if (Checker.isDefinitionAcrossSuspend(I, DVI))
              Spills[&I].push_back(DVI);
        }

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
      Shape.ABI != coro::ABI::RetconOnce)
    sinkLifetimeStartMarkers(F, Shape, Checker);

  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The Coroutine Promise is always included in the coroutine frame, no
    // need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value.  The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    // Ordinary allocas were already handled by collectFrameAllocas above.
    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage DbgValueInsts whose values are already
  // in the frame.  dbg.values for allocas are handled specially elsewhere.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
    });
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  // These ABIs need all spill uses after coro.begin (see the helper's doc).
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  // Materialize the frame: type, frame pointer, debug info, then the actual
  // spill/reload code and local-alloca lowering.
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}
2777