1 //===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular
// value, there is a path from its definition to a use that crosses a suspend
// point.
10 //
11 // Using the information discovered we form a Coroutine Frame structure to
12 // contain those values. All uses of those values are replaced with appropriate
13 // GEP + load from the coroutine frame. At the point of the definition we spill
14 // the value into the coroutine frame.
15 //===----------------------------------------------------------------------===//
16 
17 #include "CoroInternal.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/ScopeExit.h"
20 #include "llvm/ADT/SmallString.h"
21 #include "llvm/Analysis/PtrUseVisitor.h"
22 #include "llvm/Analysis/StackLifetime.h"
23 #include "llvm/Config/llvm-config.h"
24 #include "llvm/IR/CFG.h"
25 #include "llvm/IR/DIBuilder.h"
26 #include "llvm/IR/DebugInfo.h"
27 #include "llvm/IR/Dominators.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstIterator.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/MathExtras.h"
34 #include "llvm/Support/OptimizedStructLayout.h"
35 #include "llvm/Support/circular_raw_ostream.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
38 #include "llvm/Transforms/Utils/Local.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 #include <algorithm>
41 
42 using namespace llvm;
43 
44 // The "coro-suspend-crossing" flag is very noisy. There is another debug type,
45 // "coro-frame", which results in leaner debug spew.
46 #define DEBUG_TYPE "coro-suspend-crossing"
47 
48 static cl::opt<bool> EnableReuseStorageInFrame(
49     "reuse-storage-in-coroutine-frame", cl::Hidden,
50     cl::desc(
        "Enable the optimization which would reuse the storage in the "
        "coroutine frame for allocas whose lifetime ranges do not overlap, "
        "for testing purposes"),
53     llvm::cl::init(false));
54 
55 enum { SmallVectorThreshold = 32 };
56 
// Provides a two-way mapping between blocks and their indices.
58 namespace {
59 class BlockToIndexMapping {
60   SmallVector<BasicBlock *, SmallVectorThreshold> V;
61 
62 public:
63   size_t size() const { return V.size(); }
64 
65   BlockToIndexMapping(Function &F) {
66     for (BasicBlock &BB : F)
67       V.push_back(&BB);
68     llvm::sort(V);
69   }
70 
71   size_t blockToIndex(BasicBlock *BB) const {
72     auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
74     return I - V.begin();
75   }
76 
77   BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
78 };
79 } // end anonymous namespace
80 
// The SuspendCrossingInfo maintains data that allows answering the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
84 //
85 // For every basic block 'i' it maintains a BlockData that consists of:
86 //   Consumes:  a bit vector which contains a set of indices of blocks that can
87 //              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', where at least one such path crosses a suspend
//          point
90 //   Suspend: a boolean indicating whether block 'i' contains a suspend point.
91 //   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
92 //
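// As an illustrative sketch (block names are hypothetical, not from any real
// test): in a linear CFG  entry -> work -> susp -> resume  where 'susp'
// contains a suspend point, the propagation below leaves 'resume' with
// Consumes = {entry, work, susp, resume} and Kills = {entry, work, susp}, so
// hasPathCrossingSuspendPoint(work, resume) returns true and any value
// defined in 'work' and used in 'resume' must be spilled to the coroutine
// frame.
//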
93 namespace {
94 struct SuspendCrossingInfo {
95   BlockToIndexMapping Mapping;
96 
97   struct BlockData {
98     BitVector Consumes;
99     BitVector Kills;
100     bool Suspend = false;
101     bool End = false;
102   };
103   SmallVector<BlockData, SmallVectorThreshold> Block;
104 
105   iterator_range<succ_iterator> successors(BlockData const &BD) const {
106     BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
107     return llvm::successors(BB);
108   }
109 
110   BlockData &getBlockData(BasicBlock *BB) {
111     return Block[Mapping.blockToIndex(BB)];
112   }
113 
114   void dump() const;
115   void dump(StringRef Label, BitVector const &BV) const;
116 
117   SuspendCrossingInfo(Function &F, coro::Shape &Shape);
118 
119   bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
120     size_t const DefIndex = Mapping.blockToIndex(DefBB);
121     size_t const UseIndex = Mapping.blockToIndex(UseBB);
122 
123     bool const Result = Block[UseIndex].Kills[DefIndex];
124     LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
125                       << " answer is " << Result << "\n");
126     return Result;
127   }
128 
129   bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
130     auto *I = cast<Instruction>(U);
131 
132     // We rewrote PHINodes, so that only the ones with exactly one incoming
133     // value need to be analyzed.
134     if (auto *PN = dyn_cast<PHINode>(I))
135       if (PN->getNumIncomingValues() > 1)
136         return false;
137 
138     BasicBlock *UseBB = I->getParent();
139 
140     // As a special case, treat uses by an llvm.coro.suspend.retcon or an
141     // llvm.coro.suspend.async as if they were uses in the suspend's single
142     // predecessor: the uses conceptually occur before the suspend.
143     if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
144       UseBB = UseBB->getSinglePredecessor();
145       assert(UseBB && "should have split coro.suspend into its own block");
146     }
147 
148     return hasPathCrossingSuspendPoint(DefBB, UseBB);
149   }
150 
151   bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
152     return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
153   }
154 
155   bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
156     auto *DefBB = I.getParent();
157 
158     // As a special case, treat values produced by an llvm.coro.suspend.*
159     // as if they were defined in the single successor: the uses
160     // conceptually occur after the suspend.
161     if (isa<AnyCoroSuspendInst>(I)) {
162       DefBB = DefBB->getSingleSuccessor();
163       assert(DefBB && "should have split coro.suspend into its own block");
164     }
165 
166     return isDefinitionAcrossSuspend(DefBB, U);
167   }
168 
169   bool isDefinitionAcrossSuspend(Value &V, User *U) const {
170     if (auto *Arg = dyn_cast<Argument>(&V))
171       return isDefinitionAcrossSuspend(*Arg, U);
172     if (auto *Inst = dyn_cast<Instruction>(&V))
173       return isDefinitionAcrossSuspend(*Inst, U);
174 
    llvm_unreachable(
        "Coroutines can only collect Arguments and Instructions for now.");
177   }
178 };
179 } // end anonymous namespace
180 
181 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
182 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
183                                                 BitVector const &BV) const {
184   dbgs() << Label << ":";
185   for (size_t I = 0, N = BV.size(); I < N; ++I)
186     if (BV[I])
187       dbgs() << " " << Mapping.indexToBlock(I)->getName();
188   dbgs() << "\n";
189 }
190 
191 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
192   for (size_t I = 0, N = Block.size(); I < N; ++I) {
193     BasicBlock *const B = Mapping.indexToBlock(I);
194     dbgs() << B->getName() << ":\n";
195     dump("   Consumes", Block[I].Consumes);
196     dump("      Kills", Block[I].Kills);
197   }
198   dbgs() << "\n";
199 }
200 #endif
201 
202 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
203     : Mapping(F) {
204   const size_t N = Mapping.size();
205   Block.resize(N);
206 
207   // Initialize every block so that it consumes itself
208   for (size_t I = 0; I < N; ++I) {
209     auto &B = Block[I];
210     B.Consumes.resize(N);
211     B.Kills.resize(N);
212     B.Consumes.set(I);
213   }
214 
215   // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
216   // the code beyond coro.end is reachable during initial invocation of the
217   // coroutine.
218   for (auto *CE : Shape.CoroEnds)
219     getBlockData(CE->getParent()).End = true;
220 
221   // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
223   // between coro.save and coro.suspend may resume the coroutine and all of the
224   // state needs to be saved by that time.
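  //
  // As a rough illustration (the await_suspend callee name below is
  // hypothetical):
  //   %save = call token @llvm.coro.save(i8* null)
  //   call void @some_await_suspend(...)  ; may resume the coroutine
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  // Any value live across %save must already be spilled, because the call
  // between the save and the suspend can transfer control back into the
  // resumed coroutine body.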
225   auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
226     BasicBlock *SuspendBlock = BarrierInst->getParent();
227     auto &B = getBlockData(SuspendBlock);
228     B.Suspend = true;
229     B.Kills |= B.Consumes;
230   };
231   for (auto *CSI : Shape.CoroSuspends) {
232     markSuspendBlock(CSI);
233     if (auto *Save = CSI->getCoroSave())
234       markSuspendBlock(Save);
235   }
236 
237   // Iterate propagating consumes and kills until they stop changing.
238   int Iteration = 0;
239   (void)Iteration;
240 
241   bool Changed;
242   do {
243     LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
244     LLVM_DEBUG(dbgs() << "==============\n");
245 
246     Changed = false;
247     for (size_t I = 0; I < N; ++I) {
248       auto &B = Block[I];
249       for (BasicBlock *SI : successors(B)) {
250 
251         auto SuccNo = Mapping.blockToIndex(SI);
252 
        // Save the Consumes and Kills bitsets so that it is easy to see
        // whether anything changed after propagation.
255         auto &S = Block[SuccNo];
256         auto SavedConsumes = S.Consumes;
257         auto SavedKills = S.Kills;
258 
259         // Propagate Kills and Consumes from block B into its successor S.
260         S.Consumes |= B.Consumes;
261         S.Kills |= B.Kills;
262 
        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
265         if (B.Suspend) {
266           S.Kills |= B.Consumes;
267         }
268         if (S.Suspend) {
269           // If block S is a suspend block, it should kill all of the blocks it
270           // consumes.
271           S.Kills |= S.Consumes;
272         } else if (S.End) {
273           // If block S is an end block, it should not propagate kills as the
274           // blocks following coro.end() are reached during initial invocation
275           // of the coroutine while all the data are still available on the
276           // stack or in the registers.
277           S.Kills.reset();
278         } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that S is not in its own
          // kill set.
281           S.Kills.reset(SuccNo);
282         }
283 
284         // See if anything changed.
285         Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
286 
287         if (S.Kills != SavedKills) {
288           LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
289                             << "\n");
290           LLVM_DEBUG(dump("S.Kills", S.Kills));
291           LLVM_DEBUG(dump("SavedKills", SavedKills));
292         }
293         if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
295           LLVM_DEBUG(dump("S.Consume", S.Consumes));
296           LLVM_DEBUG(dump("SavedCons", SavedConsumes));
297         }
298       }
299     }
300   } while (Changed);
301   LLVM_DEBUG(dump());
302 }
303 
304 #undef DEBUG_TYPE // "coro-suspend-crossing"
305 #define DEBUG_TYPE "coro-frame"
306 
307 namespace {
308 class FrameTypeBuilder;
309 // Mapping from the to-be-spilled value to all the users that need reload.
310 using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
311 struct AllocaInfo {
312   AllocaInst *Alloca;
313   DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
314   bool MayWriteBeforeCoroBegin;
315   AllocaInfo(AllocaInst *Alloca,
316              DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
317              bool MayWriteBeforeCoroBegin)
318       : Alloca(Alloca), Aliases(std::move(Aliases)),
319         MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
320 };
321 struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
324   SpillInfo Spills;
325   // Allocas contains all values defined as allocas that need to live in the
326   // frame.
327   SmallVector<AllocaInfo, 8> Allocas;
328 
329   SmallVector<Value *, 8> getAllDefs() const {
330     SmallVector<Value *, 8> Defs;
331     for (const auto &P : Spills)
332       Defs.push_back(P.first);
333     for (const auto &A : Allocas)
334       Defs.push_back(A.Alloca);
335     return Defs;
336   }
337 
338   uint32_t getFieldIndex(Value *V) const {
339     auto Itr = FieldIndexMap.find(V);
340     assert(Itr != FieldIndexMap.end() &&
341            "Value does not have a frame field index");
342     return Itr->second;
343   }
344 
345   void setFieldIndex(Value *V, uint32_t Index) {
346     assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
347            "Cannot set the index for the same field twice.");
348     FieldIndexMap[V] = Index;
349   }
350 
351   uint64_t getAlign(Value *V) const {
352     auto Iter = FieldAlignMap.find(V);
353     assert(Iter != FieldAlignMap.end());
354     return Iter->second;
355   }
356 
357   void setAlign(Value *V, uint64_t Align) {
358     assert(FieldAlignMap.count(V) == 0);
359     FieldAlignMap.insert({V, Align});
360   }
361 
362   uint64_t getOffset(Value *V) const {
363     auto Iter = FieldOffsetMap.find(V);
364     assert(Iter != FieldOffsetMap.end());
365     return Iter->second;
366   }
367 
368   void setOffset(Value *V, uint64_t Offset) {
369     assert(FieldOffsetMap.count(V) == 0);
370     FieldOffsetMap.insert({V, Offset});
371   }
372 
373   // Remap the index of every field in the frame, using the final layout index.
374   void updateLayoutIndex(FrameTypeBuilder &B);
375 
376 private:
377   // LayoutIndexUpdateStarted is used to avoid updating the index of any field
378   // twice by mistake.
379   bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They are first set to
  // the original insertion field index; after the frame is built, they are
  // updated to the final layout index.
383   DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. These are set after
  // the frame is built.
386   DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset on the frame. These are set after
  // the frame is built.
389   DenseMap<Value *, uint64_t> FieldOffsetMap;
390 };
391 } // namespace
392 
393 #ifndef NDEBUG
394 static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
395   dbgs() << "------------- " << Title << "--------------\n";
396   for (const auto &E : Spills) {
397     E.first->dump();
398     dbgs() << "   user: ";
399     for (auto *I : E.second)
400       I->dump();
401   }
402 }
403 
404 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
405   dbgs() << "------------- Allocas --------------\n";
406   for (const auto &A : Allocas) {
407     A.Alloca->dump();
408   }
409 }
410 #endif
411 
412 namespace {
413 using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
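//
// For example (an illustrative case, not taken from a test): for
// `%x = alloca i64, align 16`, the natural (ABI) alignment of i64 is commonly
// 8, so placing %x in the frame may require extra padding and a larger frame
// alignment to guarantee the requested 16-byte alignment.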
418 class FrameTypeBuilder {
419 private:
420   struct Field {
421     uint64_t Size;
422     uint64_t Offset;
423     Type *Ty;
424     FieldIDType LayoutFieldIndex;
425     Align Alignment;
426     Align TyAlignment;
427   };
428 
429   const DataLayout &DL;
430   LLVMContext &Context;
431   uint64_t StructSize = 0;
432   Align StructAlign;
433   bool IsFinished = false;
434 
435   Optional<Align> MaxFrameAlignment;
436 
437   SmallVector<Field, 8> Fields;
438   DenseMap<Value*, unsigned> FieldIndexByKey;
439 
440 public:
441   FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
442                    Optional<Align> MaxFrameAlignment)
443       : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}
444 
445   /// Add a field to this structure for the storage of an `alloca`
446   /// instruction.
447   LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
448                                                bool IsHeader = false) {
449     Type *Ty = AI->getAllocatedType();
450 
451     // Make an array type if this is a static array allocation.
452     if (AI->isArrayAllocation()) {
453       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
454         Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
455       else
456         report_fatal_error("Coroutines cannot handle non static allocas yet");
457     }
458 
459     return addField(Ty, AI->getAlign(), IsHeader);
460   }
461 
  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
465   ///
466   ///     cppcoro::task<void> alternative_paths(bool cond) {
467   ///         if (cond) {
468   ///             big_structure a;
469   ///             process(a);
470   ///             co_await something();
471   ///         } else {
472   ///             big_structure b;
473   ///             process2(b);
474   ///             co_await something();
475   ///         }
476   ///     }
477   ///
478   /// We want to put variable a and variable b in the same slot to
479   /// reduce the size of coroutine frame.
480   ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in FrameData.Allocas into non-overlapping sets, so that
  /// allocas in the same non-overlapping set can share one slot in the
  /// coroutine frame. It then adds a field for each set, using the largest
  /// type in the set as the field type.
  ///
  /// Side effect: because we sort the allocas, their order in the frame may
  /// differ from their order in the source code.
489   void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
490                           coro::Shape &Shape);
491 
492   /// Add a field to this structure.
493   LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
494                                       bool IsHeader = false,
495                                       bool IsSpillOfValue = false) {
496     assert(!IsFinished && "adding fields to a finished builder");
497     assert(Ty && "must provide a type for a field");
498 
499     // The field size is always the alloc size of the type.
500     uint64_t FieldSize = DL.getTypeAllocSize(Ty);
501 
    // For an alloca with size 0, we don't need to add a field; it can just
    // point to any index in the frame. Use index 0.
504     if (FieldSize == 0) {
505       return 0;
506     }
507 
508     // The field alignment might not be the type alignment, but we need
509     // to remember the type alignment anyway to build the type.
510     // If we are spilling values we don't need to worry about ABI alignment
511     // concerns.
512     auto ABIAlign = DL.getABITypeAlign(Ty);
513     Align TyAlignment =
514         (IsSpillOfValue && MaxFrameAlignment)
515             ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
516             : ABIAlign;
517     if (!FieldAlignment) {
518       FieldAlignment = TyAlignment;
519     }
520 
521     // Lay out header fields immediately.
522     uint64_t Offset;
523     if (IsHeader) {
524       Offset = alignTo(StructSize, FieldAlignment);
525       StructSize = Offset + FieldSize;
526 
527     // Everything else has a flexible offset.
528     } else {
529       Offset = OptimizedStructLayoutField::FlexibleOffset;
530     }
531 
532     Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
533     return Fields.size() - 1;
534   }
535 
536   /// Finish the layout and set the body on the given type.
537   void finish(StructType *Ty);
538 
539   uint64_t getStructSize() const {
540     assert(IsFinished && "not yet finished!");
541     return StructSize;
542   }
543 
544   Align getStructAlign() const {
545     assert(IsFinished && "not yet finished!");
546     return StructAlign;
547   }
548 
549   FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
550     assert(IsFinished && "not yet finished!");
551     return Fields[Id].LayoutFieldIndex;
552   }
553 
554   Field getLayoutField(FieldIDType Id) const {
555     assert(IsFinished && "not yet finished!");
556     return Fields[Id];
557   }
558 };
559 } // namespace
560 
561 void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
562   auto Updater = [&](Value *I) {
563     auto Field = B.getLayoutField(getFieldIndex(I));
564     setFieldIndex(I, Field.LayoutFieldIndex);
565     setAlign(I, Field.Alignment.value());
566     setOffset(I, Field.Offset);
567   };
568   LayoutIndexUpdateStarted = true;
569   for (auto &S : Spills)
570     Updater(S.first);
571   for (const auto &A : Allocas)
572     Updater(A.Alloca);
573   LayoutIndexUpdateStarted = false;
574 }
575 
576 void FrameTypeBuilder::addFieldForAllocas(const Function &F,
577                                           FrameDataInfo &FrameData,
578                                           coro::Shape &Shape) {
579   using AllocaSetType = SmallVector<AllocaInst *, 4>;
580   SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
581 
  // We need to add fields for the allocas at the end of this function.
583   auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
584     for (auto AllocaList : NonOverlapedAllocas) {
585       auto *LargestAI = *AllocaList.begin();
586       FieldIDType Id = addFieldForAlloca(LargestAI);
587       for (auto *Alloca : AllocaList)
588         FrameData.setFieldIndex(Alloca, Id);
589     }
590   });
591 
592   if (!Shape.OptimizeFrame && !EnableReuseStorageInFrame) {
593     for (const auto &A : FrameData.Allocas) {
594       AllocaInst *Alloca = A.Alloca;
595       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
596     }
597     return;
598   }
599 
  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap
  // in the blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since skipping just prevents putting those allocas into the same
  // slot.
612   DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
613   for (auto CoroSuspendInst : Shape.CoroSuspends) {
614     for (auto U : CoroSuspendInst->users()) {
615       if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
616         auto *SWI = const_cast<SwitchInst *>(ConstSWI);
617         DefaultSuspendDest[SWI] = SWI->getDefaultDest();
618         SWI->setDefaultDest(SWI->getSuccessor(1));
619       }
620     }
621   }
622 
623   auto ExtractAllocas = [&]() {
624     AllocaSetType Allocas;
625     Allocas.reserve(FrameData.Allocas.size());
626     for (const auto &A : FrameData.Allocas)
627       Allocas.push_back(A.Alloca);
628     return Allocas;
629   };
630   StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
631                                       StackLifetime::LivenessType::May);
632   StackLifetimeAnalyzer.run();
633   auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
634     return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
635         StackLifetimeAnalyzer.getLiveRange(AI2));
636   };
637   auto GetAllocaSize = [&](const AllocaInfo &A) {
638     Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
639     assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
640     assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
641     return RetSize->getFixedSize();
642   };
  // Put larger allocas first, so that larger allocas have higher priority to
  // merge, which can potentially save more space. This also keeps each
  // AllocaSet ordered, so we can easily get the largest alloca in an
  // AllocaSet.
647   sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
648     return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
649   });
650   for (const auto &A : FrameData.Allocas) {
651     AllocaInst *Alloca = A.Alloca;
652     bool Merged = false;
    // Check whether the Alloca does not interfere with any existing
    // NonOverlappedAllocaSet. If so, insert the alloca into that
    // NonOverlappedAllocaSet.
656     for (auto &AllocaSet : NonOverlapedAllocas) {
657       assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
658       bool NoInference = none_of(AllocaSet, [&](auto Iter) {
659         return IsAllocaInferenre(Alloca, Iter);
660       });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies for handling
      // alignment information during the merging process, but they seem hard
      // to implement and of little benefit.
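      //
      // For example (hypothetical sizes and alignments): an alloca requiring
      // align 4 can be merged into a slot whose largest alloca requires
      // align 8, since 8 % 4 == 0; an alloca requiring align 16 cannot be
      // merged into that slot, since 8 % 16 != 0.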
667       bool Alignable = [&]() -> bool {
668         auto *LargestAlloca = *AllocaSet.begin();
669         return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
670                0;
671       }();
672       bool CouldMerge = NoInference && Alignable;
673       if (!CouldMerge)
674         continue;
675       AllocaSet.push_back(Alloca);
676       Merged = true;
677       break;
678     }
679     if (!Merged) {
680       NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
681     }
682   }
  // Restore the default target destination for each switch instruction
  // we saved above.
685   for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
686     SwitchInst *SWI = SwitchAndDefaultDest.first;
687     BasicBlock *DestBB = SwitchAndDefaultDest.second;
688     SWI->setDefaultDest(DestBB);
689   }
  // This debug output tells us which allocas are merged into one slot.
691   LLVM_DEBUG(for (auto &AllocaSet
692                   : NonOverlapedAllocas) {
693     if (AllocaSet.size() > 1) {
694       dbgs() << "In Function:" << F.getName() << "\n";
695       dbgs() << "Find Union Set "
696              << "\n";
697       dbgs() << "\tAllocas are \n";
698       for (auto Alloca : AllocaSet)
699         dbgs() << "\t\t" << *Alloca << "\n";
700     }
701   });
702 }
703 
704 void FrameTypeBuilder::finish(StructType *Ty) {
705   assert(!IsFinished && "already finished!");
706 
707   // Prepare the optimal-layout field array.
708   // The Id in the layout field is a pointer to our Field for it.
709   SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
710   LayoutFields.reserve(Fields.size());
711   for (auto &Field : Fields) {
712     LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
713                               Field.Offset);
714   }
715 
716   // Perform layout.
717   auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
718   StructSize = SizeAndAlign.first;
719   StructAlign = SizeAndAlign.second;
720 
721   auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
722     return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
723   };
724 
725   // We need to produce a packed struct type if there's a field whose
726   // assigned offset isn't a multiple of its natural type alignment.
727   bool Packed = [&] {
728     for (auto &LayoutField : LayoutFields) {
729       auto &F = getField(LayoutField);
730       if (!isAligned(F.TyAlignment, LayoutField.Offset))
731         return true;
732     }
733     return false;
734   }();
735 
736   // Build the struct body.
737   SmallVector<Type*, 16> FieldTypes;
738   FieldTypes.reserve(LayoutFields.size() * 3 / 2);
739   uint64_t LastOffset = 0;
740   for (auto &LayoutField : LayoutFields) {
741     auto &F = getField(LayoutField);
742 
743     auto Offset = LayoutField.Offset;
744 
745     // Add a padding field if there's a padding gap and we're either
746     // building a packed struct or the padding gap is more than we'd
747     // get from aligning to the field type's natural alignment.
748     assert(Offset >= LastOffset);
749     if (Offset != LastOffset) {
750       if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
751         FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
752                                             Offset - LastOffset));
753     }
754 
755     F.Offset = Offset;
756     F.LayoutFieldIndex = FieldTypes.size();
757 
758     FieldTypes.push_back(F.Ty);
759     LastOffset = Offset + F.Size;
760   }
761 
762   Ty->setBody(FieldTypes, Packed);
763 
764 #ifndef NDEBUG
765   // Check that the IR layout matches the offsets we expect.
766   auto Layout = DL.getStructLayout(Ty);
767   for (auto &F : Fields) {
768     assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
769     assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
770   }
771 #endif
772 
773   IsFinished = true;
774 }
775 
776 static void cacheDIVar(FrameDataInfo &FrameData,
777                        DenseMap<Value *, DILocalVariable *> &DIVarCache) {
778   for (auto *V : FrameData.getAllDefs()) {
779     if (DIVarCache.find(V) != DIVarCache.end())
780       continue;
781 
782     auto DDIs = FindDbgDeclareUses(V);
783     auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
784       return DDI->getExpression()->getNumElements() == 0;
785     });
786     if (I != DDIs.end())
787       DIVarCache.insert({V, (*I)->getVariable()});
788   }
789 }
790 
/// Create a name for the Type. It uses MDString to store the newly created
/// string to avoid a memory leak.
793 static StringRef solveTypeName(Type *Ty) {
794   if (Ty->isIntegerTy()) {
    // The longest name in common may be '__int_128', which has 9 characters.
796     SmallString<16> Buffer;
797     raw_svector_ostream OS(Buffer);
798     OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
799     auto *MDName = MDString::get(Ty->getContext(), OS.str());
800     return MDName->getString();
801   }
802 
803   if (Ty->isFloatingPointTy()) {
804     if (Ty->isFloatTy())
805       return "__float_";
806     if (Ty->isDoubleTy())
807       return "__double_";
808     return "__floating_type_";
809   }
810 
811   if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
812     if (PtrTy->isOpaque())
813       return "PointerType";
814     Type *PointeeTy = PtrTy->getNonOpaquePointerElementType();
815     auto Name = solveTypeName(PointeeTy);
816     if (Name == "UnknownType")
817       return "PointerType";
818     SmallString<16> Buffer;
819     Twine(Name + "_Ptr").toStringRef(Buffer);
820     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
821     return MDName->getString();
822   }
823 
824   if (Ty->isStructTy()) {
825     if (!cast<StructType>(Ty)->hasName())
826       return "__LiteralStructType_";
827 
828     auto Name = Ty->getStructName();
829 
830     SmallString<16> Buffer(Name);
831     for_each(Buffer, [](auto &Iter) {
832       if (Iter == '.' || Iter == ':')
833         Iter = '_';
834     });
835     auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
836     return MDName->getString();
837   }
838 
839   return "UnknownType";
840 }
841 
842 static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
843                            const DataLayout &Layout, DIScope *Scope,
844                            unsigned LineNum,
845                            DenseMap<Type *, DIType *> &DITypeCache) {
846   if (DIType *DT = DITypeCache.lookup(Ty))
847     return DT;
848 
849   StringRef Name = solveTypeName(Ty);
850 
851   DIType *RetType = nullptr;
852 
853   if (Ty->isIntegerTy()) {
854     auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
855     RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
856                                       llvm::DINode::FlagArtificial);
857   } else if (Ty->isFloatingPointTy()) {
858     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
859                                       dwarf::DW_ATE_float,
860                                       llvm::DINode::FlagArtificial);
861   } else if (Ty->isPointerTy()) {
862     // Construct BasicType instead of PointerType to avoid infinite
863     // search problem.
864     // For example, we would be in trouble if we traverse recursively:
865     //
866     //  struct Node {
867     //      Node* ptr;
868     //  };
869     RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
870                                       dwarf::DW_ATE_address,
871                                       llvm::DINode::FlagArtificial);
872   } else if (Ty->isStructTy()) {
873     auto *DIStruct = Builder.createStructType(
874         Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
875         Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
876         llvm::DINodeArray());
877 
878     auto *StructTy = cast<StructType>(Ty);
879     SmallVector<Metadata *, 16> Elements;
880     for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
881       DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
882                                  Scope, LineNum, DITypeCache);
883       assert(DITy);
884       Elements.push_back(Builder.createMemberType(
885           Scope, DITy->getName(), Scope->getFile(), LineNum,
886           DITy->getSizeInBits(), DITy->getAlignInBits(),
887           Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
888           llvm::DINode::FlagArtificial, DITy));
889     }
890 
891     Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));
892 
893     RetType = DIStruct;
894   } else {
895     LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
896     SmallString<32> Buffer;
897     raw_svector_ostream OS(Buffer);
898     OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
899     RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
900                                       dwarf::DW_ATE_address,
901                                       llvm::DINode::FlagArtificial);
902   }
903 
904   DITypeCache.insert({Ty, RetType});
905   return RetType;
906 }
907 
/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
/// 1. For every value in the frame, we search its dbg.declare uses to find
///    the corresponding debug variable for the value. If we can find the
///    debug variable, we get full and accurate debug information.
/// 2. If we can't get debug information in step 1, we try to build the DIType
///    from the Type alone. We do this in solveDIType. We only handle integer,
///    floating-point, pointer, and struct types for now.
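///
/// Roughly, the resulting artificial debug type has members named
/// __resume_fn, __destroy_fn and __coro_index for the switch-ABI header
/// fields, plus one member for each value placed in the frame (named after
/// its debug variable when one is found, otherwise after the type that
/// solveDIType produces), and a "__coro_frame" variable of this type is
/// declared at the frame pointer.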
920 static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
921                                 FrameDataInfo &FrameData) {
922   DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info, so we don't need to generate debug info for the
  // frame either.
926   if (!DIS || !DIS->getUnit() ||
927       !dwarf::isCPlusPlus(
928           (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
929     return;
930 
  assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");
933 
934   DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);
935 
936   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
937   assert(PromiseAlloca &&
938          "Coroutine with switch ABI should own Promise alloca");
939 
940   TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
941   if (DIs.empty())
942     return;
943 
944   DbgDeclareInst *PromiseDDI = DIs.front();
945   DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
946   DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
947   DIFile *DFile = PromiseDIScope->getFile();
948   DILocation *DILoc = PromiseDDI->getDebugLoc().get();
949   unsigned LineNum = PromiseDIVariable->getLine();
950 
951   DICompositeType *FrameDITy = DBuilder.createStructType(
952       DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
953       Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
954       llvm::DINodeArray());
955   StructType *FrameTy = Shape.FrameTy;
956   SmallVector<Metadata *, 16> Elements;
957   DataLayout Layout = F.getParent()->getDataLayout();
958 
959   DenseMap<Value *, DILocalVariable *> DIVarCache;
960   cacheDIVar(FrameData, DIVarCache);
961 
962   unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
963   unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
964   unsigned IndexIndex = Shape.SwitchLowering.IndexField;
965 
966   DenseMap<unsigned, StringRef> NameCache;
967   NameCache.insert({ResumeIndex, "__resume_fn"});
968   NameCache.insert({DestroyIndex, "__destroy_fn"});
969   NameCache.insert({IndexIndex, "__coro_index"});
970 
971   Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
972        *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
973        *IndexTy = FrameTy->getElementType(IndexIndex);
974 
975   DenseMap<unsigned, DIType *> TyCache;
976   TyCache.insert({ResumeIndex,
977                   DBuilder.createBasicType("__resume_fn",
978                                            Layout.getTypeSizeInBits(ResumeFnTy),
979                                            dwarf::DW_ATE_address)});
980   TyCache.insert(
981       {DestroyIndex, DBuilder.createBasicType(
982                          "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
983                          dwarf::DW_ATE_address)});
984 
985   /// FIXME: If we fill the field `SizeInBits` with the actual size of
986   /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
987   TyCache.insert({IndexIndex, DBuilder.createBasicType(
988                                   "__coro_index",
989                                   (Layout.getTypeSizeInBits(IndexTy) < 8)
990                                       ? 8
991                                       : Layout.getTypeSizeInBits(IndexTy),
992                                   dwarf::DW_ATE_unsigned_char)});
993 
994   for (auto *V : FrameData.getAllDefs()) {
995     if (DIVarCache.find(V) == DIVarCache.end())
996       continue;
997 
998     auto Index = FrameData.getFieldIndex(V);
999 
1000     NameCache.insert({Index, DIVarCache[V]->getName()});
1001     TyCache.insert({Index, DIVarCache[V]->getType()});
1002   }
1003 
  // Cache from frame index to an (Align, Offset) pair.
1005   DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The align and offset of the resume function and destroy function are
  // fixed.
1007   OffsetCache.insert({ResumeIndex, {8, 0}});
1008   OffsetCache.insert({DestroyIndex, {8, 8}});
1009   OffsetCache.insert(
1010       {IndexIndex,
1011        {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});
1012 
1013   for (auto *V : FrameData.getAllDefs()) {
1014     auto Index = FrameData.getFieldIndex(V);
1015 
1016     OffsetCache.insert(
1017         {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
1018   }
1019 
1020   DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid repeated type names, e.g., there can be
  // many i32 and i64 types in one coroutine, so we use i32_0 and i32_1 to
  // distinguish them, since it makes no sense for the names of the fields to
  // conflict with each other.
1025   unsigned UnknownTypeNum = 0;
1026   for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
1027     if (OffsetCache.find(Index) == OffsetCache.end())
1028       continue;
1029 
1030     std::string Name;
1031     uint64_t SizeInBits;
1032     uint32_t AlignInBits;
1033     uint64_t OffsetInBits;
1034     DIType *DITy = nullptr;
1035 
1036     Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle types which are not sized.\n");
1038     SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
1039     AlignInBits = OffsetCache[Index].first * 8;
1040     OffsetInBits = OffsetCache[Index].second * 8;
1041 
1042     if (NameCache.find(Index) != NameCache.end()) {
1043       Name = NameCache[Index].str();
1044       DITy = TyCache[Index];
1045     } else {
1046       DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
1047       assert(DITy && "SolveDIType shouldn't return nullptr.\n");
1048       Name = DITy->getName().str();
1049       Name += "_" + std::to_string(UnknownTypeNum);
1050       UnknownTypeNum++;
1051     }
1052 
1053     Elements.push_back(DBuilder.createMemberType(
1054         FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
1055         llvm::DINode::FlagArtificial, DITy));
1056   }
1057 
1058   DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));
1059 
1060   auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
1061                                                  DFile, LineNum, FrameDITy,
1062                                                  true, DINode::FlagArtificial);
1063   assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));
1064 
  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame is optimized out`.
1072   if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
1073     auto RetainedNodes = SubProgram->getRetainedNodes();
1074     SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
1075                                                  RetainedNodes.end());
1076     RetainedNodesVec.push_back(FrameDIVar);
1077     SubProgram->replaceOperandWith(
1078         7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
1079   }
1080 
1081   DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
1082                          DBuilder.createExpression(), DILoc,
1083                          Shape.getInsertPtAfterFramePtr());
1084 }
1085 
1086 // Build a struct that will keep state for an active coroutine.
1087 //   struct f.frame {
1088 //     ResumeFnTy ResumeFnAddr;
1089 //     ResumeFnTy DestroyFnAddr;
1090 //     int ResumeIndex;
1091 //     ... promise (if present) ...
1092 //     ... spills ...
1093 //   };
1094 static StructType *buildFrameType(Function &F, coro::Shape &Shape,
1095                                   FrameDataInfo &FrameData) {
1096   LLVMContext &C = F.getContext();
1097   const DataLayout &DL = F.getParent()->getDataLayout();
1098   StructType *FrameTy = [&] {
1099     SmallString<32> Name(F.getName());
1100     Name.append(".Frame");
1101     return StructType::create(C, Name);
1102   }();
1103 
1104   // We will use this value to cap the alignment of spilled values.
1105   Optional<Align> MaxFrameAlignment;
1106   if (Shape.ABI == coro::ABI::Async)
1107     MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
1108   FrameTypeBuilder B(C, DL, MaxFrameAlignment);
1109 
1110   AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
1111   Optional<FieldIDType> SwitchIndexFieldId;
1112 
1113   if (Shape.ABI == coro::ABI::Switch) {
1114     auto *FramePtrTy = FrameTy->getPointerTo();
1115     auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
1116                                    /*IsVarArg=*/false);
1117     auto *FnPtrTy = FnTy->getPointerTo();
1118 
1119     // Add header fields for the resume and destroy functions.
1120     // We can rely on these being perfectly packed.
1121     (void)B.addField(FnPtrTy, None, /*header*/ true);
1122     (void)B.addField(FnPtrTy, None, /*header*/ true);
1123 
1124     // PromiseAlloca field needs to be explicitly added here because it's
1125     // a header field with a fixed offset based on its alignment. Hence it
1126     // needs special handling and cannot be added to FrameData.Allocas.
1127     if (PromiseAlloca)
1128       FrameData.setFieldIndex(
1129           PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
1130 
1131     // Add a field to store the suspend index.  This doesn't need to
1132     // be in the header.
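    // For example, a coroutine with three suspend points needs
    // std::max(1, ceil(log2(3))) = 2 bits, i.e. an i2 index field.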
1133     unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
1134     Type *IndexType = Type::getIntNTy(C, IndexBits);
1135 
1136     SwitchIndexFieldId = B.addField(IndexType, None);
1137   } else {
1138     assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
1139   }
1140 
  // Because multiple allocas may share the same field slot,
  // we add the allocas to fields here.
1143   B.addFieldForAllocas(F, FrameData, Shape);
1144   // Add PromiseAlloca to Allocas list so that
1145   // 1. updateLayoutIndex could update its index after
1146   // `performOptimizedStructLayout`
1147   // 2. it is processed in insertSpills.
1148   if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
1151     FrameData.Allocas.emplace_back(
1152         PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
1153   // Create an entry for every spilled value.
1154   for (auto &S : FrameData.Spills) {
1155     Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed-to value in the frame
    // instead of the pointer itself.
1158     if (const Argument *A = dyn_cast<Argument>(S.first))
1159       if (A->hasByValAttr())
1160         FieldType = A->getParamByValType();
1161     FieldIDType Id =
1162         B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
1163     FrameData.setFieldIndex(S.first, Id);
1164   }
1165 
1166   B.finish(FrameTy);
1167   FrameData.updateLayoutIndex(B);
1168   Shape.FrameAlign = B.getStructAlign();
1169   Shape.FrameSize = B.getStructSize();
1170 
1171   switch (Shape.ABI) {
1172   case coro::ABI::Switch: {
1173     // In the switch ABI, remember the switch-index field.
1174     auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
1175     Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
1176     Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
1177     Shape.SwitchLowering.IndexOffset = IndexField.Offset;
1178 
1179     // Also round the frame size up to a multiple of its alignment, as is
1180     // generally expected in C/C++.
1181     Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
1182     break;
1183   }
1184 
1185   // In the retcon ABI, remember whether the frame is inline in the storage.
1186   case coro::ABI::Retcon:
1187   case coro::ABI::RetconOnce: {
1188     auto Id = Shape.getRetconCoroId();
1189     Shape.RetconLowering.IsFrameInlineInStorage
1190       = (B.getStructSize() <= Id->getStorageSize() &&
1191          B.getStructAlign() <= Id->getStorageAlignment());
1192     break;
1193   }
1194   case coro::ABI::Async: {
1195     Shape.AsyncLowering.FrameOffset =
1196         alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
1197     // Also make the final context size a multiple of the context alignment to
1198     // make allocation easier for allocators.
1199     Shape.AsyncLowering.ContextSize =
1200         alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
1201                 Shape.AsyncLowering.getContextAlignment());
1202     if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher "
          "than the alignment of the async function context");
1206     }
1207     break;
1208   }
1209   }
1210 
1211   return FrameTy;
1212 }
1213 
// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? In that case, we will need to recreate the
//      aliases after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all basic blocks that use this alloca or any of its
//      aliases. In the end, we check whether any two of these basic blocks
//      cross suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca escapes at some point,
//      either by storing the address somewhere, or by passing the address to
//      a function call that might capture it. If it ever escapes, this alloca
//      must be put on the frame conservatively.
//
// To answer question 2, we track the variable MayWriteBeforeCoroBegin.
// Whenever a potential write happens, either through a store instruction, a
// function call or any of the memory intrinsics, we check whether this
// instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
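//
// An illustrative sketch of the question-3 case (names and offsets are made
// up):
//   %a   = alloca [16 x i8]
//   %p   = getelementptr inbounds [16 x i8], [16 x i8]* %a, i64 0, i64 4
//   %hdl = call i8* @llvm.coro.begin(...)
//   ...
//   %s   = call i8 @llvm.coro.suspend(...)
//   ... uses of %p ...
// Here %p is an alias of %a created before CoroBegin and used after it, so
// it is recorded with offset 4 and must be recreated from %a's frame slot
// after CoroBegin.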
1239 namespace {
1240 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
1241   using Base = PtrUseVisitor<AllocaUseVisitor>;
1242   AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
1243                    const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
1244                    bool ShouldUseLifetimeStartInfo)
1245       : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
1246         ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
1247 
1248   void visit(Instruction &I) {
1249     Users.insert(&I);
1250     Base::visit(I);
1251     // If the pointer is escaped prior to CoroBegin, we have to assume it would
1252     // be written into before CoroBegin as well.
1253     if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
1254       MayWriteBeforeCoroBegin = true;
1255     }
1256   }
1257   // We need to provide this overload as PtrUseVisitor uses a pointer based
1258   // visiting function.
1259   void visit(Instruction *I) { return visit(*I); }
1260 
1261   void visitPHINode(PHINode &I) {
1262     enqueueUsers(I);
1263     handleAlias(I);
1264   }
1265 
1266   void visitSelectInst(SelectInst &I) {
1267     enqueueUsers(I);
1268     handleAlias(I);
1269   }
1270 
1271   void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
1274     handleMayWrite(SI);
1275 
1276     if (SI.getValueOperand() != U->get())
1277       return;
1278 
1279     // We are storing the pointer into a memory location, potentially escaping.
1280     // As an optimization, we try to detect simple cases where it doesn't
1281     // actually escape, for example:
1282     //   %ptr = alloca ..
1283     //   %addr = alloca ..
1284     //   store %ptr, %addr
1285     //   %x = load %addr
1286     //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as escaped.
1289     auto IsSimpleStoreThenLoad = [&]() {
1290       auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
1291       // If the memory location we are storing to is not an alloca, it
1292       // could be an alias of some other memory locations, which is difficult
1293       // to analyze.
1294       if (!AI)
1295         return false;
1296       // StoreAliases contains aliases of the memory location stored into.
1297       SmallVector<Instruction *, 4> StoreAliases = {AI};
1298       while (!StoreAliases.empty()) {
1299         Instruction *I = StoreAliases.pop_back_val();
1300         for (User *U : I->users()) {
1301           // If we are loading from the memory location, we are creating an
1302           // alias of the original pointer.
1303           if (auto *LI = dyn_cast<LoadInst>(U)) {
1304             enqueueUsers(*LI);
1305             handleAlias(*LI);
1306             continue;
1307           }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
1310           if (auto *S = dyn_cast<StoreInst>(U))
1311             if (S->getPointerOperand() == I)
1312               continue;
1313           if (auto *II = dyn_cast<IntrinsicInst>(U))
1314             if (II->isLifetimeStartOrEnd())
1315               continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
1318           if (auto *BI = dyn_cast<BitCastInst>(U)) {
1319             StoreAliases.push_back(BI);
1320             continue;
1321           }
1322           return false;
1323         }
1324       }
1325 
1326       return true;
1327     };
1328 
1329     if (!IsSimpleStoreThenLoad())
1330       PI.setEscaped(&SI);
1331   }
1332 
1333   // All mem intrinsics modify the data.
1334   void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
1335 
1336   void visitBitCastInst(BitCastInst &BC) {
1337     Base::visitBitCastInst(BC);
1338     handleAlias(BC);
1339   }
1340 
1341   void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
1342     Base::visitAddrSpaceCastInst(ASC);
1343     handleAlias(ASC);
1344   }
1345 
1346   void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1347     // The base visitor will adjust Offset accordingly.
1348     Base::visitGetElementPtrInst(GEPI);
1349     handleAlias(GEPI);
1350   }
1351 
1352   void visitIntrinsicInst(IntrinsicInst &II) {
    // When we find that a lifetime marker refers to a
    // subrange of the original alloca, ignore that lifetime
    // marker to avoid misleading the analysis.
1356     if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
1357         !Offset.isZero())
1358       return Base::visitIntrinsicInst(II);
1359     LifetimeStarts.insert(&II);
1360   }
1361 
1362   void visitCallBase(CallBase &CB) {
1363     for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
1364       if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
1365         PI.setEscaped(&CB);
1366     handleMayWrite(CB);
1367   }
1368 
1369   bool getShouldLiveOnFrame() const {
1370     if (!ShouldLiveOnFrame)
1371       ShouldLiveOnFrame = computeShouldLiveOnFrame();
1372     return ShouldLiveOnFrame.getValue();
1373   }
1374 
1375   bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
1376 
1377   DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
1378     assert(getShouldLiveOnFrame() && "This method should only be called if the "
1379                                      "alloca needs to live on the frame.");
1380     for (const auto &P : AliasOffetMap)
1381       if (!P.second)
1382         report_fatal_error("Unable to handle an alias with unknown offset "
1383                            "created before CoroBegin.");
1384     return AliasOffetMap;
1385   }
1386 
1387 private:
1388   const DominatorTree &DT;
1389   const CoroBeginInst &CoroBegin;
1390   const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after it. Each entry records the aliasing instruction and its
  // offset into the original alloca. These aliases need to be recreated after
  // CoroBegin from the frame.
1394   DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{};
1395   SmallPtrSet<Instruction *, 4> Users{};
1396   SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
1397   bool MayWriteBeforeCoroBegin{false};
1398   bool ShouldUseLifetimeStartInfo{true};
1399 
1400   mutable llvm::Optional<bool> ShouldLiveOnFrame{};
1401 
1402   bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of a lifetime.start intrinsic and
    // a user of the pointer to see whether it crosses a suspension point.
    // The users cover both direct as well as indirect uses.
1407     if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
1408       for (auto *I : Users)
1409         for (auto *S : LifetimeStarts)
1410           if (Checker.isDefinitionAcrossSuspend(*S, I))
1411             return true;
1412       return false;
1413     }
1414     // FIXME: Ideally the isEscaped check should come at the beginning.
1415     // However there are a few loose ends that need to be fixed first before
1416     // we can do that. We need to make sure we are not over-conservative, so
1417     // that the data accessed in-between await_suspend and symmetric transfer
1418     // is always put on the stack, and also data accessed after coro.end is
1419     // always put on the stack (esp the return object). To fix that, we need
1420     // to:
1421     //  1) Potentially treat sret as nocapture in calls
1422     //  2) Special handle the return object and put it on the stack
1423     //  3) Utilize lifetime.end intrinsic
1424     if (PI.isEscaped())
1425       return true;
1426 
1427     for (auto *U1 : Users)
1428       for (auto *U2 : Users)
1429         if (Checker.isDefinitionAcrossSuspend(*U1, U2))
1430           return true;
1431 
1432     return false;
1433   }
1434 
1435   void handleMayWrite(const Instruction &I) {
1436     if (!DT.dominates(&CoroBegin, &I))
1437       MayWriteBeforeCoroBegin = true;
1438   }
1439 
1440   bool usedAfterCoroBegin(Instruction &I) {
1441     for (auto &U : I.uses())
1442       if (DT.dominates(&CoroBegin, U))
1443         return true;
1444     return false;
1445   }
1446 
1447   void handleAlias(Instruction &I) {
1448     // We track all aliases created prior to CoroBegin but used after.
1449     // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
1451     if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
1452       return;
1453 
1454     if (!IsOffsetKnown) {
1455       AliasOffetMap[&I].reset();
1456     } else {
1457       auto Itr = AliasOffetMap.find(&I);
1458       if (Itr == AliasOffetMap.end()) {
1459         AliasOffetMap[&I] = Offset;
1460       } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
1461         // If we have seen two different possible values for this alias, we set
1462         // it to empty.
1463         AliasOffetMap[&I].reset();
1464       }
1465     }
1466   }
1467 };
1468 } // namespace
1469 
// We need to make room to insert a spill after the initial PHIs, but before the
// catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in its block.
1473 //
1474 // Split away catchswitch into a separate block and insert in its place:
1475 //
1476 //   cleanuppad <InsertPt> cleanupret.
1477 //
// The cleanupret instruction will act as an insertion point for the spill.
1479 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
1480   BasicBlock *CurrentBlock = CatchSwitch->getParent();
1481   BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
1482   CurrentBlock->getTerminator()->eraseFromParent();
1483 
1484   auto *CleanupPad =
1485       CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
1486   auto *CleanupRet =
1487       CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
1488   return CleanupRet;
1489 }
1490 
1491 static void createFramePtr(coro::Shape &Shape) {
1492   auto *CB = Shape.CoroBegin;
1493   IRBuilder<> Builder(CB->getNextNode());
1494   StructType *FrameTy = Shape.FrameTy;
1495   PointerType *FramePtrTy = FrameTy->getPointerTo();
1496   Shape.FramePtr =
1497       cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
1498 }
1499 
1500 // Replace all alloca and SSA values that are accessed across suspend points
1501 // with GetElementPointer from coroutine frame + loads and stores. Create an
1502 // AllocaSpillBB that will become the new entry block for the resume parts of
1503 // the coroutine:
1504 //
1505 //    %hdl = coro.begin(...)
1506 //    whatever
1507 //
1508 // becomes:
1509 //
1510 //    %hdl = coro.begin(...)
1511 //    %FramePtr = bitcast i8* hdl to %f.frame*
1512 //    br label %AllocaSpillBB
1513 //
1514 //  AllocaSpillBB:
1515 //    ; geps corresponding to allocas that were moved to coroutine frame
1516 //    br label PostSpill
1517 //
1518 //  PostSpill:
1519 //    whatever
1520 //
1521 //
1522 static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
1523   auto *CB = Shape.CoroBegin;
1524   LLVMContext &C = CB->getContext();
1525   IRBuilder<> Builder(C);
1526   StructType *FrameTy = Shape.FrameTy;
1527   Value *FramePtr = Shape.FramePtr;
1528   DominatorTree DT(*CB->getFunction());
1529   SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
1530 
1531   // Create a GEP with the given index into the coroutine frame for the original
1532   // value Orig. Appends an extra 0 index for array-allocas, preserving the
1533   // original type.
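  // For example, for a frame field at index 2 this emits, roughly:
  //   %gep = getelementptr inbounds %f.frame, %f.frame* %FramePtr, i32 0, i32 2
  // (the "%gep" name is illustrative), with a trailing "i32 0" appended when
  // Orig is an array alloca.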
1534   auto GetFramePointer = [&](Value *Orig) -> Value * {
1535     FieldIDType Index = FrameData.getFieldIndex(Orig);
1536     SmallVector<Value *, 3> Indices = {
1537         ConstantInt::get(Type::getInt32Ty(C), 0),
1538         ConstantInt::get(Type::getInt32Ty(C), Index),
1539     };
1540 
1541     if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
1542       if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
1543         auto Count = CI->getValue().getZExtValue();
1544         if (Count > 1) {
1545           Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
1546         }
1547       } else {
1548         report_fatal_error("Coroutines cannot handle non static allocas yet");
1549       }
1550     }
1551 
1552     auto GEP = cast<GetElementPtrInst>(
1553         Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
1554     if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst, it
      // implies that the alloca may share its frame slot with another alloca.
      // In that case we cast the GEP to the alloca's type here so that the
      // frame storage can be reused.
      //
      // Note: If we change the strategy for dealing with alignment, we need to
      // refine this casting.
1562       if (GEP->getResultElementType() != Orig->getType())
1563         return Builder.CreateBitCast(GEP, Orig->getType(),
1564                                      Orig->getName() + Twine(".cast"));
1565     }
1566     return GEP;
1567   };
1568 
1569   for (auto const &E : FrameData.Spills) {
1570     Value *Def = E.first;
1571     auto SpillAlignment = Align(FrameData.getAlign(Def));
1572     // Create a store instruction storing the value into the
1573     // coroutine frame.
1574     Instruction *InsertPt = nullptr;
1575     Type *ByValTy = nullptr;
1576     if (auto *Arg = dyn_cast<Argument>(Def)) {
1577       // For arguments, we will place the store instruction right after
1578       // the coroutine frame pointer instruction, i.e. bitcast of
1579       // coro.begin from i8* to %f.frame*.
1580       InsertPt = Shape.getInsertPtAfterFramePtr();
1581 
1582       // If we're spilling an Argument, make sure we clear 'nocapture'
1583       // from the coroutine function.
1584       Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
1585 
1586       if (Arg->hasByValAttr())
1587         ByValTy = Arg->getParamByValType();
1588     } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
1589       // Don't spill immediately after a suspend; splitting assumes
1590       // that the suspend will be followed by a branch.
1591       InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
1592     } else {
1593       auto *I = cast<Instruction>(Def);
1594       if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, the spill should be
        // inserted immediately after the coroutine frame pointer is computed.
1597         InsertPt = Shape.getInsertPtAfterFramePtr();
1598       } else if (auto *II = dyn_cast<InvokeInst>(I)) {
1599         // If we are spilling the result of the invoke instruction, split
1600         // the normal edge and insert the spill in the new block.
1601         auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
1602         InsertPt = NewBB->getTerminator();
1603       } else if (isa<PHINode>(I)) {
        // Skip PHI nodes and EH pad instructions.
1605         BasicBlock *DefBlock = I->getParent();
1606         if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
1607           InsertPt = splitBeforeCatchSwitch(CSI);
1608         else
1609           InsertPt = &*DefBlock->getFirstInsertionPt();
1610       } else {
1611         assert(!I->isTerminator() && "unexpected terminator");
1612         // For all other values, the spill is placed immediately after
1613         // the definition.
1614         InsertPt = I->getNextNode();
1615       }
1616     }
1617 
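    // For example, spilling a value %x assigned to frame index 3 emits,
    // roughly (the i32 type is illustrative):
    //   %x.spill.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
    //                                          i32 0, i32 3
    //   store i32 %x, i32* %x.spill.addr
    // For byval arguments the pointee is loaded first and that loaded value is
    // stored instead.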
1618     auto Index = FrameData.getFieldIndex(Def);
1619     Builder.SetInsertPoint(InsertPt);
1620     auto *G = Builder.CreateConstInBoundsGEP2_32(
1621         FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
1622     if (ByValTy) {
1623       // For byval arguments, we need to store the pointed value in the frame,
1624       // instead of the pointer itself.
1625       auto *Value = Builder.CreateLoad(ByValTy, Def);
1626       Builder.CreateAlignedStore(Value, G, SpillAlignment);
1627     } else {
1628       Builder.CreateAlignedStore(Def, G, SpillAlignment);
1629     }
1630 
1631     BasicBlock *CurrentBlock = nullptr;
1632     Value *CurrentReload = nullptr;
1633     for (auto *U : E.second) {
      // If we have not seen this use block yet, create a GEP into the frame
      // and, unless the spilled value is a byval argument, a load instruction
      // to reload the spilled value from the coroutine frame.
1637       if (CurrentBlock != U->getParent()) {
1638         CurrentBlock = U->getParent();
1639         Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
1640 
1641         auto *GEP = GetFramePointer(E.first);
1642         GEP->setName(E.first->getName() + Twine(".reload.addr"));
1643         if (ByValTy)
1644           CurrentReload = GEP;
1645         else
1646           CurrentReload = Builder.CreateAlignedLoad(
1647               FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
1648               SpillAlignment, E.first->getName() + Twine(".reload"));
1649 
1650         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
1651         for (DbgDeclareInst *DDI : DIs) {
1652           bool AllowUnresolved = false;
1653           // This dbg.declare is preserved for all coro-split function
1654           // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() called from the CoroCloner.
1656           DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
1657               .insertDeclare(CurrentReload, DDI->getVariable(),
1658                              DDI->getExpression(), DDI->getDebugLoc(),
1659                              &*Builder.GetInsertPoint());
1660           // This dbg.declare is for the main function entry point.  It
1661           // will be deleted in all coro-split functions.
1662           coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
1663         }
1664       }
1665 
1666       // Salvage debug info on any dbg.addr that we see. We do not insert them
1667       // into each block where we have a use though.
1668       if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
1669         coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
1670       }
1671 
1672       // If we have a single edge PHINode, remove it and replace it with a
1673       // reload from the coroutine frame. (We already took care of multi edge
1674       // PHINodes by rewriting them in the rewritePHIs function).
1675       if (auto *PN = dyn_cast<PHINode>(U)) {
1676         assert(PN->getNumIncomingValues() == 1 &&
1677                "unexpected number of incoming "
1678                "values in the PHINode");
1679         PN->replaceAllUsesWith(CurrentReload);
1680         PN->eraseFromParent();
1681         continue;
1682       }
1683 
1684       // Replace all uses of CurrentValue in the current instruction with
1685       // reload.
1686       U->replaceUsesOfWith(Def, CurrentReload);
1687     }
1688   }
1689 
1690   BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();
1691 
1692   auto SpillBlock = FramePtrBB->splitBasicBlock(
1693       Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
1694   SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
1695   Shape.AllocaSpillBlock = SpillBlock;
1696 
1697   // retcon and retcon.once lowering assumes all uses have been sunk.
1698   if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
1699       Shape.ABI == coro::ABI::Async) {
1700     // If we found any allocas, replace all of their remaining uses with Geps.
1701     Builder.SetInsertPoint(&SpillBlock->front());
1702     for (const auto &P : FrameData.Allocas) {
1703       AllocaInst *Alloca = P.Alloca;
1704       auto *G = GetFramePointer(Alloca);
1705 
1706       // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
1708       G->takeName(Alloca);
1709       Alloca->replaceAllUsesWith(G);
1710       Alloca->eraseFromParent();
1711     }
1712     return;
1713   }
1714 
  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To preserve debuggability, we replace the alloca uses in
  // dbg.declares and dbg.values with the reload address from the frame.
  // Note: We cannot replace the allocas with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
1720   Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
1721   SmallVector<Instruction *, 4> UsersToUpdate;
1722   for (const auto &A : FrameData.Allocas) {
1723     AllocaInst *Alloca = A.Alloca;
1724     UsersToUpdate.clear();
1725     for (User *U : Alloca->users()) {
1726       auto *I = cast<Instruction>(U);
1727       if (DT.dominates(CB, I))
1728         UsersToUpdate.push_back(I);
1729     }
1730     if (UsersToUpdate.empty())
1731       continue;
1732     auto *G = GetFramePointer(Alloca);
1733     G->setName(Alloca->getName() + Twine(".reload.addr"));
1734 
1735     SmallVector<DbgVariableIntrinsic *, 4> DIs;
1736     findDbgUsers(DIs, Alloca);
1737     for (auto *DVI : DIs)
1738       DVI->replaceUsesOfWith(Alloca, G);
1739 
1740     for (Instruction *I : UsersToUpdate)
1741       I->replaceUsesOfWith(Alloca, G);
1742   }
1743   Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
1744   for (const auto &A : FrameData.Allocas) {
1745     AllocaInst *Alloca = A.Alloca;
1746     if (A.MayWriteBeforeCoroBegin) {
      // MayWriteBeforeCoroBegin means the alloca may have been modified before
      // CoroBegin, so copy its current contents into the frame slot.
1748       if (Alloca->isArrayAllocation())
1749         report_fatal_error(
1750             "Coroutines cannot handle copying of array allocas yet");
1751 
1752       auto *G = GetFramePointer(Alloca);
1753       auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
1754       Builder.CreateStore(Value, G);
1755     }
1756     // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
1758     // to the pointer in the frame.
1759     for (const auto &Alias : A.Aliases) {
1760       auto *FramePtr = GetFramePointer(Alloca);
1761       auto *FramePtrRaw =
1762           Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
1763       auto *AliasPtr = Builder.CreateGEP(
1764           Type::getInt8Ty(C), FramePtrRaw,
1765           ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
1766       auto *AliasPtrTyped =
1767           Builder.CreateBitCast(AliasPtr, Alias.first->getType());
1768       Alias.first->replaceUsesWithIf(
1769           AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
1770     }
1771   }
1772 }
1773 
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
1775 // PHI in InsertedBB.
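// For example, for an inserted block %bb.from.pred on the edge %pred -> %succ
// (names are illustrative), a PHI in %succ such as
//   %v = phi i32 [ %x, %bb.from.pred ], ...
// becomes
//   %v = phi i32 [ %x.succ, %bb.from.pred ], ...
// with a new single-entry PHI placed in %bb.from.pred:
//   %x.succ = phi i32 [ %x, %pred ]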
1776 static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
1777                                          BasicBlock *InsertedBB,
1778                                          BasicBlock *PredBB,
1779                                          PHINode *UntilPHI = nullptr) {
1780   auto *PN = cast<PHINode>(&SuccBB->front());
1781   do {
1782     int Index = PN->getBasicBlockIndex(InsertedBB);
1783     Value *V = PN->getIncomingValue(Index);
1784     PHINode *InputV = PHINode::Create(
1785         V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
1786         &InsertedBB->front());
1787     InputV->addIncoming(V, PredBB);
1788     PN->setIncomingValue(Index, InputV);
1789     PN = dyn_cast<PHINode>(PN->getNextNode());
1790   } while (PN != UntilPHI);
1791 }
1792 
1793 // Rewrites the PHI Nodes in a cleanuppad.
1794 static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
1795                                      CleanupPadInst *CleanupPad) {
1796   // For every incoming edge to a CleanupPad we will create a new block holding
1797   // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
1799   // must be the same).
1800   //
1801   // cleanuppad:
1802   //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
1803   //    %3 = cleanuppad within none []
1804   //
1805   // It will create:
1806   //
1807   // cleanuppad.corodispatch
1808   //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
1809   //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
1811   //            [i8 0, label %cleanuppad.from.catchswitch
1812   //             i8 1, label %cleanuppad.from.catch.1]
1813   // cleanuppad.from.catchswitch:
1814   //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
1816   // cleanuppad.from.catch.1:
1817   //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
1819   // cleanuppad:
1820   //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
1821   //                 [%6, %cleanuppad.from.catch.1]
1822 
1823   // Unreachable BB, in case switching on an invalid value in the dispatcher.
1824   auto *UnreachBB = BasicBlock::Create(
1825       CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
1826   IRBuilder<> Builder(UnreachBB);
1827   Builder.CreateUnreachable();
1828 
1829   // Create a new cleanuppad which will be the dispatcher.
1830   auto *NewCleanupPadBB =
1831       BasicBlock::Create(CleanupPadBB->getContext(),
1832                          CleanupPadBB->getName() + Twine(".corodispatch"),
1833                          CleanupPadBB->getParent(), CleanupPadBB);
1834   Builder.SetInsertPoint(NewCleanupPadBB);
1835   auto *SwitchType = Builder.getInt8Ty();
1836   auto *SetDispatchValuePN =
1837       Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
1838   CleanupPad->removeFromParent();
1839   CleanupPad->insertAfter(SetDispatchValuePN);
1840   auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
1841                                                 pred_size(CleanupPadBB));
1842 
1843   int SwitchIndex = 0;
1844   SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
1845   for (BasicBlock *Pred : Preds) {
1846     // Create a new cleanuppad and move the PHI values to there.
1847     auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
1848                                       CleanupPadBB->getName() +
1849                                           Twine(".from.") + Pred->getName(),
1850                                       CleanupPadBB->getParent(), CleanupPadBB);
1851     updatePhiNodes(CleanupPadBB, Pred, CaseBB);
1852     CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
1853                     Pred->getName());
1854     Builder.SetInsertPoint(CaseBB);
1855     Builder.CreateBr(CleanupPadBB);
1856     movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
1857 
1858     // Update this Pred to the new unwind point.
1859     setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
1860 
1861     // Setup the switch in the dispatcher.
1862     auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
1863     SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
1864     SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
1865     SwitchIndex++;
1866   }
1867 }
1868 
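// Replace the uses of PHI nodes that have a single incoming value with that
// incoming value. Later code assumes such single-predecessor PHIs are not live
// across a suspend point.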
1869 static void cleanupSinglePredPHIs(Function &F) {
1870   SmallVector<PHINode *, 32> Worklist;
1871   for (auto &BB : F) {
1872     for (auto &Phi : BB.phis()) {
1873       if (Phi.getNumIncomingValues() == 1) {
1874         Worklist.push_back(&Phi);
1875       } else
1876         break;
1877     }
1878   }
1879   while (!Worklist.empty()) {
1880     auto *Phi = Worklist.pop_back_val();
1881     auto *OriginalValue = Phi->getIncomingValue(0);
1882     Phi->replaceAllUsesWith(OriginalValue);
1883   }
1884 }
1885 
1886 static void rewritePHIs(BasicBlock &BB) {
1887   // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
1889   //
1890   // loop:
1891   //    %n.val = phi i32[%n, %entry], [%inc, %loop]
1892   //
1893   // It will create:
1894   //
1895   // loop.from.entry:
1896   //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
1898   // loop.from.loop:
1899   //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
1901   //
1902   // After this rewrite, further analysis will ignore any phi nodes with more
1903   // than one incoming edge.
1904 
1905   // TODO: Simplify PHINodes in the basic block to remove duplicate
1906   // predecessors.
1907 
1908   // Special case for CleanupPad: all EH blocks must have the same unwind edge
1909   // so we need to create an additional "dispatcher" block.
1910   if (auto *CleanupPad =
1911           dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
1912     SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1913     for (BasicBlock *Pred : Preds) {
1914       if (CatchSwitchInst *CS =
1915               dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
1916         // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
1918         assert(CS->getUnwindDest() == &BB);
1919         (void)CS;
1920         rewritePHIsForCleanupPad(&BB, CleanupPad);
1921         return;
1922       }
1923     }
1924   }
1925 
1926   LandingPadInst *LandingPad = nullptr;
1927   PHINode *ReplPHI = nullptr;
1928   if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
1929     // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
1930     // We replace the original landing pad with a PHINode that will collect the
1931     // results from all of them.
1932     ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
1933     ReplPHI->takeName(LandingPad);
1934     LandingPad->replaceAllUsesWith(ReplPHI);
1935     // We will erase the original landing pad at the end of this function after
1936     // ehAwareSplitEdge cloned it in the transition blocks.
1937   }
1938 
1939   SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
1940   for (BasicBlock *Pred : Preds) {
1941     auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
1942     IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
1943 
1944     // Stop the moving of values at ReplPHI, as this is either null or the PHI
1945     // that replaced the landing pad.
1946     movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
1947   }
1948 
1949   if (LandingPad) {
    // The calls to ehAwareSplitEdge above cloned the original landing pad, so
    // it is no longer needed.
1952     LandingPad->eraseFromParent();
1953   }
1954 }
1955 
1956 static void rewritePHIs(Function &F) {
1957   SmallVector<BasicBlock *, 8> WorkList;
1958 
1959   for (BasicBlock &BB : F)
1960     if (auto *PN = dyn_cast<PHINode>(&BB.front()))
1961       if (PN->getNumIncomingValues() > 1)
1962         WorkList.push_back(&BB);
1963 
1964   for (BasicBlock *BB : WorkList)
1965     rewritePHIs(*BB);
1966 }
1967 
// Check for instructions that we can recreate on resume as opposed to spilling
// the result into the coroutine frame.
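// For example, a GEP or a cast whose result is used on the other side of a
// suspend point can simply be recomputed after the suspend instead of having
// its result stored in the coroutine frame.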
1970 static bool materializable(Instruction &V) {
1971   return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
1972          isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
1973 }
1974 
1975 // Check for structural coroutine intrinsics that should not be spilled into
1976 // the coroutine frame.
1977 static bool isCoroutineStructureIntrinsic(Instruction &I) {
1978   return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
1979          isa<CoroSuspendInst>(&I);
1980 }
1981 
// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
1984 static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
1985                                               const SpillInfo &Spills) {
1986   for (const auto &E : Spills) {
1987     Value *Def = E.first;
1988     BasicBlock *CurrentBlock = nullptr;
1989     Instruction *CurrentMaterialization = nullptr;
1990     for (Instruction *U : E.second) {
1991       // If we have not seen this block, materialize the value.
1992       if (CurrentBlock != U->getParent()) {
1993 
1994         bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
1995         CurrentBlock = U->getParent();
1996         auto *InsertBlock = IsInCoroSuspendBlock
1997                                 ? CurrentBlock->getSinglePredecessor()
1998                                 : CurrentBlock;
1999         CurrentMaterialization = cast<Instruction>(Def)->clone();
2000         CurrentMaterialization->setName(Def->getName());
2001         CurrentMaterialization->insertBefore(
2002             IsInCoroSuspendBlock ? InsertBlock->getTerminator()
2003                                  : &*InsertBlock->getFirstInsertionPt());
2004       }
2005       if (auto *PN = dyn_cast<PHINode>(U)) {
2006         assert(PN->getNumIncomingValues() == 1 &&
2007                "unexpected number of incoming "
2008                "values in the PHINode");
2009         PN->replaceAllUsesWith(CurrentMaterialization);
2010         PN->eraseFromParent();
2011         continue;
2012       }
2013       // Replace all uses of Def in the current instruction with the
2014       // CurrentMaterialization for the block.
2015       U->replaceUsesOfWith(Def, CurrentMaterialization);
2016     }
2017   }
2018 }
2019 
2020 // Splits the block at a particular instruction unless it is the first
2021 // instruction in the block with a single predecessor.
2022 static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
2023   auto *BB = I->getParent();
2024   if (&BB->front() == I) {
2025     if (BB->getSinglePredecessor()) {
2026       BB->setName(Name);
2027       return BB;
2028     }
2029   }
2030   return BB->splitBasicBlock(I, Name);
2031 }
2032 
// Split above and below a particular instruction so that it
// ends up alone in its own block.
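// For example, calling splitAround on a coro.suspend with Name "CoroSuspend"
// leaves the suspend (plus an unconditional branch) alone in a block named
// "CoroSuspend", followed by a block named "AfterCoroSuspend" that holds the
// remaining instructions.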
2035 static void splitAround(Instruction *I, const Twine &Name) {
2036   splitBlockIfNotFirst(I, Name);
2037   splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
2038 }
2039 
2040 static bool isSuspendBlock(BasicBlock *BB) {
2041   return isa<AnyCoroSuspendInst>(BB->front());
2042 }
2043 
2044 typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
2045 
2046 /// Does control flow starting at the given block ever reach a suspend
2047 /// instruction before reaching a block in VisitedOrFreeBBs?
2048 static bool isSuspendReachableFrom(BasicBlock *From,
2049                                    VisitedBlocksSet &VisitedOrFreeBBs) {
2050   // Eagerly try to add this block to the visited set.  If it's already
2051   // there, stop recursing; this path doesn't reach a suspend before
2052   // either looping or reaching a freeing block.
2053   if (!VisitedOrFreeBBs.insert(From).second)
2054     return false;
2055 
2056   // We assume that we'll already have split suspends into their own blocks.
2057   if (isSuspendBlock(From))
2058     return true;
2059 
2060   // Recurse on the successors.
2061   for (auto Succ : successors(From)) {
2062     if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
2063       return true;
2064   }
2065 
2066   return false;
2067 }
2068 
2069 /// Is the given alloca "local", i.e. bounded in lifetime to not cross a
2070 /// suspend point?
2071 static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
2072   // Seed the visited set with all the basic blocks containing a free
2073   // so that we won't pass them up.
2074   VisitedBlocksSet VisitedOrFreeBBs;
2075   for (auto User : AI->users()) {
2076     if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
2077       VisitedOrFreeBBs.insert(FI->getParent());
2078   }
2079 
2080   return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
2081 }
2082 
2083 /// After we split the coroutine, will the given basic block be along
2084 /// an obvious exit path for the resumption function?
2085 static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
2086                                               unsigned depth = 3) {
2087   // If we've bottomed out our depth count, stop searching and assume
2088   // that the path might loop back.
2089   if (depth == 0) return false;
2090 
2091   // If this is a suspend block, we're about to exit the resumption function.
2092   if (isSuspendBlock(BB)) return true;
2093 
2094   // Recurse into the successors.
2095   for (auto Succ : successors(BB)) {
2096     if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
2097       return false;
2098   }
2099 
2100   // If none of the successors leads back in a loop, we're on an exit/abort.
2101   return true;
2102 }
2103 
2104 static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
2105   // Look for a free that isn't sufficiently obviously followed by
2106   // either a suspend or a termination, i.e. something that will leave
2107   // the coro resumption frame.
2108   for (auto U : AI->users()) {
2109     auto FI = dyn_cast<CoroAllocaFreeInst>(U);
2110     if (!FI) continue;
2111 
2112     if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
2113       return true;
2114   }
2115 
2116   // If we never found one, we don't need a stack save.
2117   return false;
2118 }
2119 
2120 /// Turn each of the given local allocas into a normal (dynamic) alloca
2121 /// instruction.
2122 static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
2123                               SmallVectorImpl<Instruction*> &DeadInsts) {
2124   for (auto AI : LocalAllocas) {
2125     auto M = AI->getModule();
2126     IRBuilder<> Builder(AI);
2127 
2128     // Save the stack depth.  Try to avoid doing this if the stackrestore
2129     // is going to immediately precede a return or something.
2130     Value *StackSave = nullptr;
2131     if (localAllocaNeedsStackSave(AI))
2132       StackSave = Builder.CreateCall(
2133                             Intrinsic::getDeclaration(M, Intrinsic::stacksave));
2134 
2135     // Allocate memory.
2136     auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
2137     Alloca->setAlignment(Align(AI->getAlignment()));
2138 
2139     for (auto U : AI->users()) {
2140       // Replace gets with the allocation.
2141       if (isa<CoroAllocaGetInst>(U)) {
2142         U->replaceAllUsesWith(Alloca);
2143 
2144       // Replace frees with stackrestores.  This is safe because
2145       // alloca.alloc is required to obey a stack discipline, although we
2146       // don't enforce that structurally.
2147       } else {
2148         auto FI = cast<CoroAllocaFreeInst>(U);
2149         if (StackSave) {
2150           Builder.SetInsertPoint(FI);
2151           Builder.CreateCall(
2152                     Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
2153                              StackSave);
2154         }
2155       }
2156       DeadInsts.push_back(cast<Instruction>(U));
2157     }
2158 
2159     DeadInsts.push_back(AI);
2160   }
2161 }
2162 
2163 /// Turn the given coro.alloca.alloc call into a dynamic allocation.
2164 /// This happens during the all-instructions iteration, so it must not
2165 /// delete the call.
2166 static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
2167                                         coro::Shape &Shape,
2168                                    SmallVectorImpl<Instruction*> &DeadInsts) {
2169   IRBuilder<> Builder(AI);
2170   auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
2171 
2172   for (User *U : AI->users()) {
2173     if (isa<CoroAllocaGetInst>(U)) {
2174       U->replaceAllUsesWith(Alloc);
2175     } else {
2176       auto FI = cast<CoroAllocaFreeInst>(U);
2177       Builder.SetInsertPoint(FI);
2178       Shape.emitDealloc(Builder, Alloc, nullptr);
2179     }
2180     DeadInsts.push_back(cast<Instruction>(U));
2181   }
2182 
2183   // Push this on last so that it gets deleted after all the others.
2184   DeadInsts.push_back(AI);
2185 
2186   // Return the new allocation value so that we can check for needed spills.
2187   return cast<Instruction>(Alloc);
2188 }
2189 
2190 /// Get the current swifterror value.
2191 static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
2192                                      coro::Shape &Shape) {
2193   // Make a fake function pointer as a sort of intrinsic.
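  // The emitted IR is a call through a null function pointer, roughly
  // (the value type shown is illustrative):
  //   %err = call %swift.error* null()
  // These placeholder calls are recorded in Shape.SwiftErrorOps and fixed up
  // later when the coroutine is split.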
2194   auto FnTy = FunctionType::get(ValueTy, {}, false);
2195   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2196 
2197   auto Call = Builder.CreateCall(FnTy, Fn, {});
2198   Shape.SwiftErrorOps.push_back(Call);
2199 
2200   return Call;
2201 }
2202 
2203 /// Set the given value as the current swifterror value.
2204 ///
2205 /// Returns a slot that can be used as a swifterror slot.
2206 static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
2207                                      coro::Shape &Shape) {
2208   // Make a fake function pointer as a sort of intrinsic.
2209   auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
2210                                 {V->getType()}, false);
2211   auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
2212 
2213   auto Call = Builder.CreateCall(FnTy, Fn, { V });
2214   Shape.SwiftErrorOps.push_back(Call);
2215 
2216   return Call;
2217 }
2218 
2219 /// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
2221 ///
2222 /// Returns an address that will stand in for the swifterror slot
2223 /// until splitting.
2224 static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
2225                                                  AllocaInst *Alloca,
2226                                                  coro::Shape &Shape) {
2227   auto ValueTy = Alloca->getAllocatedType();
2228   IRBuilder<> Builder(Call);
2229 
2230   // Load the current value from the alloca and set it as the
2231   // swifterror value.
2232   auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
2233   auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
2234 
2235   // Move to after the call.  Since swifterror only has a guaranteed
2236   // value on normal exits, we can ignore implicit and explicit unwind
2237   // edges.
2238   if (isa<CallInst>(Call)) {
2239     Builder.SetInsertPoint(Call->getNextNode());
2240   } else {
2241     auto Invoke = cast<InvokeInst>(Call);
2242     Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
2243   }
2244 
2245   // Get the current swifterror value and store it to the alloca.
2246   auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
2247   Builder.CreateStore(ValueAfterCall, Alloca);
2248 
2249   return Addr;
2250 }
2251 
2252 /// Eliminate a formerly-swifterror alloca by inserting the get/set
2253 /// intrinsics and attempting to MemToReg the alloca away.
2254 static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
2255                                       coro::Shape &Shape) {
2256   for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
2257     // swifterror values can only be used in very specific ways.
2258     // We take advantage of that here.
2259     auto User = Use.getUser();
2260     if (isa<LoadInst>(User) || isa<StoreInst>(User))
2261       continue;
2262 
2263     assert(isa<CallInst>(User) || isa<InvokeInst>(User));
2264     auto Call = cast<Instruction>(User);
2265 
2266     auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
2267 
2268     // Use the returned slot address as the call argument.
2269     Use.set(Addr);
2270   }
2271 
2272   // All the uses should be loads and stores now.
2273   assert(isAllocaPromotable(Alloca));
2274 }
2275 
2276 /// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
2278 ///
2279 /// The argument keeps the swifterror flag.
2280 static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
2281                                         coro::Shape &Shape,
2282                              SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
2283   IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
2284 
2285   auto ArgTy = cast<PointerType>(Arg.getType());
2286   // swifterror arguments are required to have pointer-to-pointer type,
2287   // so create a pointer-typed alloca with opaque pointers.
2288   auto ValueTy = ArgTy->isOpaque() ? PointerType::getUnqual(F.getContext())
2289                                    : ArgTy->getNonOpaquePointerElementType();
2290 
2291   // Reduce to the alloca case:
2292 
2293   // Create an alloca and replace all uses of the arg with it.
2294   auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
2295   Arg.replaceAllUsesWith(Alloca);
2296 
2297   // Set an initial value in the alloca.  swifterror is always null on entry.
2298   auto InitialValue = Constant::getNullValue(ValueTy);
2299   Builder.CreateStore(InitialValue, Alloca);
2300 
2301   // Find all the suspends in the function and save and restore around them.
2302   for (auto Suspend : Shape.CoroSuspends) {
2303     (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
2304   }
2305 
2306   // Find all the coro.ends in the function and restore the error value.
2307   for (auto End : Shape.CoroEnds) {
2308     Builder.SetInsertPoint(End);
2309     auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
2310     (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
2311   }
2312 
2313   // Now we can use the alloca logic.
2314   AllocasToPromote.push_back(Alloca);
2315   eliminateSwiftErrorAlloca(F, Alloca, Shape);
2316 }
2317 
2318 /// Eliminate all problematic uses of swifterror arguments and allocas
2319 /// from the function.  We'll fix them up later when splitting the function.
2320 static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
2321   SmallVector<AllocaInst*, 4> AllocasToPromote;
2322 
2323   // Look for a swifterror argument.
2324   for (auto &Arg : F.args()) {
2325     if (!Arg.hasSwiftErrorAttr()) continue;
2326 
2327     eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
2328     break;
2329   }
2330 
2331   // Look for swifterror allocas.
2332   for (auto &Inst : F.getEntryBlock()) {
2333     auto Alloca = dyn_cast<AllocaInst>(&Inst);
2334     if (!Alloca || !Alloca->isSwiftError()) continue;
2335 
2336     // Clear the swifterror flag.
2337     Alloca->setSwiftError(false);
2338 
2339     AllocasToPromote.push_back(Alloca);
2340     eliminateSwiftErrorAlloca(F, Alloca, Shape);
2341   }
2342 
2343   // If we have any allocas to promote, compute a dominator tree and
2344   // promote them en masse.
2345   if (!AllocasToPromote.empty()) {
2346     DominatorTree DT(F);
2347     PromoteMemToReg(AllocasToPromote, DT);
2348   }
2349 }
2350 
2351 /// retcon and retcon.once conventions assume that all spill uses can be sunk
2352 /// after the coro.begin intrinsic.
2353 static void sinkSpillUsesAfterCoroBegin(Function &F,
2354                                         const FrameDataInfo &FrameData,
2355                                         CoroBeginInst *CoroBegin) {
2356   DominatorTree Dom(F);
2357 
2358   SmallSetVector<Instruction *, 32> ToMove;
2359   SmallVector<Instruction *, 32> Worklist;
2360 
2361   // Collect all users that precede coro.begin.
2362   for (auto *Def : FrameData.getAllDefs()) {
2363     for (User *U : Def->users()) {
2364       auto Inst = cast<Instruction>(U);
2365       if (Inst->getParent() != CoroBegin->getParent() ||
2366           Dom.dominates(CoroBegin, Inst))
2367         continue;
2368       if (ToMove.insert(Inst))
2369         Worklist.push_back(Inst);
2370     }
2371   }
2372   // Recursively collect users before coro.begin.
2373   while (!Worklist.empty()) {
2374     auto *Def = Worklist.pop_back_val();
2375     for (User *U : Def->users()) {
2376       auto Inst = cast<Instruction>(U);
2377       if (Dom.dominates(CoroBegin, Inst))
2378         continue;
2379       if (ToMove.insert(Inst))
2380         Worklist.push_back(Inst);
2381     }
2382   }
2383 
2384   // Sort by dominance.
2385   SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
2386   llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
2388     return Dom.dominates(A, B);
2389   });
2390 
2391   Instruction *InsertPt = CoroBegin->getNextNode();
2392   for (Instruction *Inst : InsertionList)
2393     Inst->moveBefore(InsertPt);
2394 }
2395 
/// For each local variable whose uses are all contained within a single
/// suspended region, sink its lifetime.start markers to just after the suspend
/// block. Doing so minimizes the lifetime of each variable, hence minimizing
/// the amount of data we end up putting on the frame.
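/// For example, if all non-lifetime uses of an alloca are dominated by the
/// single successor of a particular suspend block, a fresh lifetime.start is
/// emitted in that successor (before its terminator) and the original markers
/// are erased, so the alloca no longer appears to be live across the suspend.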
2400 static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
2401                                      SuspendCrossingInfo &Checker) {
2402   DominatorTree DT(F);
2403 
2404   // Collect all possible basic blocks which may dominate all uses of allocas.
2405   SmallPtrSet<BasicBlock *, 4> DomSet;
2406   DomSet.insert(&F.getEntryBlock());
2407   for (auto *CSI : Shape.CoroSuspends) {
2408     BasicBlock *SuspendBlock = CSI->getParent();
2409     assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
2410            "should have split coro.suspend into its own block");
2411     DomSet.insert(SuspendBlock->getSingleSuccessor());
2412   }
2413 
2414   for (Instruction &I : instructions(F)) {
2415     AllocaInst* AI = dyn_cast<AllocaInst>(&I);
2416     if (!AI)
2417       continue;
2418 
2419     for (BasicBlock *DomBB : DomSet) {
2420       bool Valid = true;
2421       SmallVector<Instruction *, 1> Lifetimes;
2422 
2423       auto isLifetimeStart = [](Instruction* I) {
2424         if (auto* II = dyn_cast<IntrinsicInst>(I))
2425           return II->getIntrinsicID() == Intrinsic::lifetime_start;
2426         return false;
2427       };
2428 
2429       auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
2430         if (isLifetimeStart(U)) {
2431           Lifetimes.push_back(U);
2432           return true;
2433         }
2434         if (!U->hasOneUse() || U->stripPointerCasts() != AI)
2435           return false;
2436         if (isLifetimeStart(U->user_back())) {
2437           Lifetimes.push_back(U->user_back());
2438           return true;
2439         }
2440         return false;
2441       };
2442 
2443       for (User *U : AI->users()) {
2444         Instruction *UI = cast<Instruction>(U);
2445         // For all users except lifetime.start markers, if they are all
2446         // dominated by one of the basic blocks and do not cross
2447         // suspend points as well, then there is no need to spill the
2448         // instruction.
2449         if (!DT.dominates(DomBB, UI->getParent()) ||
2450             Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
2451           // Skip lifetime.start, GEP and bitcast used by lifetime.start
2452           // markers.
2453           if (collectLifetimeStart(UI, AI))
2454             continue;
2455           Valid = false;
2456           break;
2457         }
2458       }
      // Sink the lifetime.start markers into the dominating block when they
      // are the only uses that lie outside of it.
2461       if (Valid && Lifetimes.size() != 0) {
        // This may be AI itself, when the type of AI is already i8*.
2463         auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
2464           if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
2465             return AI;
2466           auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
2467           return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
2468                                   DomBB->getTerminator());
2469         }(AI);
2470 
2471         auto *NewLifetime = Lifetimes[0]->clone();
2472         NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
2473         NewLifetime->insertBefore(DomBB->getTerminator());
2474 
        // The lifetime.start markers outside the region are no longer
        // necessary.
2476         for (Instruction *S : Lifetimes)
2477           S->eraseFromParent();
2478 
2479         break;
2480       }
2481     }
2482   }
2483 }
2484 
2485 static void collectFrameAllocas(Function &F, coro::Shape &Shape,
2486                                 const SuspendCrossingInfo &Checker,
2487                                 SmallVectorImpl<AllocaInfo> &Allocas) {
2488   for (Instruction &I : instructions(F)) {
2489     auto *AI = dyn_cast<AllocaInst>(&I);
2490     if (!AI)
2491       continue;
2492     // The PromiseAlloca will be specially handled since it needs to be in a
2493     // fixed position in the frame.
2494     if (AI == Shape.SwitchLowering.PromiseAlloca) {
2495       continue;
2496     }
2497     DominatorTree DT(F);
    // The code that uses the lifetime.start intrinsic does not work for
    // functions with loops that have no exit. Disable it for ABIs that we know
    // generate such code.
2501     bool ShouldUseLifetimeStartInfo =
2502         (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
2503          Shape.ABI != coro::ABI::RetconOnce);
2504     AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
2505                              *Shape.CoroBegin, Checker,
2506                              ShouldUseLifetimeStartInfo};
2507     Visitor.visitPtr(*AI);
2508     if (!Visitor.getShouldLiveOnFrame())
2509       continue;
2510     Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
2511                          Visitor.getMayWriteBeforeCoroBegin());
2512   }
2513 }
2514 
2515 void coro::salvageDebugInfo(
2516     SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
2517     DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
2518   Function *F = DVI->getFunction();
2519   IRBuilder<> Builder(F->getContext());
2520   auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
2521   while (isa<IntrinsicInst>(InsertPt))
2522     ++InsertPt;
2523   Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
2524   DIExpression *Expr = DVI->getExpression();
2525   // Follow the pointer arithmetic all the way to the incoming
2526   // function argument and convert into a DIExpression.
2527   bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
2528   Value *Storage = DVI->getVariableLocationOp(0);
2529   Value *OriginalStorage = Storage;
2530   while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
2531     if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
2532       Storage = LdInst->getOperand(0);
2533       // FIXME: This is a heuristic that works around the fact that
2534       // LLVM IR debug intrinsics cannot yet distinguish between
2535       // memory and value locations: Because a dbg.declare(alloca) is
2536       // implicitly a memory location no DW_OP_deref operation for the
2537       // last direct load from an alloca is necessary.  This condition
2538       // effectively drops the *last* DW_OP_deref in the expression.
2539       if (!SkipOutermostLoad)
2540         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2541     } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
2542       Storage = StInst->getOperand(0);
2543     } else {
2544       SmallVector<uint64_t, 16> Ops;
2545       SmallVector<Value *, 0> AdditionalValues;
2546       Value *Op = llvm::salvageDebugInfoImpl(
2547           *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
2548           AdditionalValues);
2549       if (!Op || !AdditionalValues.empty()) {
2550         // If salvaging failed or salvaging produced more than one location
2551         // operand, give up.
2552         break;
2553       }
2554       Storage = Op;
2555       Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
2556     }
2557     SkipOutermostLoad = false;
2558   }
2559   if (!Storage)
2560     return;
2561 
2562   // Store a pointer to the coroutine frame object in an alloca so it
2563   // is available throughout the function when producing unoptimized
2564   // code. Extending the lifetime this way is correct because the
2565   // variable has been declared by a dbg.declare intrinsic.
2566   //
  // Avoid creating the alloca when it would just be eliminated by optimization
  // passes, which would leave the corresponding dbg.declares invalid.
2569   if (!OptimizeFrame && !EnableReuseStorageInFrame)
2570     if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
2571       auto &Cached = DbgPtrAllocaCache[Storage];
2572       if (!Cached) {
2573         Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
2574                                       Arg->getName() + ".debug");
2575         Builder.CreateStore(Storage, Cached);
2576       }
2577       Storage = Cached;
2578       // FIXME: LLVM lacks nuanced semantics to differentiate between
2579       // memory and direct locations at the IR level. The backend will
2580       // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
2581       // location. Thus, if there are deref and offset operations in the
2582       // expression, we need to add a DW_OP_deref at the *start* of the
2583       // expression to first load the contents of the alloca before
2584       // adjusting it with the expression.
2585       if (Expr && Expr->isComplex())
2586         Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2587     }
2588 
2589   DVI->replaceVariableLocationOp(OriginalStorage, Storage);
2590   DVI->setExpression(Expr);
  // We only hoist dbg.declare today, since it doesn't make sense to hoist
  // dbg.value or dbg.addr: they do not have the same function-wide guarantees
  // that dbg.declare does.
2594   if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
2595     if (auto *II = dyn_cast<InvokeInst>(Storage))
2596       DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
2597     else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
2598       DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
2599     else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
      assert(!InsertPt->isTerminator() &&
             "Unexpected terminator that could produce a storage value.");
2602       DVI->moveAfter(InsertPt);
2603     } else if (isa<Argument>(Storage))
2604       DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
2605   }
2606 }
2607 
2608 void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
2609   // Don't eliminate swifterror in async functions that won't be split.
2610   if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
2611     eliminateSwiftError(F, Shape);
2612 
2613   if (Shape.ABI == coro::ABI::Switch &&
2614       Shape.SwitchLowering.PromiseAlloca) {
2615     Shape.getSwitchCoroId()->clearPromise();
2616   }
2617 
2618   // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
2619   // intrinsics are in their own blocks to simplify the logic of building up
2620   // SuspendCrossing data.
2621   for (auto *CSI : Shape.CoroSuspends) {
2622     if (auto *Save = CSI->getCoroSave())
2623       splitAround(Save, "CoroSave");
2624     splitAround(CSI, "CoroSuspend");
2625   }
2626 
2627   // Put CoroEnds into their own blocks.
2628   for (AnyCoroEndInst *CE : Shape.CoroEnds) {
2629     splitAround(CE, "CoroEnd");
2630 
2631     // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend-crossing info is computed for
    // the uses of the musttail call. (Arguments to the coro.end instructions
    // would be ignored.)
2635     if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
2636       auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
2637       if (!MustTailCallFn)
2638         continue;
2639       IRBuilder<> Builder(AsyncEnd);
2640       SmallVector<Value *, 8> Args(AsyncEnd->args());
2641       auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
2642       auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
2643                                       Arguments, Builder);
2644       splitAround(Call, "MustTailCall.Before.CoroEnd");
2645     }
2646   }
2647 
  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
2650   cleanupSinglePredPHIs(F);
2651 
  // Transform multi-edge PHI nodes so that any value feeding into a PHI never
  // has its definition separated from the PHI by a suspend point.
2654   rewritePHIs(F);
2655 
2656   // Build suspend crossing info.
2657   SuspendCrossingInfo Checker(F, Shape);
2658 
2659   IRBuilder<> Builder(F.getContext());
2660   FrameDataInfo FrameData;
2661   SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
2662   SmallVector<Instruction*, 4> DeadInstructions;
2663 
2664   {
2665     SpillInfo Spills;
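    // Rematerializing an instruction next to its uses can cause its own
    // operands to become live across a suspend point, so scan a bounded number
    // of times to pick up any newly exposed materializable values.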
2666     for (int Repeat = 0; Repeat < 4; ++Repeat) {
2667       // See if there are materializable instructions across suspend points.
2668       for (Instruction &I : instructions(F))
2669         if (materializable(I)) {
2670           for (User *U : I.users())
2671             if (Checker.isDefinitionAcrossSuspend(I, U))
2672               Spills[&I].push_back(cast<Instruction>(U));
2673 
2674           // Manually add dbg.value metadata uses of I.
2675           SmallVector<DbgValueInst *, 16> DVIs;
2676           findDbgValues(DVIs, &I);
2677           for (auto *DVI : DVIs)
2678             if (Checker.isDefinitionAcrossSuspend(I, DVI))
2679               Spills[&I].push_back(DVI);
2680         }
2681 
2682       if (Spills.empty())
2683         break;
2684 
2685       // Rewrite materializable instructions to be materialized at the use
2686       // point.
2687       LLVM_DEBUG(dumpSpills("Materializations", Spills));
2688       rewriteMaterializableInstructions(Builder, Spills);
2689       Spills.clear();
2690     }
2691   }
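  // As an example of the rematerialization above (illustrative only): if
  //   %ext = zext i32 %n to i64
  // is defined before a suspend point but used after it, it is cheaper to
  // re-emit the zext next to each such use than to spill %ext into the frame
  // and reload it; materializable() decides which instructions qualify.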

  if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
      Shape.ABI != coro::ABI::RetconOnce)
    sinkLifetimeStartMarkers(F, Shape, Checker);

  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other non-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));
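  // (Rationale: once the coroutine suspends and control returns to its
  // caller, the original argument values are not guaranteed to still be
  // available, so any argument used on the far side of a suspend point has
  // to live in the frame.)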

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame; no
    // need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle coro.alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value.  The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore coro.alloca.get; we process it as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
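        // Tokens have no in-memory representation in LLVM IR (they cannot be
        // stored to or loaded from the frame), so the only option is to
        // report an error.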
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage DbgValueInsts whose values are already in
  // the frame. We handle the dbg.values for allocas specially.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
    });
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
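  // Lay out the frame and create the typed frame pointer. As a rough sketch
  // (buildFrameType is the authority on the details): for the switch ABI the
  // frame starts with the resume and destroy function pointers, with the
  // promise, the suspend index, the spilled values, and the non-local allocas
  // laid out after them; the exact ordering of those later fields is an
  // implementation detail of the optimized struct layout.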
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}