//===- CoroFrame.cpp - Builds and manipulates coroutine frame ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
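// For illustration, a value live across a suspend point is rewritten roughly
// as follows (a hypothetical sketch; the field index 4 is made up):
//
//   %x = call i32 @compute()              ; definition
//   ; ... suspend point ...
//   %y = add i32 %x, 1                    ; use
//
// becomes
//
//   %x = call i32 @compute()
//   %x.spill.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
//                                          i32 0, i32 4
//   store i32 %x, i32* %x.spill.addr      ; spill at the definition
//   ; ... suspend point ...
//   %x.reload.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
//                                           i32 0, i32 4
//   %x.reload = load i32, i32* %x.reload.addr ; reload in the use block
//   %y = add i32 %x.reload, 1
//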
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = std::lower_bound(V.begin(), V.end(), BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows answering whether,
// given two BasicBlocks A and B, there is a path from A to B that passes
// through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
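// For example (a hypothetical sketch), in the CFG
//
//   entry -> susp -> use         ; 'susp' contains a suspend point
//
// block 'use' Consumes {entry, susp, use}, and because the path from 'entry'
// passes through 'susp', its Kills set is {entry, susp}. So a value defined
// in 'entry' and used in 'use' must be spilled to the coroutine frame.
//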
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();
    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    return isDefinitionAcrossSuspend(I.getParent(), U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
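  // For instance (a hypothetical sketch), in
  //
  //   %save = call token @llvm.coro.save(i8* %hdl)
  //   call void @enqueue(i8* %hdl)   ; may resume the coroutine on another
  //                                  ; thread before coro.suspend executes
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // any value live at the coro.save must already be in the frame.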
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    markSuspendBlock(CSI->getCoroSave());
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend nor a coro.end
          // block; make sure that it is not in its own kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

// We build up the list of spills for every case where a use is separated
// from the definition by a suspend point.

namespace {
class Spill {
  Value *Def = nullptr;
  Instruction *User = nullptr;
  unsigned FieldNo = 0;

public:
  Spill(Value *Def, llvm::User *U) : Def(Def), User(cast<Instruction>(U)) {}

  Value *def() const { return Def; }
  Instruction *user() const { return User; }
  BasicBlock *userBlock() const { return User->getParent(); }

  // Note that the field index is stored in the first SpillEntry for a
  // particular definition. Subsequent mentions of a definition do not have
  // fieldNo assigned. This works out fine as the users of Spills capture the
  // info about the definition the first time they encounter it. Consider
  // refactoring SpillInfo into two arrays to normalize the spill
  // representation.
  unsigned fieldIndex() const {
    assert(FieldNo && "Accessing unassigned field");
    return FieldNo;
  }
  void setFieldIndex(unsigned FieldNumber) {
    assert(!FieldNo && "Reassigning field number");
    FieldNo = FieldNumber;
  }
};
} // namespace

// Note that there may be more than one record with the same value of Def in
// the SpillInfo vector.
using SpillInfo = SmallVector<Spill, 8>;
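
// For example (a hypothetical sketch), spills for a value %x placed in frame
// field 4 and a value %y placed in field 5 could look like:
//
//   {%x, user1, FieldNo = 4}
//   {%x, user2, FieldNo = 0}   ; field number set only on the first record
//   {%y, user3, FieldNo = 5}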

#ifndef NDEBUG
static void dump(StringRef Title, SpillInfo const &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  Value *CurrentValue = nullptr;
  for (auto const &E : Spills) {
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentValue->dump();
    }
    dbgs() << "   user: ";
    E.user()->dump();
  }
}
#endif

namespace {
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
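//
// For example (hypothetical numbers), if the struct is currently 20 bytes and
// the next field is an i32 with a forced alignment of 16, the natural offset
// would be 20 but the forced offset is 32, so a [12 x i8] padding field is
// inserted first.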
struct PaddingCalculator {
  const DataLayout &DL;
  LLVMContext &Context;
  unsigned StructSize = 0;

  PaddingCalculator(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  // Replicate the logic from IR/DataLayout.cpp to match field offset
  // computation for LLVM structs.
  void addType(Type *Ty) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    if ((StructSize & (TyAlign - 1)) != 0)
      StructSize = alignTo(StructSize, TyAlign);

    StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item.
  }

  void addTypes(SmallVectorImpl<Type *> const &Types) {
    for (auto *Ty : Types)
      addType(Ty);
  }

  unsigned computePadding(Type *Ty, unsigned ForcedAlignment) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    auto Natural = alignTo(StructSize, TyAlign);
    auto Forced = alignTo(StructSize, ForcedAlignment);

    // Return how many bytes of padding we need to insert.
    if (Natural != Forced)
      return std::max(Natural, Forced) - StructSize;

    // Rely on natural alignment.
    return 0;
  }

  // If padding is required, return the padding field type to insert.
  ArrayType *getPaddingType(Type *Ty, unsigned ForcedAlignment) {
    if (auto Padding = computePadding(Ty, ForcedAlignment))
      return ArrayType::get(Type::getInt8Ty(Context), Padding);

    return nullptr;
  }
};
} // namespace

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
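//
// For a coroutine @f with two suspend points, no promise, and a single
// spilled i32 (a hypothetical sketch), the frame might come out as:
//
//   %f.Frame = type { void (%f.Frame*)*, void (%f.Frame*)*, i1, i1, i32 }
//
// where the first i1 stands in for the absent promise and the second i1 is
// the 1-bit suspend index.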
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  SpillInfo &Spills) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PaddingCalculator Padder(C, DL);
  SmallString<32> Name(F.getName());
  Name.append(".Frame");
  StructType *FrameTy = StructType::create(C, Name);
  auto *FramePtrTy = FrameTy->getPointerTo();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                 /*IsVarArgs=*/false);
  auto *FnPtrTy = FnTy->getPointerTo();

  // Figure out how wide an integer type storing the suspend index should be.
  unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
  Type *PromiseType = Shape.PromiseAlloca
                          ? Shape.PromiseAlloca->getType()->getElementType()
                          : Type::getInt1Ty(C);
  SmallVector<Type *, 8> Types{FnPtrTy, FnPtrTy, PromiseType,
                               Type::getIntNTy(C, IndexBits)};
  Value *CurrentDef = nullptr;

  Padder.addTypes(Types);

  // Create an entry for every spilled value.
  for (auto &S : Spills) {
    if (CurrentDef == S.def())
      continue;

    CurrentDef = S.def();
    // PromiseAlloca was already added to the Types array earlier.
    if (CurrentDef == Shape.PromiseAlloca)
      continue;

    Type *Ty = nullptr;
    if (auto *AI = dyn_cast<AllocaInst>(CurrentDef)) {
      Ty = AI->getAllocatedType();
      if (unsigned AllocaAlignment = AI->getAlignment()) {
        // If alignment is specified in the alloca, see if we need to insert
        // extra padding.
        if (auto PaddingTy = Padder.getPaddingType(Ty, AllocaAlignment)) {
          Types.push_back(PaddingTy);
          Padder.addType(PaddingTy);
        }
      }
    } else {
      Ty = CurrentDef->getType();
    }
    S.setFieldIndex(Types.size());
    Types.push_back(Ty);
    Padder.addType(Ty);
  }
  FrameTy->setBody(Types);

  return FrameTy;
}

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
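//
// For example (a hypothetical sketch), a block
//
//   bb:
//     %p = phi ...
//     %cs = catchswitch within none [label %handler] unwind to caller
//
// becomes
//
//   bb:
//     %p = phi ...
//     %cp = cleanuppad within none []
//     cleanupret from %cp unwind label %bb.split ; spills inserted before this
//   bb.split:
//     %cs = catchswitch within none [label %handler] unwind to caller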
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from the coroutine frame + loads and stores. Create
// an AllocaSpillBB that will become the new entry block for the resume parts
// of the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label %PostSpill
//
//  PostSpill:
//    whatever
//
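// Allocas are handled differently from other spilled values: a spilled alloca
// is not loaded back into a register; every use is simply redirected to a GEP
// into the frame. For instance (a hypothetical sketch, field 4 made up):
//
//   %n.addr = alloca i32
//
// has all of its uses rewritten to use
//
//   %n.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
//                                    i32 0, i32 4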
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  Type *FrameTy = FramePtrTy->getElementType();

  Value *CurrentValue = nullptr;
  BasicBlock *CurrentBlock = nullptr;
  Value *CurrentReload = nullptr;
  unsigned Index = 0; // Proper field number will be read from field definition.

  // We need to keep track of any allocas that need "spilling", since they
  // will live in the coroutine frame now. All accesses to them need to be
  // changed, not just the accesses across suspend points, so we remember the
  // allocas and their indices to be handled once we have processed all the
  // spills.
  SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
  // Promise alloca (if present) has a fixed field number (Shape::PromiseField).
  if (Shape.PromiseAlloca)
    Allocas.emplace_back(Shape.PromiseAlloca, coro::Shape::PromiseField);

  // Create a load instruction to reload the spilled value from the coroutine
  // frame.
  auto CreateReload = [&](Instruction *InsertBefore) {
    assert(Index && "accessing unassigned field number");
    Builder.SetInsertPoint(InsertBefore);
    auto *G = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index,
                                                 CurrentValue->getName() +
                                                     Twine(".reload.addr"));
    return isa<AllocaInst>(CurrentValue)
               ? G
               : Builder.CreateLoad(G,
                                    CurrentValue->getName() + Twine(".reload"));
  };

  for (auto const &E : Spills) {
    // If we have not seen the value, generate a spill.
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentBlock = nullptr;
      CurrentReload = nullptr;

      Index = E.fieldIndex();

      if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
        // A spilled AllocaInst will be replaced with a GEP from the coroutine
        // frame; there is no spill required.
        Allocas.emplace_back(AI, Index);
        if (!AI->isStaticAlloca())
          report_fatal_error("Coroutines cannot handle non static allocas yet");
      } else {
        // Otherwise, create a store instruction storing the value into the
        // coroutine frame.

        Instruction *InsertPt = nullptr;
        if (isa<Argument>(CurrentValue)) {
          // For arguments, we will place the store instruction right after
          // the coroutine frame pointer instruction, i.e. bitcast of
          // coro.begin from i8* to %f.frame*.
          InsertPt = FramePtr->getNextNode();
        } else if (auto *II = dyn_cast<InvokeInst>(CurrentValue)) {
          // If we are spilling the result of the invoke instruction, split the
          // normal edge and insert the spill in the new block.
          auto NewBB = SplitEdge(II->getParent(), II->getNormalDest());
          InsertPt = NewBB->getTerminator();
        } else if (isa<PHINode>(CurrentValue)) {
          // Skip PHINodes and EH pad instructions.
          BasicBlock *DefBlock = cast<Instruction>(E.def())->getParent();
          if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
            InsertPt = splitBeforeCatchSwitch(CSI);
          else
            InsertPt = &*DefBlock->getFirstInsertionPt();
        } else {
          // For all other values, the spill is placed immediately after
          // the definition.
          assert(!cast<Instruction>(E.def())->isTerminator() &&
                 "unexpected terminator");
          InsertPt = cast<Instruction>(E.def())->getNextNode();
        }

        Builder.SetInsertPoint(InsertPt);
        auto *G = Builder.CreateConstInBoundsGEP2_32(
            FrameTy, FramePtr, 0, Index,
            CurrentValue->getName() + Twine(".spill.addr"));
        Builder.CreateStore(CurrentValue, G);
      }
    }

    // If we have not seen the use block, generate a reload in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt());
    }

    // If we have a single edge PHINode, remove it and replace it with a reload
    // from the coroutine frame. (We already took care of multi edge PHINodes
    // by rewriting them in the rewritePHIs function).
    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentReload);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentValue in the current instruction with reload.
    E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();
  Shape.AllocaSpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  Shape.AllocaSpillBlock->splitBasicBlock(&Shape.AllocaSpillBlock->front(),
                                          "PostSpill");

  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  // If we found any allocas, replace all of their remaining uses with GEPs.
  for (auto &P : Allocas) {
    auto *G =
        Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, P.second);
    // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
    // here, as we are changing the location of the instruction.
    G->takeName(P.first);
    P.first->replaceAllUsesWith(G);
    P.first->eraseFromParent();
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred,
                           PHINode *LandingPadReplacement) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (LandingPadReplacement == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up.  In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB.  This
    // happens because the BB list of PHI nodes are usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
// specific handling.
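//
// For example (a hypothetical sketch), splitting the unwind edge of an invoke
// into a cleanuppad-led successor produces:
//
//   invoke void @g() to label %cont unwind label %new.bb
// new.bb:
//   %new.cp = cleanuppad within %parent []
//   cleanupret from %new.cp unwind label %succ
//
// so that the per-edge PHI values created by rewritePHIs have a block to live
// in without disturbing the EH pad that begins %succ.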
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-incoming PHI nodes.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
    auto *PN = cast<PHINode>(&BB.front());
    do {
      int Index = PN->getBasicBlockIndex(IncomingBB);
      Value *V = PN->getIncomingValue(Index);
      PHINode *InputV = PHINode::Create(
          V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
          &IncomingBB->front());
      InputV->addIncoming(V, Pred);
      PN->setIncomingValue(Index, InputV);
      PN = dyn_cast<PHINode>(PN->getNextNode());
    } while (PN != ReplPHI); // ReplPHI is either null or the PHI that replaced
                             // the landing pad.
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to
// spilling the result into a coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}
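
// For example (a hypothetical sketch), rather than spilling %p below, the GEP
// is cheap to recompute from %x after the suspend:
//
//   %p = getelementptr i32, i32* %x, i32 1
//   ; ... suspend point ...
//   %v = load i32, i32* %p     ; rewritten to use a clone of %p placed here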

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              SpillInfo const &Spills) {
  BasicBlock *CurrentBlock = nullptr;
  Instruction *CurrentMaterialization = nullptr;
  Instruction *CurrentDef = nullptr;

  for (auto const &E : Spills) {
    // If it is a new definition, update CurrentXXX variables.
    if (CurrentDef != E.def()) {
      CurrentDef = cast<Instruction>(E.def());
      CurrentBlock = nullptr;
      CurrentMaterialization = nullptr;
    }

    // If we have not seen this block, materialize the value.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentMaterialization = cast<Instruction>(CurrentDef)->clone();
      CurrentMaterialization->setName(CurrentDef->getName());
      CurrentMaterialization->insertBefore(
          &*CurrentBlock->getFirstInsertionPt());
    }

    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentMaterialization);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentDef in the current instruction with the
    // CurrentMaterialization for the block.
    E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
  }
}

// Move early uses of spilled variables after CoroBegin.
// For example, if a parameter had its address taken, we may end up with code
// like this:
//        define @f(i32 %n) {
//          %n.addr = alloca i32
//          store %n, %n.addr
//          ...
//          call @coro.begin
//
// We need to move the store after coro.begin.
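// After the move (a hypothetical sketch), the store is dominated by
// coro.begin and thus by the frame pointer it will be rewritten to use:
//
//        define @f(i32 %n) {
//          %n.addr = alloca i32
//          ...
//          call @coro.begin
//          store %n, %n.addr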
static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree DT(F);
  SmallVector<Instruction *, 8> NeedsMoving;

  Value *CurrentValue = nullptr;

  for (auto const &E : Spills) {
    if (CurrentValue == E.def())
      continue;

    CurrentValue = E.def();

    for (User *U : CurrentValue->users()) {
      Instruction *I = cast<Instruction>(U);
      if (!DT.dominates(CoroBegin, I)) {
        LLVM_DEBUG(dbgs() << "will move: " << *I << "\n");

        // TODO: Make this more robust. Currently if we run into a situation
        // where a simple instruction move won't work, we panic and call
        // report_fatal_error.
        for (User *UI : I->users()) {
          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
            report_fatal_error("cannot move instruction since its users are not"
                               " dominated by CoroBegin");
        }

        NeedsMoving.push_back(I);
      }
    }
  }

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *I : NeedsMoving)
    I->moveBefore(InsertPt);
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}
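
// For example (a hypothetical sketch), splitAround(%s, "CoroSuspend") turns
//
//   bb:
//     ...
//     %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//     ...
//
// into
//
//   bb:
//     ...
//     br label %CoroSuspend
//   CoroSuspend:
//     %s = call i8 @llvm.coro.suspend(token %save, i1 false)
//     br label %AfterCoroSuspend
//   AfterCoroSuspend:
//     ...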

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Lower coro.dbg.declare to coro.dbg.value, since we are going to rewrite
  // access to local variables.
  LowerDbgDeclare(F);

  Shape.PromiseAlloca = Shape.CoroBegin->getId()->getPromise();
  if (Shape.PromiseAlloca) {
    Shape.CoroBegin->getId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    splitAround(CSI->getCoroSave(), "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (CoroEndInst *CE : Shape.CoroEnds)
    splitAround(CE, "CoroEnd");

  // Transform multi-edge PHI nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  SpillInfo Spills;

  for (int Repeat = 0; Repeat < 4; ++Repeat) {
    // See if there are materializable instructions across suspend points.
    for (Instruction &I : instructions(F))
      if (materializable(I))
        for (User *U : I.users())
          if (Checker.isDefinitionAcrossSuspend(I, U))
            Spills.emplace_back(&I, U);

    if (Spills.empty())
      break;

    // Rewrite materializable instructions to be materialized at the use point.
    LLVM_DEBUG(dump("Materializations", Spills));
    rewriteMaterializableInstructions(Builder, Spills);
    Spills.clear();
  }

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills.emplace_back(&A, U);

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.PromiseAlloca == &I)
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        Spills.emplace_back(&I, U);
      }
  }
  LLVM_DEBUG(dump("Spills", Spills));
  moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
}