//===- CoroFrame.cpp - Builds and manipulates coroutine frame ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    std::sort(V.begin(), V.end());
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = std::lower_bound(V.begin(), V.end(), BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
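// To make the sets concrete, here is a purely illustrative example (block
// names hypothetical):
//
//   entry --> susp --> resume
//     \________________^
//
// If 'susp' contains a suspend point, then once the propagation below reaches
// a fixed point, Block[resume].Consumes = {entry, susp, resume} (all three
// blocks can reach 'resume') and Block[resume].Kills = {entry, susp}: some
// path from 'entry' (and from 'susp') to 'resume' crosses the suspend point,
// so a value defined there and used in 'resume' must be spilled, even though
// the direct edge entry->resume does not cross it.
//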
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
    bool const Result = Block[UseIndex].Kills[DefIndex];
    DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                 << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();
    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    return isDefinitionAcrossSuspend(I.getParent(), U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump(" Consumes", Block[I].Consumes);
    dump(" Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine, and all of
  // the state needs to be saved by that time.
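  // For illustration, a hypothetical IR shape (names invented, not taken from
  // a test case):
  //
  //   %save = call token @llvm.coro.save(i8* %hdl)
  //   call void @on_suspend(...)  ; may resume the coroutine from elsewhere
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // Any value live across the call to @on_suspend must already be in the
  // frame, which is why the block containing coro.save is treated as a
  // suspend block below.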
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    markSuspendBlock(CSI->getCoroSave());
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    DEBUG(dbgs() << "iteration " << ++Iteration);
    DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data are still available
          // on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                       << "\n");
          DEBUG(dump("S.Kills", S.Kills));
          DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          DEBUG(dump("S.Consume", S.Consumes));
          DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

// We build up the list of spills for every case where a use is separated
// from the definition by a suspend point.

namespace {
class Spill {
  Value *Def;
  Instruction *User;

public:
  Spill(Value *Def, llvm::User *U) : Def(Def), User(cast<Instruction>(U)) {}

  Value *def() const { return Def; }
  Instruction *user() const { return User; }
  BasicBlock *userBlock() const { return User->getParent(); }
};
} // namespace

// Note that there may be more than one record with the same value of Def in
// the SpillInfo vector.
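// Because spills are collected by walking the users of one definition at a
// time, records that share a Def are adjacent, and the processing loops below
// rely on that grouping. Purely illustrative (names hypothetical): if %x,
// defined in %entry, is used in %resume.0 and %resume.1 across a suspend
// point, SpillInfo contains two consecutive records, {%x, use-in-resume.0}
// followed by {%x, use-in-resume.1}.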
using SpillInfo = SmallVector<Spill, 8>;

#ifndef NDEBUG
static void dump(StringRef Title, SpillInfo const &Spills) {
  dbgs() << "------------- " << Title << "--------------\n";
  Value *CurrentValue = nullptr;
  for (auto const &E : Spills) {
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentValue->dump();
    }
    dbgs() << " user: ";
    E.user()->dump();
  }
}
#endif

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     ... promise (if present) ...
//     int ResumeIndex;
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  SpillInfo &Spills) {
  LLVMContext &C = F.getContext();
  SmallString<32> Name(F.getName());
  Name.append(".Frame");
  StructType *FrameTy = StructType::create(C, Name);
  auto *FramePtrTy = FrameTy->getPointerTo();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                 /*IsVarArgs=*/false);
  auto *FnPtrTy = FnTy->getPointerTo();

  // Figure out how wide an integer type storing the suspend index should be.
  unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
  Type *PromiseType = Shape.PromiseAlloca
                          ? Shape.PromiseAlloca->getType()->getElementType()
                          : Type::getInt1Ty(C);
  SmallVector<Type *, 8> Types{FnPtrTy, FnPtrTy, PromiseType,
                               Type::getIntNTy(C, IndexBits)};
  Value *CurrentDef = nullptr;

  // Create an entry for every spilled value.
  for (auto const &S : Spills) {
    if (CurrentDef == S.def())
      continue;

    CurrentDef = S.def();
    // PromiseAlloca was already added to the Types array earlier.
    if (CurrentDef == Shape.PromiseAlloca)
      continue;

    Type *Ty = nullptr;
    if (auto *AI = dyn_cast<AllocaInst>(CurrentDef))
      Ty = AI->getAllocatedType();
    else
      Ty = CurrentDef->getType();

    Types.push_back(Ty);
  }
  FrameTy->setBody(Types);

  return FrameTy;
}
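
// A purely illustrative sketch (types chosen arbitrarily): a coroutine @f
// with two suspend points, an i32 promise, and one spilled double would get
//
//   %f.Frame = type { void (%f.Frame*)*,  ; resume function pointer
//                     void (%f.Frame*)*,  ; destroy function pointer
//                     i32,                ; promise
//                     i1,                 ; suspend index (2 suspends -> 1 bit)
//                     double }            ; spilled value
//
// with the first four fields at the fixed positions assumed by coro::Shape.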

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI instruction in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//     %hdl = coro.begin(...)
//     whatever
//
// becomes:
//
//     %hdl = coro.begin(...)
//     %FramePtr = bitcast i8* hdl to %f.frame*
//     br label %AllocaSpillBB
//
//   AllocaSpillBB:
//     ; geps corresponding to allocas that were moved to coroutine frame
//     br label PostSpill
//
//   PostSpill:
//     whatever
//
//
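// For a non-alloca value the rewrite inserts a store into its frame slot right
// after the definition and a reload in front of each use block. A purely
// illustrative sketch (value name and field index hypothetical):
//
//     %x = call i32 @get()
//     %x.spill.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
//                                            i32 0, i32 4
//     store i32 %x, i32* %x.spill.addr
//     ...suspend...
//     %x.reload.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
//                                             i32 0, i32 4
//     %x.reload = load i32, i32* %x.reload.addr
//
// Allocas get only the GEP (their users are redirected to the frame slot), so
// no reload load is emitted for them.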
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  Type *FrameTy = FramePtrTy->getElementType();

  Value *CurrentValue = nullptr;
  BasicBlock *CurrentBlock = nullptr;
  Value *CurrentReload = nullptr;
  unsigned Index = coro::Shape::LastKnownField;

  // We need to keep track of any allocas that need "spilling", since they will
  // live in the coroutine frame now: all accesses to them need to be changed,
  // not just the accesses across suspend points. We remember the allocas and
  // their frame indices to be handled once we have processed all the spills.
  SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
  // The promise alloca (if present) has a fixed field number
  // (Shape::PromiseField).
  if (Shape.PromiseAlloca)
    Allocas.emplace_back(Shape.PromiseAlloca, coro::Shape::PromiseField);

  // Create a load instruction to reload the spilled value from the coroutine
  // frame.
  auto CreateReload = [&](Instruction *InsertBefore) {
    Builder.SetInsertPoint(InsertBefore);
    auto *G = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index,
                                                 CurrentValue->getName() +
                                                     Twine(".reload.addr"));
    return isa<AllocaInst>(CurrentValue)
               ? G
               : Builder.CreateLoad(G,
                                    CurrentValue->getName() + Twine(".reload"));
  };

  for (auto const &E : Spills) {
    // If we have not seen the value, generate a spill.
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentBlock = nullptr;
      CurrentReload = nullptr;

      ++Index;

      if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
        // A spilled AllocaInst will be replaced with a GEP into the coroutine
        // frame; there is no spill required.
        Allocas.emplace_back(AI, Index);
        if (!AI->isStaticAlloca())
          report_fatal_error("Coroutines cannot handle non static allocas yet");
      } else {
        // Otherwise, create a store instruction storing the value into the
        // coroutine frame.

        Instruction *InsertPt = nullptr;
        if (isa<Argument>(CurrentValue)) {
          // For arguments, we will place the store instruction right after
          // the coroutine frame pointer instruction, i.e. the bitcast of
          // coro.begin from i8* to %f.frame*.
          InsertPt = FramePtr->getNextNode();
        } else if (auto *II = dyn_cast<InvokeInst>(CurrentValue)) {
          // If we are spilling the result of the invoke instruction, split the
          // normal edge and insert the spill in the new block.
          auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
          InsertPt = NewBB->getTerminator();
        } else if (isa<PHINode>(CurrentValue)) {
          // Skip the PHINode and EH pad instructions.
          BasicBlock *DefBlock = cast<Instruction>(E.def())->getParent();
          if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
            InsertPt = splitBeforeCatchSwitch(CSI);
          else
            InsertPt = &*DefBlock->getFirstInsertionPt();
        } else {
          // For all other values, the spill is placed immediately after
          // the definition.
          assert(!isa<TerminatorInst>(E.def()) && "unexpected terminator");
          InsertPt = cast<Instruction>(E.def())->getNextNode();
        }

        Builder.SetInsertPoint(InsertPt);
        auto *G = Builder.CreateConstInBoundsGEP2_32(
            FrameTy, FramePtr, 0, Index,
            CurrentValue->getName() + Twine(".spill.addr"));
        Builder.CreateStore(CurrentValue, G);
      }
    }

    // If we have not seen the use block, generate a reload in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt());
    }

    // If we have a single edge PHINode, remove it and replace it with a reload
    // from the coroutine frame. (We already took care of multi edge PHINodes
    // by rewriting them in the rewritePHIs function).
    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentReload);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentValue in the current instruction with the
    // reload.
    E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();
  Shape.AllocaSpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  Shape.AllocaSpillBlock->splitBasicBlock(&Shape.AllocaSpillBlock->front(),
                                          "PostSpill");

  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  // If we found any allocas, replace all of their remaining uses with GEPs.
  for (auto &P : Allocas) {
    auto *G =
        Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, P.second);
    // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
    // here, as we are changing the location of the instruction.
    G->takeName(P.first);
    P.first->replaceAllUsesWith(G);
    P.first->eraseFromParent();
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(TerminatorInst *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred,
                           PHINode *LandingPadReplacement) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (LandingPadReplacement == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up. In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB. This
    // happens because the BB list of PHI nodes are usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case do
// EH-specific handling.
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI node.
  //
  // loop:
  //    %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
    auto *PN = cast<PHINode>(&BB.front());
    do {
      int Index = PN->getBasicBlockIndex(IncomingBB);
      Value *V = PN->getIncomingValue(Index);
      PHINode *InputV = PHINode::Create(
          V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
          &IncomingBB->front());
      InputV->addIncoming(V, Pred);
      PN->setIncomingValue(Index, InputV);
      PN = dyn_cast<PHINode>(PN->getNextNode());
    } while (PN != ReplPHI); // ReplPHI is either null or the PHI that replaced
                             // the landing pad.
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge above cloned the original landing pad, so we
    // no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to spilling
// the result into a coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// in the block containing the use instead of spilling it to the frame.
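//
// A purely illustrative sketch (names hypothetical): if
//
//   %elem.addr = getelementptr inbounds %S, %S* %s, i32 0, i32 1
//
// is defined before a suspend point and used after it, the GEP is cheap to
// recompute, so a clone of it is inserted at the top of the block containing
// the use and the use is redirected to the clone. Its operand %s may in turn
// become live across the suspend point; the repeated passes in
// buildCoroutineFrame either materialize or spill such operands.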
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              SpillInfo const &Spills) {
  BasicBlock *CurrentBlock = nullptr;
  Instruction *CurrentMaterialization = nullptr;
  Instruction *CurrentDef = nullptr;

  for (auto const &E : Spills) {
    // If it is a new definition, update CurrentXXX variables.
    if (CurrentDef != E.def()) {
      CurrentDef = cast<Instruction>(E.def());
      CurrentBlock = nullptr;
      CurrentMaterialization = nullptr;
    }

    // If we have not seen this block, materialize the value.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentMaterialization = cast<Instruction>(CurrentDef)->clone();
      CurrentMaterialization->setName(CurrentDef->getName());
      CurrentMaterialization->insertBefore(
          &*CurrentBlock->getFirstInsertionPt());
    }

    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentMaterialization);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentDef in the current instruction with the
    // CurrentMaterialization for the block.
    E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
  }
}

// Move early uses of spilled variables after CoroBegin.
// For example, if a parameter had its address taken, we may end up with code
// like:
//   define @f(i32 %n) {
//     %n.addr = alloca i32
//     store %n, %n.addr
//     ...
//     call @coro.begin
//
// We need to move the store after coro.begin.
static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree DT(F);
  SmallVector<Instruction *, 8> NeedsMoving;

  Value *CurrentValue = nullptr;

  for (auto const &E : Spills) {
    if (CurrentValue == E.def())
      continue;

    CurrentValue = E.def();

    for (User *U : CurrentValue->users()) {
      Instruction *I = cast<Instruction>(U);
      if (!DT.dominates(CoroBegin, I)) {
        // TODO: Make this more robust. Currently if we run into a situation
        // where a simple instruction move won't work, we panic and
        // report_fatal_error.
        for (User *UI : I->users()) {
          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
            report_fatal_error("cannot move instruction since its users are "
                               "not dominated by CoroBegin");
        }

        DEBUG(dbgs() << "will move: " << *I << "\n");
        NeedsMoving.push_back(I);
      }
    }
  }

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *I : NeedsMoving)
    I->moveBefore(InsertPt);
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Lower coro.dbg.declare to coro.dbg.value, since we are going to rewrite
  // access to local variables.
  LowerDbgDeclare(F);

  Shape.PromiseAlloca = Shape.CoroBegin->getId()->getPromise();
  if (Shape.PromiseAlloca) {
    Shape.CoroBegin->getId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    splitAround(CSI->getCoroSave(), "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (CoroEndInst *CE : Shape.CoroEnds)
    splitAround(CE, "CoroEnd");

  // Transform multi-edge PHI nodes, so that any value feeding into a PHI never
  // has its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  SpillInfo Spills;

  for (int Repeat = 0; Repeat < 4; ++Repeat) {
    // See if there are materializable instructions across suspend points.
    for (Instruction &I : instructions(F))
      if (materializable(I))
        for (User *U : I.users())
          if (Checker.isDefinitionAcrossSuspend(I, U))
            Spills.emplace_back(&I, U);

    if (Spills.empty())
      break;

    // Rewrite materializable instructions to be materialized at the use point.
    DEBUG(dump("Materializations", Spills));
    rewriteMaterializableInstructions(Builder, Spills);
    Spills.clear();
  }

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills.emplace_back(&A, U);

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.PromiseAlloca == &I)
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        Spills.emplace_back(&I, U);
      }
  }
  DEBUG(dump("Spills", Spills));
  moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
}