//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a coroutine frame structure to
// contain those values. All uses of those values are replaced with an
// appropriate GEP + load from the coroutine frame. At the point of the
// definition we spill the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and their indices.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    std::sort(V.begin(), V.end());
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = std::lower_bound(V.begin(), V.end(), BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the
// question: given two BasicBlocks A and B, is there a path from A to B that
// passes through a suspend point?
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but where at least one of the paths crosses
//          a suspend point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
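//
// For example, given the CFG below, where block S contains a suspend point
// and every edge points downward:
//
//         A
//         |
//         S   (Suspend = true)
//         |
//         B
//
// B's Consumes is {A, S, B} and B's Kills is {A, S}: every path from A (or S)
// to B crosses the suspend point, so a value defined in A and used in B must
// be spilled to the coroutine frame.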
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
    bool const Result = Block[UseIndex].Kills[DefIndex];
    DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                 << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHI nodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();
    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    return isDefinitionAcrossSuspend(I.getParent(), U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of
  // the coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing a coro.save also requires a spill, as any
  // code between coro.save and coro.suspend may resume the coroutine and all
  // of the state needs to be saved by that time.
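  //
  // For example, in the sequence below (@may_resume is a hypothetical
  // placeholder for any code that may resume the coroutine):
  //
  //   %save = call token @llvm.coro.save(i8* %hdl)
  //   call void @may_resume()
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // a value live across this range must already be in the frame when
  // @may_resume runs, hence the coro.save block kills everything it consumes,
  // just like the coro.suspend block does.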
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    markSuspendBlock(CSI->getCoroSave());
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    DEBUG(dbgs() << "iteration " << ++Iteration);
    DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data are still
          // available on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that S is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                       << "\n");
          DEBUG(dump("S.Kills", S.Kills));
          DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                       << "\n");
          DEBUG(dump("S.Consume", S.Consumes));
          DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

// We build up the list of spills for every case where a use is separated
// from the definition by a suspend point.

struct Spill : std::pair<Value *, Instruction *> {
  using base = std::pair<Value *, Instruction *>;

  Spill(Value *Def, User *U) : base(Def, cast<Instruction>(U)) {}

  Value *def() const { return first; }
  Instruction *user() const { return second; }
  BasicBlock *userBlock() const { return second->getParent(); }

  std::pair<Value *, BasicBlock *> getKey() const {
    return {def(), userBlock()};
  }

  bool operator<(Spill const &rhs) const { return getKey() < rhs.getKey(); }
};

// Note that there may be more than one record with the same value of Def in
// the SpillInfo vector.
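//
// For example, a value %x used in blocks bb1 and bb2 produces the records
// {%x, user-in-bb1} and {%x, user-in-bb2}. Sorting by (def, userBlock)
// groups all records for a definition together, so the consumers below can
// emit one frame slot per definition and one reload per use block.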
using SpillInfo = SmallVector<Spill, 8>;

#ifndef NDEBUG
static void dump(StringRef Title, SpillInfo const &Spills) {
  dbgs() << "------------- " << Title << "--------------\n";
  Value *CurrentValue = nullptr;
  for (auto const &E : Spills) {
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentValue->dump();
    }
    dbgs() << "   user: ";
    E.user()->dump();
  }
}
#endif

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  SpillInfo &Spills) {
  LLVMContext &C = F.getContext();
  SmallString<32> Name(F.getName());
  Name.append(".Frame");
  StructType *FrameTy = StructType::create(C, Name);
  auto *FramePtrTy = FrameTy->getPointerTo();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                 /*IsVarArgs=*/false);
  auto *FnPtrTy = FnTy->getPointerTo();

  // Figure out how wide an integer type storing the suspend index should be.
  unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
  Type *PromiseType = Shape.PromiseAlloca
                          ? Shape.PromiseAlloca->getType()->getElementType()
                          : Type::getInt1Ty(C);
  SmallVector<Type *, 8> Types{FnPtrTy, FnPtrTy, PromiseType,
                               Type::getIntNTy(C, IndexBits)};
  Value *CurrentDef = nullptr;

  // Create an entry for every spilled value.
  for (auto const &S : Spills) {
    if (CurrentDef == S.def())
      continue;

    CurrentDef = S.def();
    // PromiseAlloca was already added to the Types array earlier.
    if (CurrentDef == Shape.PromiseAlloca)
      continue;

    Type *Ty = nullptr;
    if (auto *AI = dyn_cast<AllocaInst>(CurrentDef))
      Ty = AI->getAllocatedType();
    else
      Ty = CurrentDef->getType();

    Types.push_back(Ty);
  }
  FrameTy->setBody(Types);

  return FrameTy;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//     %hdl = coro.begin(...)
//     whatever
//
// becomes:
//
//     %hdl = coro.begin(...)
//     %FramePtr = bitcast i8* %hdl to %f.frame*
//     br label %AllocaSpillBB
//
//   AllocaSpillBB:
//     ; geps corresponding to allocas that were moved to the coroutine frame
//     br label %PostSpill
//
//   PostSpill:
//     whatever
//
//
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  Type *FrameTy = FramePtrTy->getElementType();

  Value *CurrentValue = nullptr;
  BasicBlock *CurrentBlock = nullptr;
  Value *CurrentReload = nullptr;
  unsigned Index = coro::Shape::LastKnownField;

  // We need to keep track of any allocas that need "spilling" because they
  // now live in the coroutine frame: all accesses to them need to be changed,
  // not just the accesses across suspend points. We remember the allocas and
  // their frame indices so they can be handled once we have processed all the
  // spills.
  SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
  // The promise alloca (if present) has a fixed field number
  // (Shape::PromiseField).
  if (Shape.PromiseAlloca)
    Allocas.emplace_back(Shape.PromiseAlloca, coro::Shape::PromiseField);

  // Create a load instruction to reload the spilled value from the coroutine
  // frame.
  auto CreateReload = [&](Instruction *InsertBefore) {
    Builder.SetInsertPoint(InsertBefore);
    auto *G = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index,
                                                 CurrentValue->getName() +
                                                     Twine(".reload.addr"));
    return isa<AllocaInst>(CurrentValue)
               ? G
               : Builder.CreateLoad(G,
                                    CurrentValue->getName() + Twine(".reload"));
  };

  for (auto const &E : Spills) {
    // If we have not seen the value, generate a spill.
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentBlock = nullptr;
      CurrentReload = nullptr;

      ++Index;

      if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
        // A spilled AllocaInst will be replaced with a GEP from the coroutine
        // frame, so no spill is required.
        Allocas.emplace_back(AI, Index);
        if (!AI->isStaticAlloca())
          report_fatal_error("Coroutines cannot handle non static allocas yet");
      } else {
        // Otherwise, create a store instruction storing the value into the
        // coroutine frame.

        Instruction *InsertPt = nullptr;
        if (isa<Argument>(CurrentValue)) {
          // For arguments, we will place the store instruction right after
          // the coroutine frame pointer instruction, i.e. the bitcast of
          // coro.begin from i8* to %f.frame*.
          InsertPt = FramePtr->getNextNode();
        } else if (auto *II = dyn_cast<InvokeInst>(CurrentValue)) {
          // If we are spilling the result of an invoke instruction, split the
          // normal edge and insert the spill in the new block.
          auto NewBB = SplitEdge(II->getParent(), II->getNormalDest());
          InsertPt = NewBB->getTerminator();
        } else {
          // For all other values, the spill is placed immediately after
          // the definition.
          assert(!isa<TerminatorInst>(E.def()) && "unexpected terminator");
          InsertPt = cast<Instruction>(E.def())->getNextNode();
        }

        Builder.SetInsertPoint(InsertPt);
        auto *G = Builder.CreateConstInBoundsGEP2_32(
            FrameTy, FramePtr, 0, Index,
            CurrentValue->getName() + Twine(".spill.addr"));
        Builder.CreateStore(CurrentValue, G);
      }
    }

    // If we have not seen the use block, generate a reload in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt());
    }

    // If we have a single-edge PHINode, remove it and replace it with a
    // reload from the coroutine frame. (We already took care of multi-edge
    // PHINodes by rewriting them in the rewritePHIs function.)
    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentReload);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentValue in the current instruction with the
    // reload.
    E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();
  Shape.AllocaSpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  Shape.AllocaSpillBlock->splitBasicBlock(&Shape.AllocaSpillBlock->front(),
                                          "PostSpill");

  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  // If we found any allocas, replace all of their remaining uses with GEPs.
  for (auto &P : Allocas) {
    auto *G =
        Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, P.second);
    // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
    // here, as we are changing the location of the instruction.
    G->takeName(P.first);
    P.first->replaceAllUsesWith(G);
    P.first->eraseFromParent();
  }
  return FramePtr;
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI node.
  //
  //   loop:
  //     %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  //   loop.from.entry:
  //     %n.loop.pre = phi i32 [%n, %entry]
  //     br label %loop
  //   loop.from.loop:
  //     %inc.loop.pre = phi i32 [%inc, %loop]
  //     br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = SplitEdge(Pred, &BB);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
    auto *PN = cast<PHINode>(&BB.front());
    do {
      int Index = PN->getBasicBlockIndex(IncomingBB);
      Value *V = PN->getIncomingValue(Index);
      PHINode *InputV = PHINode::Create(
          V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
          &IncomingBB->front());
      InputV->addIncoming(V, Pred);
      PN->setIncomingValue(Index, InputV);
      PN = dyn_cast<PHINode>(PN->getNextNode());
    } while (PN);
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroBeginInst>(&I) ||
         isa<CoroSaveInst>(&I) || isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              SpillInfo const &Spills) {
  BasicBlock *CurrentBlock = nullptr;
  Instruction *CurrentMaterialization = nullptr;
  Instruction *CurrentDef = nullptr;

  for (auto const &E : Spills) {
    // If it is a new definition, update the CurrentXXX variables.
    if (CurrentDef != E.def()) {
      CurrentDef = cast<Instruction>(E.def());
      CurrentBlock = nullptr;
      CurrentMaterialization = nullptr;
    }

    // If we have not seen this block, materialize the value in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentMaterialization = CurrentDef->clone();
      CurrentMaterialization->setName(CurrentDef->getName());
      CurrentMaterialization->insertBefore(
          &*CurrentBlock->getFirstInsertionPt());
    }

    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentMaterialization);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentDef in the current instruction with the
    // CurrentMaterialization for the block.
    E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
  }
}

// Move early uses of spilled variables after CoroBegin.
// For example, if a parameter had its address taken, we may end up with code
// like:
//   define @f(i32 %n) {
//     %n.addr = alloca i32
//     store %n, %n.addr
//     ...
//     call @coro.begin
// We need to move the store after coro.begin.
static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree DT(F);
  SmallVector<Instruction *, 8> NeedsMoving;

  Value *CurrentValue = nullptr;

  for (auto const &E : Spills) {
    if (CurrentValue == E.def())
      continue;

    CurrentValue = E.def();

    for (User *U : CurrentValue->users()) {
      Instruction *I = cast<Instruction>(U);
      if (!DT.dominates(CoroBegin, I)) {
        // TODO: Make this more robust. Currently, if we run into a situation
        // where a simple instruction move won't work, we panic and
        // report_fatal_error.
        for (User *UI : I->users()) {
          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
            report_fatal_error("cannot move instruction since its users are not"
                               " dominated by CoroBegin");
        }

        DEBUG(dbgs() << "will move: " << *I << "\n");
        NeedsMoving.push_back(I);
      }
    }
  }

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *I : NeedsMoving)
    I->moveBefore(InsertPt);
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it ends up alone
// in its own block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Lower dbg.declare to dbg.value, since we are going to rewrite
  // access to local variables.
  LowerDbgDeclare(F);

  Shape.PromiseAlloca = Shape.CoroBegin->getId()->getPromise();
  if (Shape.PromiseAlloca) {
    Shape.CoroBegin->getId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // the SuspendCrossing data.
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    splitAround(CSI->getCoroSave(), "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put the fallthrough CoroEnd into its own block. Note: Shape::buildFrom
  // places the fallthrough coro.end as the first element of the CoroEnds
  // array.
  splitAround(Shape.CoroEnds.front(), "CoroEnd");

  // Transform multi-edge PHI nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  SpillInfo Spills;

  // See if there are materializable instructions across suspend points.
  for (Instruction &I : instructions(F))
    if (materializable(I))
      for (User *U : I.users())
        if (Checker.isDefinitionAcrossSuspend(I, U))
          Spills.emplace_back(&I, U);

  // Rewrite materializable instructions to be materialized at the use point.
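  // For example, a GEP or a cast computed before a suspend point and used
  // after it is cheaper to recompute in the use block than to store to and
  // reload from the coroutine frame.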
  std::sort(Spills.begin(), Spills.end());
  DEBUG(dump("Materializations", Spills));
  rewriteMaterializableInstructions(Builder, Spills);

  // Collect the spills for arguments and other not-materializable values.
  Spills.clear();
  for (Argument &A : F.getArgumentList())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills.emplace_back(&A, U);

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the coroutine frame.
    if (isCoroutineStructureIntrinsic(I))
      continue;
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.PromiseAlloca == &I)
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        assert(!materializable(I) &&
               "rewriteMaterializableInstructions did not do its job");
        Spills.emplace_back(&I, U);
      }
  }
  std::sort(Spills.begin(), Spills.end());
  DEBUG(dump("Spills", Spills));
  moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
}