//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend block.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between basic blocks and their numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    std::sort(V.begin(), V.end());
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = std::lower_bound(V.begin(), V.end(), BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', where at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
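//
// For illustration, consider a hypothetical CFG Entry -> Susp -> Use, where
// Susp contains a suspend point. Once the analysis converges, Use.Consumes is
// {Entry, Susp, Use} and Use.Kills is {Entry, Susp}, so any value defined in
// Entry and used in Use must be spilled to the coroutine frame.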
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
    bool const Result = Block[UseIndex].Kills[DefIndex];
    DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                 << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();
    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    return isDefinitionAcrossSuspend(I.getParent(), U);
  }
};
} // end anonymous namespace

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
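  //
  // For illustration (hypothetical IR; @may_resume stands in for any call
  // that could hand the coroutine handle to another thread):
  //
  //   %x = ...
  //   %save = call token @llvm.coro.save(i8* %hdl)
  //   call void @may_resume()          ; the coroutine may resume here
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   ... use of %x after the suspend ...
  //
  // %x must already be in the coroutine frame by the time @may_resume runs,
  // so for spilling purposes coro.save is treated like the suspend itself.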
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    markSuspendBlock(CSI->getCoroSave());
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    DEBUG(dbgs() << "iteration " << ++Iteration);
    DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data are still available
          // on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure it is not in its own kill
          // set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                       << "\n");
          DEBUG(dump("S.Kills", S.Kills));
          DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                       << "\n");
          DEBUG(dump("S.Consume", S.Consumes));
          DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

// We build up the list of spills for every case where a use is separated
// from the definition by a suspend point.

struct Spill : std::pair<Value *, Instruction *> {
  using base = std::pair<Value *, Instruction *>;

  Spill(Value *Def, User *U) : base(Def, cast<Instruction>(U)) {}

  Value *def() const { return first; }
  Instruction *user() const { return second; }
  BasicBlock *userBlock() const { return second->getParent(); }

  std::pair<Value *, BasicBlock *> getKey() const {
    return {def(), userBlock()};
  }

  bool operator<(Spill const &rhs) const { return getKey() < rhs.getKey(); }
};

// Note that there may be more than one record with the same value of Def in
// the SpillInfo vector.
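// For example, a value %v (a hypothetical name) used in two blocks that both
// sit across a suspend point from its definition yields two records, one per
// use. Sorting by (def, userBlock) groups such records together, so the
// consumers below emit one spill per value and one reload per user block.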
using SpillInfo = SmallVector<Spill, 8>;

#ifndef NDEBUG
static void dump(StringRef Title, SpillInfo const &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  Value *CurrentValue = nullptr;
  for (auto const &E : Spills) {
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentValue->dump();
    }
    dbgs() << "   user: ";
    E.user()->dump();
  }
}
#endif

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     ... promise (if present) ...
//     int ResumeIndex;
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  SpillInfo &Spills) {
  LLVMContext &C = F.getContext();
  SmallString<32> Name(F.getName());
  Name.append(".Frame");
  StructType *FrameTy = StructType::create(C, Name);
  auto *FramePtrTy = FrameTy->getPointerTo();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                 /*IsVarArg=*/false);
  auto *FnPtrTy = FnTy->getPointerTo();

  // Figure out how wide an integer type storing the suspend index should be.
  unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
  Type *PromiseType = Shape.PromiseAlloca
                          ? Shape.PromiseAlloca->getType()->getElementType()
                          : Type::getInt1Ty(C);
  SmallVector<Type *, 8> Types{FnPtrTy, FnPtrTy, PromiseType,
                               Type::getIntNTy(C, IndexBits)};
  Value *CurrentDef = nullptr;

  // Create an entry for every spilled value.
  for (auto const &S : Spills) {
    if (CurrentDef == S.def())
      continue;

    CurrentDef = S.def();
    // PromiseAlloca was already added to the Types array earlier.
    if (CurrentDef == Shape.PromiseAlloca)
      continue;

    Type *Ty = nullptr;
    if (auto *AI = dyn_cast<AllocaInst>(CurrentDef))
      Ty = AI->getAllocatedType();
    else
      Ty = CurrentDef->getType();

    Types.push_back(Ty);
  }
  FrameTy->setBody(Types);

  return FrameTy;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from the coroutine frame + loads and stores. Create
// an AllocaSpillBB that will become the new entry block for the resume parts
// of the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* %hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to the coroutine frame
//    br label %PostSpill
//
//  PostSpill:
//    whatever
//
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  PointerType *FramePtrTy = Shape.FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  Type *FrameTy = FramePtrTy->getElementType();

  Value *CurrentValue = nullptr;
  BasicBlock *CurrentBlock = nullptr;
  Value *CurrentReload = nullptr;
  unsigned Index = coro::Shape::LastKnownField;

  // We need to keep track of any allocas that need "spilling": since they
  // will live in the coroutine frame now, all accesses to them need to be
  // changed, not just the accesses across suspend points. We remember the
  // allocas and their indices to be handled once we have processed all the
  // spills.
  SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
  // The promise alloca (if present) has a fixed field number
  // (Shape::PromiseField).
  if (Shape.PromiseAlloca)
    Allocas.emplace_back(Shape.PromiseAlloca, coro::Shape::PromiseField);

  // Create a load instruction to reload the spilled value from the coroutine
  // frame.
  auto CreateReload = [&](Instruction *InsertBefore) {
    Builder.SetInsertPoint(InsertBefore);
    auto *G = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index,
                                                 CurrentValue->getName() +
                                                     Twine(".reload.addr"));
    return isa<AllocaInst>(CurrentValue)
               ? G
               : Builder.CreateLoad(G,
                                    CurrentValue->getName() + Twine(".reload"));
  };

  for (auto const &E : Spills) {
    // If we have not seen the value, generate a spill.
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentBlock = nullptr;
      CurrentReload = nullptr;

      ++Index;

      if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
        // A spilled AllocaInst will be replaced with a GEP from the coroutine
        // frame; no spill is required.
        Allocas.emplace_back(AI, Index);
        if (!AI->isStaticAlloca())
          report_fatal_error("Coroutines cannot handle non-static allocas yet");
      } else {
        // Otherwise, create a store instruction storing the value into the
        // coroutine frame. For an argument, we place the store instruction
        // right after the coroutine frame pointer instruction, i.e. the
        // bitcast of coro.begin from i8* to %f.frame*. For all other values,
        // the spill is placed immediately after the definition.
        Builder.SetInsertPoint(
            isa<Argument>(CurrentValue)
                ? FramePtr->getNextNode()
                : cast<Instruction>(E.def())->getNextNode());

        auto *G = Builder.CreateConstInBoundsGEP2_32(
            FrameTy, FramePtr, 0, Index,
            CurrentValue->getName() + Twine(".spill.addr"));
        Builder.CreateStore(CurrentValue, G);
      }
    }

    // If we have not seen the use block, generate a reload in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt());
    }

    // If we have a single-edge PHINode, remove it and replace it with a reload
    // from the coroutine frame. (We already took care of multi-edge PHINodes
    // by rewriting them in the rewritePHIs function.)
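    // For illustration (hypothetical IR), a rewritten single-edge PHI such as
    //   %v = phi i32 [ %def, %pred ]
    // merely forwards %def, so it can be erased and its uses redirected to
    //   %def.reload = load i32, i32* %def.reload.addr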
    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentReload);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentValue in the current instruction with the
    // reload.
    E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();
  Shape.AllocaSpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  Shape.AllocaSpillBlock->splitBasicBlock(&Shape.AllocaSpillBlock->front(),
                                          "PostSpill");

  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  // If we found any allocas, replace all of their remaining uses with GEPs.
  for (auto &P : Allocas) {
    auto *G =
        Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, P.second);
    // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
    // here, as we are changing the location of the instruction.
    G->takeName(P.first);
    P.first->replaceAllUsesWith(G);
    P.first->eraseFromParent();
  }
  return FramePtr;
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block that holds the incoming
  // values in single-edge PHI nodes.
  //
  // loop:
  //    %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = SplitEdge(Pred, &BB);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
    auto *PN = cast<PHINode>(&BB.front());
    do {
      int Index = PN->getBasicBlockIndex(IncomingBB);
      Value *V = PN->getIncomingValue(Index);
      PHINode *InputV = PHINode::Create(
          V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
          &IncomingBB->front());
      InputV->addIncoming(V, Pred);
      PN->setIncomingValue(Index, InputV);
      PN = dyn_cast<PHINode>(PN->getNextNode());
    } while (PN);
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to spilling
// the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroBeginInst>(&I) ||
         isa<CoroSaveInst>(&I) || isa<CoroSuspendInst>(&I);
}

// For every use of a value that is across a suspend point, recreate that value
// after the suspend point.
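// For example (hypothetical IR), given
//
//   %p = getelementptr %S, %S* %base, i32 0, i32 1
//   <suspend>
//   use of %p
//
// it is cheaper to clone the getelementptr into the block that uses %p than
// to give %p its own field in the coroutine frame (operands such as %base are
// then spilled instead, if they themselves live across the suspend).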
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              SpillInfo const &Spills) {
  BasicBlock *CurrentBlock = nullptr;
  Instruction *CurrentMaterialization = nullptr;
  Instruction *CurrentDef = nullptr;

  for (auto const &E : Spills) {
    // If it is a new definition, update the CurrentXXX variables.
    if (CurrentDef != E.def()) {
      CurrentDef = cast<Instruction>(E.def());
      CurrentBlock = nullptr;
      CurrentMaterialization = nullptr;
    }

    // If we have not seen this block, materialize the value in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentMaterialization = cast<Instruction>(CurrentDef)->clone();
      CurrentMaterialization->setName(CurrentDef->getName());
      CurrentMaterialization->insertBefore(
          &*CurrentBlock->getFirstInsertionPt());
    }

    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 && "unexpected number of incoming "
                                                "values in the PHINode");
      PN->replaceAllUsesWith(CurrentMaterialization);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentDef in the current instruction with the
    // CurrentMaterialization for the block.
    E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
  }
}

// Move early uses of spilled variables after CoroBegin.
// For example, if a parameter had its address taken, we may end up with code
// like:
//   define @f(i32 %n) {
//     %n.addr = alloca i32
//     store %n, %n.addr
//     ...
//     call @coro.begin
// We need to move the store after coro.begin.
static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree DT(F);
  SmallVector<Instruction *, 8> NeedsMoving;

  Value *CurrentValue = nullptr;

  for (auto const &E : Spills) {
    if (CurrentValue == E.def())
      continue;

    CurrentValue = E.def();

    for (User *U : CurrentValue->users()) {
      Instruction *I = cast<Instruction>(U);
      if (!DT.dominates(CoroBegin, I)) {
        // TODO: Make this more robust. Currently, if we run into a situation
        // where a simple instruction move won't work, we panic and
        // report_fatal_error.
        for (User *UI : I->users()) {
          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
            report_fatal_error("cannot move instruction since its users are not"
                               " dominated by CoroBegin");
        }

        DEBUG(dbgs() << "will move: " << *I << "\n");
        NeedsMoving.push_back(I);
      }
    }
  }

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *I : NeedsMoving)
    I->moveBefore(InsertPt);
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
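// For example, splitting around a coro.suspend that sits in the middle of a
// block yields three blocks, with the middle one (named by the caller, e.g.
// "CoroSuspend") containing only the coro.suspend and an unconditional
// branch.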
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Lower coro.dbg.declare to coro.dbg.value, since we are going to rewrite
  // access to local variables.
  LowerDbgDeclare(F);

  Shape.PromiseAlloca = Shape.CoroBegin->getId()->getPromise();
  if (Shape.PromiseAlloca) {
    Shape.CoroBegin->getId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    splitAround(CSI->getCoroSave(), "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put the fallthrough CoroEnd into its own block. Note: Shape::buildFrom
  // places the fallthrough coro.end as the first element of the CoroEnds
  // array.
  splitAround(Shape.CoroEnds.front(), "CoroEnd");

  // Transform multi-edge PHI nodes, so that any value feeding into a PHI never
  // has its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  SpillInfo Spills;

  // See if there are materializable instructions across suspend points.
  for (Instruction &I : instructions(F))
    if (materializable(I))
      for (User *U : I.users())
        if (Checker.isDefinitionAcrossSuspend(I, U))
          Spills.emplace_back(&I, U);

  // Rewrite materializable instructions to be materialized at the use point.
  std::sort(Spills.begin(), Spills.end());
  DEBUG(dump("Materializations", Spills));
  rewriteMaterializableInstructions(Builder, Spills);

  // Collect the spills for arguments and other not-materializable values.
  Spills.clear();
  for (Argument &A : F.getArgumentList())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills.emplace_back(&A, U);

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I))
      continue;
    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.PromiseAlloca == &I)
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        assert(!materializable(I) &&
               "rewriteMaterializable did not do its job");
        Spills.emplace_back(&I, U);
      }
  }
  std::sort(Spills.begin(), Spills.end());
  DEBUG(dump("Spills", Spills));
  moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
}