//===- CoroFrame.cpp - Builds and manipulates coroutine frame ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between basic blocks and their indices.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = std::lower_bound(V.begin(), V.end(), BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
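//
// Illustrative example (not drawn from a particular test): for the simple
// chain  entry -> susp -> use, where 'susp' contains a suspend point, the
// fixed point gives Consumes[use] = {entry, susp, use} and
// Kills[use] = {entry, susp}. A value defined in 'entry' and used in 'use'
// therefore crosses the suspend point and has to be spilled to the frame.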
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    assert(Block[UseIndex].Consumes[DefIndex] && "use must consume def");
    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();
    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    return isDefinitionAcrossSuspend(I.getParent(), U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine, and all of
  // the state needs to be saved by that time.
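  // In other words, for the purpose of this analysis a value that is live
  // across the coro.save block is treated exactly like a value that is live
  // across the corresponding coro.suspend block.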
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    markSuspendBlock(CSI->getCoroSave());
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bit sets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that S is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

// We build up the list of spills for every case where a use is separated
// from the definition by a suspend point.

namespace {
class Spill {
  Value *Def = nullptr;
  Instruction *User = nullptr;
  unsigned FieldNo = 0;

public:
  Spill(Value *Def, llvm::User *U) : Def(Def), User(cast<Instruction>(U)) {}

  Value *def() const { return Def; }
  Instruction *user() const { return User; }
  BasicBlock *userBlock() const { return User->getParent(); }

  // Note that the field index is stored in the first Spill entry for a
  // particular definition. Subsequent mentions of a definition do not have
  // FieldNo assigned. This works out fine, as the users of Spills capture the
  // info about the definition the first time they encounter it. Consider
  // refactoring SpillInfo into two arrays to normalize the spill
  // representation.
  unsigned fieldIndex() const {
    assert(FieldNo && "Accessing unassigned field");
    return FieldNo;
  }
  void setFieldIndex(unsigned FieldNumber) {
    assert(!FieldNo && "Reassigning field number");
    FieldNo = FieldNumber;
  }
};
} // namespace

// Note that there may be more than one record with the same value of Def in
// the SpillInfo vector.
using SpillInfo = SmallVector<Spill, 8>;

#ifndef NDEBUG
static void dump(StringRef Title, SpillInfo const &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  Value *CurrentValue = nullptr;
  for (auto const &E : Spills) {
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentValue->dump();
    }
    dbgs() << "   user: ";
    E.user()->dump();
  }
}
#endif

namespace {
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame; if the alignment specified on the alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
struct PaddingCalculator {
  const DataLayout &DL;
  LLVMContext &Context;
  unsigned StructSize = 0;

  PaddingCalculator(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  // Replicate the logic from IR/DataLayout.cpp to match field offset
  // computation for LLVM structs.
  void addType(Type *Ty) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    if ((StructSize & (TyAlign - 1)) != 0)
      StructSize = alignTo(StructSize, TyAlign);

    StructSize += DL.getTypeAllocSize(Ty); // Consume space for this data item.
  }

  void addTypes(SmallVectorImpl<Type *> const &Types) {
    for (auto *Ty : Types)
      addType(Ty);
  }

  unsigned computePadding(Type *Ty, unsigned ForcedAlignment) {
    unsigned TyAlign = DL.getABITypeAlignment(Ty);
    auto Natural = alignTo(StructSize, TyAlign);
    auto Forced = alignTo(StructSize, ForcedAlignment);

    // Return how many bytes of padding we need to insert.
    if (Natural != Forced)
      return std::max(Natural, Forced) - StructSize;

    // Rely on natural alignment.
    return 0;
  }

  // If padding is required, return the padding field type to insert.
  ArrayType *getPaddingType(Type *Ty, unsigned ForcedAlignment) {
    if (auto Padding = computePadding(Ty, ForcedAlignment))
      return ArrayType::get(Type::getInt8Ty(Context), Padding);

    return nullptr;
  }
};
} // namespace

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     ... promise (if present) ...
//     int ResumeIndex;
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  SpillInfo &Spills) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PaddingCalculator Padder(C, DL);
  SmallString<32> Name(F.getName());
  Name.append(".Frame");
  StructType *FrameTy = StructType::create(C, Name);
  auto *FramePtrTy = FrameTy->getPointerTo();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                 /*IsVarArgs=*/false);
  auto *FnPtrTy = FnTy->getPointerTo();

  // Figure out how wide an integer type storing the suspend index should be.
  unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
  Type *PromiseType = Shape.PromiseAlloca
                          ? Shape.PromiseAlloca->getType()->getElementType()
                          : Type::getInt1Ty(C);
  SmallVector<Type *, 8> Types{FnPtrTy, FnPtrTy, PromiseType,
                               Type::getIntNTy(C, IndexBits)};
  Value *CurrentDef = nullptr;

  Padder.addTypes(Types);

  // Create an entry for every spilled value.
  for (auto &S : Spills) {
    if (CurrentDef == S.def())
      continue;

    CurrentDef = S.def();
    // PromiseAlloca was already added to the Types array earlier.
    if (CurrentDef == Shape.PromiseAlloca)
      continue;

    uint64_t Count = 1;
    Type *Ty = nullptr;
    if (auto *AI = dyn_cast<AllocaInst>(CurrentDef)) {
      Ty = AI->getAllocatedType();
      if (unsigned AllocaAlignment = AI->getAlignment()) {
        // If alignment is specified in the alloca, see if we need to insert
        // extra padding.
        if (auto PaddingTy = Padder.getPaddingType(Ty, AllocaAlignment)) {
          Types.push_back(PaddingTy);
          Padder.addType(PaddingTy);
        }
      }
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Count = CI->getValue().getZExtValue();
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    } else {
      Ty = CurrentDef->getType();
    }
    S.setFieldIndex(Types.size());
    if (Count == 1)
      Types.push_back(Ty);
    else
      Types.push_back(ArrayType::get(Ty, Count));
    Padder.addType(Ty);
  }
  FrameTy->setBody(Types);

  return FrameTy;
}

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI in a block.
//
// Split the catchswitch away into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//     %hdl = coro.begin(...)
//     whatever
//
// becomes:
//
//     %hdl = coro.begin(...)
//     %FramePtr = bitcast i8* hdl to %f.frame*
//     br label %AllocaSpillBB
//
//   AllocaSpillBB:
//     ; geps corresponding to allocas that were moved to coroutine frame
//     br label %PostSpill
//
//   PostSpill:
//     whatever
//
//
static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));

  Value *CurrentValue = nullptr;
  BasicBlock *CurrentBlock = nullptr;
  Value *CurrentReload = nullptr;
  unsigned Index = 0; // Proper field number will be read from field definition.

  // We need to keep track of any allocas that need "spilling": since they
  // will live in the coroutine frame now, all accesses to them need to be
  // changed, not just the accesses across suspend points. We remember allocas
  // and their indices to be handled once we have processed all the spills.
  SmallVector<std::pair<AllocaInst *, unsigned>, 4> Allocas;
  // Promise alloca (if present) has a fixed field number (Shape::PromiseField).
  if (Shape.PromiseAlloca)
    Allocas.emplace_back(Shape.PromiseAlloca, coro::Shape::PromiseField);

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](uint32_t Index, Value *Orig) -> Value * {
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    return Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices);
  };

  // Create a load instruction to reload the spilled value from the coroutine
  // frame.
  auto CreateReload = [&](Instruction *InsertBefore) {
    assert(Index && "accessing unassigned field number");
    Builder.SetInsertPoint(InsertBefore);

    auto *G = GetFramePointer(Index, CurrentValue);
    G->setName(CurrentValue->getName() + Twine(".reload.addr"));

    return isa<AllocaInst>(CurrentValue)
               ? G
               : Builder.CreateLoad(FrameTy->getElementType(Index), G,
                                    CurrentValue->getName() + Twine(".reload"));
  };

  for (auto const &E : Spills) {
    // If we have not seen the value, generate a spill.
    if (CurrentValue != E.def()) {
      CurrentValue = E.def();
      CurrentBlock = nullptr;
      CurrentReload = nullptr;

      Index = E.fieldIndex();

      if (auto *AI = dyn_cast<AllocaInst>(CurrentValue)) {
        // A spilled AllocaInst will be replaced with a GEP into the coroutine
        // frame; there is no spill required.
        Allocas.emplace_back(AI, Index);
        if (!AI->isStaticAlloca())
          report_fatal_error("Coroutines cannot handle non static allocas yet");
      } else {
        // Otherwise, create a store instruction storing the value into the
        // coroutine frame.

        Instruction *InsertPt = nullptr;
        if (isa<Argument>(CurrentValue)) {
          // For arguments, we will place the store instruction right after
          // the coroutine frame pointer instruction, i.e. the bitcast of
          // coro.begin from i8* to %f.frame*.
          InsertPt = FramePtr->getNextNode();
        } else if (auto *II = dyn_cast<InvokeInst>(CurrentValue)) {
          // If we are spilling the result of the invoke instruction, split the
          // normal edge and insert the spill in the new block.
          auto NewBB = SplitEdge(II->getParent(), II->getNormalDest());
          InsertPt = NewBB->getTerminator();
        } else if (dyn_cast<PHINode>(CurrentValue)) {
          // Skip the PHINodes and EH pad instructions.
          BasicBlock *DefBlock = cast<Instruction>(E.def())->getParent();
          if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
            InsertPt = splitBeforeCatchSwitch(CSI);
          else
            InsertPt = &*DefBlock->getFirstInsertionPt();
        } else {
          // For all other values, the spill is placed immediately after
          // the definition.
          assert(!cast<Instruction>(E.def())->isTerminator() &&
                 "unexpected terminator");
          InsertPt = cast<Instruction>(E.def())->getNextNode();
        }

        Builder.SetInsertPoint(InsertPt);
        auto *G = Builder.CreateConstInBoundsGEP2_32(
            FrameTy, FramePtr, 0, Index,
            CurrentValue->getName() + Twine(".spill.addr"));
        Builder.CreateStore(CurrentValue, G);
      }
    }

    // If we have not seen the use block, generate a reload in it.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentReload = CreateReload(&*CurrentBlock->getFirstInsertionPt());
    }

    // If we have a single edge PHINode, remove it and replace it with a reload
    // from the coroutine frame. (We already took care of multi-edge PHINodes
    // by rewriting them in the rewritePHIs function.)
    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 &&
             "unexpected number of incoming values in the PHINode");
      PN->replaceAllUsesWith(CurrentReload);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentValue in the current instruction with the
    // reload.
    E.user()->replaceUsesOfWith(CurrentValue, CurrentReload);
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();
  Shape.AllocaSpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  Shape.AllocaSpillBlock->splitBasicBlock(&Shape.AllocaSpillBlock->front(),
                                          "PostSpill");

  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  // If we found any allocas, replace all of their remaining uses with GEPs.
  for (auto &P : Allocas) {
    auto *G = GetFramePointer(P.second, P.first);

    // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
    // here, as we are changing the location of the instruction.
    G->takeName(P.first);
    P.first->replaceAllUsesWith(G);
    P.first->eraseFromParent();
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred,
                           PHINode *LandingPadReplacement) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (LandingPadReplacement == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up. In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB. This
    // happens because the BB list of PHI nodes is usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case it does
// EH-specific handling.
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI node.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
    auto *PN = cast<PHINode>(&BB.front());
    do {
      int Index = PN->getBasicBlockIndex(IncomingBB);
      Value *V = PN->getIncomingValue(Index);
      PHINode *InputV = PHINode::Create(
          V->getType(), 1, V->getName() + Twine(".") + BB.getName(),
          &IncomingBB->front());
      InputV->addIncoming(V, Pred);
      PN->setIncomingValue(Index, InputV);
      PN = dyn_cast<PHINode>(PN->getNextNode());
    } while (PN != ReplPHI); // ReplPHI is either null or the PHI that replaced
                             // the landing pad.
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of the value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              SpillInfo const &Spills) {
  BasicBlock *CurrentBlock = nullptr;
  Instruction *CurrentMaterialization = nullptr;
  Instruction *CurrentDef = nullptr;

  for (auto const &E : Spills) {
    // If it is a new definition, update CurrentXXX variables.
    if (CurrentDef != E.def()) {
      CurrentDef = cast<Instruction>(E.def());
      CurrentBlock = nullptr;
      CurrentMaterialization = nullptr;
    }

    // If we have not seen this block, materialize the value.
    if (CurrentBlock != E.userBlock()) {
      CurrentBlock = E.userBlock();
      CurrentMaterialization = cast<Instruction>(CurrentDef)->clone();
      CurrentMaterialization->setName(CurrentDef->getName());
      CurrentMaterialization->insertBefore(
          &*CurrentBlock->getFirstInsertionPt());
    }

    if (auto *PN = dyn_cast<PHINode>(E.user())) {
      assert(PN->getNumIncomingValues() == 1 &&
             "unexpected number of incoming values in the PHINode");
      PN->replaceAllUsesWith(CurrentMaterialization);
      PN->eraseFromParent();
      continue;
    }

    // Replace all uses of CurrentDef in the current instruction with the
    // CurrentMaterialization for the block.
    E.user()->replaceUsesOfWith(CurrentDef, CurrentMaterialization);
  }
}

// Move early uses of spilled variables after CoroBegin.
// For example, if a parameter had its address taken, we may end up with code
// like:
//        define @f(i32 %n) {
//          %n.addr = alloca i32
//          store %n, %n.addr
//          ...
//          call @coro.begin
// We need to move the store after coro.begin.
static void moveSpillUsesAfterCoroBegin(Function &F, SpillInfo const &Spills,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree DT(F);
  SmallVector<Instruction *, 8> NeedsMoving;

  Value *CurrentValue = nullptr;

  for (auto const &E : Spills) {
    if (CurrentValue == E.def())
      continue;

    CurrentValue = E.def();

    for (User *U : CurrentValue->users()) {
      Instruction *I = cast<Instruction>(U);
      if (!DT.dominates(CoroBegin, I)) {
        LLVM_DEBUG(dbgs() << "will move: " << *I << "\n");

        // TODO: Make this more robust. Currently if we run into a situation
        // where a simple instruction move won't work we panic and
        // report_fatal_error.
        for (User *UI : I->users()) {
          if (!DT.dominates(CoroBegin, cast<Instruction>(UI)))
            report_fatal_error("cannot move instruction since its users are not"
                               " dominated by CoroBegin");
        }

        NeedsMoving.push_back(I);
      }
    }
  }

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *I : NeedsMoving)
    I->moveBefore(InsertPt);
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Lower dbg.declare to dbg.value, since we are going to rewrite
  // access to local variables.
  LowerDbgDeclare(F);

  Shape.PromiseAlloca = Shape.CoroBegin->getId()->getPromise();
  if (Shape.PromiseAlloca) {
    Shape.CoroBegin->getId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
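  // Splitting ensures that a suspend point never shares a block with the
  // definitions and uses we analyze; SuspendCrossingInfo reasons at block
  // granularity, so this keeps the per-block Consumes/Kills sets precise.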
  for (CoroSuspendInst *CSI : Shape.CoroSuspends) {
    splitAround(CSI->getCoroSave(), "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (CoroEndInst *CE : Shape.CoroEnds)
    splitAround(CE, "CoroEnd");

  // Transform multi-edge PHI nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  SpillInfo Spills;

  for (int Repeat = 0; Repeat < 4; ++Repeat) {
    // See if there are materializable instructions across suspend points.
    for (Instruction &I : instructions(F))
      if (materializable(I))
        for (User *U : I.users())
          if (Checker.isDefinitionAcrossSuspend(I, U))
            Spills.emplace_back(&I, U);

    if (Spills.empty())
      break;

    // Rewrite materializable instructions to be materialized at the use
    // point.
    LLVM_DEBUG(dump("Materializations", Spills));
    rewriteMaterializableInstructions(Builder, Spills);
    Spills.clear();
  }

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        Spills.emplace_back(&A, U);

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;
    // The coroutine promise is always included in the coroutine frame; no
    // need to check for suspend crossing.
    if (Shape.PromiseAlloca == &I)
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        Spills.emplace_back(&I, U);
      }
  }
  LLVM_DEBUG(dump("Spills", Spills));
  moveSpillUsesAfterCoroBegin(F, Spills, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, Spills);
  Shape.FramePtr = insertSpills(Spills, Shape);
}