//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

static cl::opt<bool> EnableReuseStorageInFrame(
    "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc("Enable the optimization which reuses storage in the coroutine "
             "frame for allocas whose live ranges do not overlap, for testing "
             "purposes"),
    llvm::cl::init(false));

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between basic blocks and their indices.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains the data that allows answering the
// question: given two basic blocks A and B, is there a path from A to B that
// passes through a suspend point?
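//
// As an illustrative (not normative) example, in a CFG such as
//
//   entry -> susp -> cont
//   entry ---------> cont
//
// where block 'susp' contains a suspend point, the query (entry, cont)
// answers true: one of the paths from 'entry' to 'cont' crosses the suspend.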
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'.
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i' where some path crosses a suspend point.
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // Reached when block S is neither a suspend block nor a coro.end
          // block; make sure S is not in its own kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indices in the frame. They are first set to
  // the original insertion field index; after the frame is built, they are
  // updated to the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on an alloca instruction
// differs from the natural alignment of its allocated type, we need to insert
// padding.
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  SmallVector<Field, 8> Fields;
  DenseMap<Value *, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put allocas whose lifetime ranges do not overlap into one
  /// slot of the coroutine frame.
  /// Consider the example at https://bugs.llvm.org/show_bug.cgi?id=45566:
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime analysis to partition the allocas
  /// into non-overlapping sets, so that allocas in the same set can share a
  /// slot in the coroutine frame. It then adds a field for each set, using
  /// the largest type in the set as the field type.
  ///
  /// Side effect: because we sort the allocas, their order in the frame may
  /// differ from their order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
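  ///
  /// Note (added for clarity): the returned id is the field's insertion
  /// index. Once finish() has laid out the struct, it can be mapped to the
  /// field's final position via getLayoutFieldIndex(), which is what
  /// FrameDataInfo::updateLayoutIndex() does for all recorded fields.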
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    Align TyAlignment = DL.getABITypeAlign(Ty);
    if (!FieldAlignment)
      FieldAlignment = TyAlignment;

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  DenseMap<AllocaInst *, unsigned int> AllocaIndex;
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  // However, the function has multiple exits, so we use this RAII helper to
  // avoid redundant code.
  struct RTTIHelper {
    std::function<void()> func;
    RTTIHelper(std::function<void()> &&func) : func(func) {}
    ~RTTIHelper() { func(); }
  } Helper([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      AllocaIndex[Alloca] = NonOverlappedAllocas.size();
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end for each
  // alloca, the live ranges of all allocas overlap in the blocks that contain
  // coro.end and in their successor blocks. So we choose to skip those blocks
  // when we compute the live range of each alloca. This should be reasonable
  // since there shouldn't be uses in those blocks and the coroutine frame
  // shouldn't be used outside the coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, that
  // case seems too complex to handle, and it is harmless to skip such
  // patterns: doing so merely prevents the affected allocas from sharing a
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<uint64_t> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Cannot handle scalable types yet");
    return RetSize.getValue();
  };
  // Put larger allocas in the front so that they get the first chance to
  // merge, which can potentially save more space. This also keeps each
  // AllocaSet ordered, so the largest alloca in a set is always its first
  // element.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Check whether the alloca interferes with any existing set. If there is
    // a set it does not interfere with, insert the alloca into that set.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "Processing an empty alloca set");
      bool CouldMerge = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      if (!CouldMerge)
        continue;
      AllocaIndex[Alloca] = AllocaIndex[*AllocaSet.begin()];
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      AllocaIndex[Alloca] = NonOverlappedAllocas.size();
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the saved default destination of each switch.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output shows which allocas were merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void *>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type *, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  FrameTypeBuilder B(C, DL);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot, we add the
  // fields for all the allocas here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    FieldIDType Id = B.addField(S.first->getType(), None);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // In the switch ABI, remember the switch-index field.
    Shape.SwitchLowering.IndexField =
        B.getLayoutFieldIndex(*SwitchIndexFieldId);

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    Shape.AsyncLowering.ContextSize =
        Shape.AsyncLowering.FrameOffset + Shape.FrameSize;
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? If so, we will need to recreate the aliases
//      after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The set of all basic blocks that use this alloca or any of its
//      aliases. At the end, we check whether any two of those blocks cross a
//      suspension point. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca escapes at some point,
//      either by having its address stored somewhere or by being passed to a
//      function call that might capture it. If it ever escapes, this alloca
//      must conservatively be put on the frame.
//
// To answer question 2, we track writes via MayWriteBeforeCoroBegin.
// Whenever a potential write happens, either through a store instruction, a
// function call, or any of the memory intrinsics, we check whether the
// instruction occurs prior to CoroBegin. To answer question 3, we track the
// offsets of all aliases created for the alloca prior to CoroBegin but used
// after CoroBegin. llvm::Optional is used to be able to represent the case
// when the offset is unknown (e.g. when you have a PHINode that takes in
// different offset values). We cannot handle unknown offsets and will assert.
// This is a known limitation; an ideal solution would likely require a
// significant redesign.
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}

  void visit(Instruction &I) {
    UserBBs.insert(I.getParent());
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer-based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Base visit function will handle escape setting.
    Base::visitStoreInst(SI);

    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca is written to.
    handleMayWrite(SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases to the original AllocaInst created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and its offset into
  // the original alloca. These aliases need to be recreated after CoroBegin
  // off the frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<BasicBlock *, 2> UserBBs{};
  bool MayWriteBeforeCoroBegin{false};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    if (PI.isEscaped())
      return true;

    for (auto *BB1 : UserBBs)
      for (auto *BB2 : UserBBs)
        if (Checker.hasPathCrossingSuspendPoint(BB1, BB2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after it.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we
        // set it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI in a block.
//
// So split the catchswitch away into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//     %hdl = coro.begin(...)
//     whatever
//
// becomes:
//
//     %hdl = coro.begin(...)
//     %FramePtr = bitcast i8* hdl to %f.frame*
//     br label %AllocaSpillBB
//
//   AllocaSpillBB:
//     ; geps corresponding to allocas that were moved to coroutine frame
//     br label PostSpill
//
//   PostSpill:
//     whatever
//
//
static Instruction *insertSpills(const FrameDataInfo &FrameData,
                                 coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  DominatorTree DT(*CB->getFunction());

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the AllocaInst may share its frame slot with other
      // AllocaInsts. So we cast the GEP to the type of the AllocaInst.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = FramePtr->getNextNode();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the CoroFrame is computed.
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip past PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    Builder.CreateStore(Def, G);

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If this is the first use we see in this block, create a load
      // instruction to reload the spilled value from the coroutine frame.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        CurrentReload = Builder.CreateLoad(
            FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
            E.first->getName() + Twine(".reload"));
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by rewriting them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of the spilled value in the current instruction
      // with the reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. Because new dbg.declares have been created for these
  // allocas, we also delete the original dbg.declare and replace other uses
  // with undef.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));
    TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
    if (!DIs.empty())
      DIBuilder(*Alloca->getModule(),
                /*AllowUnresolved*/ false)
          .insertDeclare(G, DIs.front()->getVariable(),
                         DIs.front()->getExpression(),
                         DIs.front()->getDebugLoc(), DIs.front());
    for (auto *DI : FindDbgDeclareUses(Alloca))
      DI->eraseFromParent();
    replaceDbgUsesWithUndef(Alloca);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred, PHINode *Until = nullptr) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (Until == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up. In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB. This
    // happens because the BB list of PHI nodes is usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
// specific handling.
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
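//
// For example (illustrative): if SuccBB starts with
//
//   %v = phi i32 [ %a, %InsertedBB ], ...
//
// this creates a single-operand PHI '%a.SuccBB = phi i32 [ %a, %PredBB ]' at
// the front of InsertedBB and rewires the PHI in SuccBB to take the new value
// from InsertedBB instead.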
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  //   cleanuppad:
  //      %2 = phi i32 [%0, %catchswitch], [%1, %catch.1]
  //      %3 = cleanuppad within none []
  //
  // It will create:
  //
  //   cleanuppad.corodispatch:
  //      %2 = phi i8 [0, %catchswitch], [1, %catch.1]
  //      %3 = cleanuppad within none []
  //      switch i8 %2, label %unreachable
  //              [i8 0, label %cleanuppad.from.catchswitch
  //               i8 1, label %cleanuppad.from.catch.1]
  //   cleanuppad.from.catchswitch:
  //      %4 = phi i32 [%0, %catchswitch]
  //      br label %cleanuppad
  //   cleanuppad.from.catch.1:
  //      %6 = phi i32 [%1, %catch.1]
  //      br label %cleanuppad
  //   cleanuppad:
  //      %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                   [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(pred_begin(CleanupPadBB),
                                     pred_end(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new block for this predecessor and move the corresponding PHI
    // values to it.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Setup the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  // loop:
  //    %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function,
    // after ehAwareSplitEdge has cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge cloned the original landing pad, so we no
    // longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to
// spilling the result into a coroutine frame.
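//
// For example (illustrative): a value such as
//
//   %idx = getelementptr i32, i32* %p, i64 1
//
// that is needed on both sides of a suspend can simply be cloned after the
// suspend point instead of occupying a slot in the coroutine frame (see
// rewriteMaterializableInstructions below).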
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that lives across a suspend point, recreate that
// value after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            &*CurrentBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
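///
/// (Illustrative note) A coro.alloca.alloc all of whose coro.alloca.free
/// users execute before any suspend point can be reached is "local": it can
/// be lowered to an ordinary dynamic alloca by lowerLocalAllocas below,
/// while any other coro.alloca.alloc is handled by lowerNonLocalAlloca.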
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass them up.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
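/// Instead, the alloc call and its get/free users are pushed onto DeadInsts,
/// and the caller erases them once the iteration over all instructions is
/// finished.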
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
    // We're likely changing the use list, so use a mutation-safe
    // iteration pattern.
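    // (Capture the current use, then advance the iterator before anything
    // below can rewrite the use list out from under us.)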
    auto &Use = *UI;
    ++UI;

    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilog.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
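/// To uphold that assumption, this routine collects every instruction that
/// uses a spilled value but is not already dominated by coro.begin, then
/// moves the whole set, in dominance order, to just after coro.begin.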
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst) ||
          isa<CoroIdAsyncInst>(Inst) /*'fake' use of async context argument*/)
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.back();
    Worklist.pop_back();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  std::sort(InsertionList.begin(), InsertionList.end(),
            [&Dom](Instruction *A, Instruction *B) -> bool {
              // If A dominates B, it should precede (<) B.
              return Dom.dominates(A, B);
            });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose uses are all confined to a single region
/// following one suspend point, sink its lifetime.start marker into the block
/// just after that suspend block. Doing so minimizes the live range of each
/// variable, hence minimizing the amount of data we end up putting on the
/// frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
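  // The candidates are the entry block and the single successor of each
  // suspend block: a lifetime.start can only be sunk to a block that
  // dominates all remaining uses of the alloca.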
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction *I) {
        if (auto *II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers, if they are all
        // dominated by one of the candidate basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start markers, and the GEPs and bitcasts used only
          // by lifetime.start markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to the dominating block when they are
      // only used outside the region.
      if (Valid && !Lifetimes.empty()) {
        // This may be AI itself, when AI's type is already i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value * {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // All the lifetime.start markers outside the region are no longer
        // necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  // Collect lifetime.start info for each alloca.
  using LifetimeStart = SmallPtrSet<Instruction *, 2>;
  llvm::DenseMap<AllocaInst *, std::unique_ptr<LifetimeStart>> LifetimeMap;
  for (Instruction &I : instructions(F)) {
    auto *II = dyn_cast<IntrinsicInst>(&I);
    if (!II || II->getIntrinsicID() != Intrinsic::lifetime_start)
      continue;

    if (auto *OpInst = dyn_cast<Instruction>(II->getOperand(1))) {
      if (auto *AI = dyn_cast<AllocaInst>(OpInst->stripPointerCasts())) {

        if (LifetimeMap.find(AI) == LifetimeMap.end())
          LifetimeMap[AI] = std::make_unique<LifetimeStart>();
        LifetimeMap[AI]->insert(isa<AllocaInst>(OpInst) ? II : OpInst);
      }
    }
  }

  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    bool ShouldLiveOnFrame = false;
    auto Iter = LifetimeMap.find(AI);
    if (Iter != LifetimeMap.end()) {
      // Check against lifetime.start if the instruction has the info.
      for (User *U : I.users()) {
        for (auto *S : *Iter->second)
          if ((ShouldLiveOnFrame = Checker.isDefinitionAcrossSuspend(*S, U)))
            break;
        if (ShouldLiveOnFrame)
          break;
      }
      if (!ShouldLiveOnFrame)
        continue;
    }
    // At this point, either ShouldLiveOnFrame is true or we didn't have
    // lifetime information. We will need to rely on more precise pointer
    // tracking.
    DominatorTree DT(F);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (CoroEndInst *CE : Shape.CoroEnds)
    splitAround(CE, "CoroEnd");

  // Transform multi-edge PHI nodes so that no value feeding into a PHI has
  // its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I))
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  sinkLifetimeStartMarkers(F, Shape, Checker);
  collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
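  // A value is spilled iff one of its users executes on a path that crosses
  // a suspend point, e.g. (illustrative switch-ABI IR):
  //
  //   %x = call i32 @f()                       ; defined before the suspend
  //   %s = call i8 @llvm.coro.suspend(token none, i1 false)
  //   ...
  //   %y = add i32 %x, 1                       ; used after it: %x is spilled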
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }
  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  // Add PromiseAlloca to the Allocas list so that it is processed in
  // insertSpills.
  if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        Shape.SwitchLowering.PromiseAlloca,
        DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  Shape.FramePtr = insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}