//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend block.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

static cl::opt<bool> EnableReuseStorageInFrame(
    "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc(
        "Enable the optimization which would reuse the storage in the coroutine \
         frame for allocas whose lifetime ranges do not overlap, for testing purposes"),
    llvm::cl::init(false));

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between the blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
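//
// For example (illustrative only): given a CFG entry -> susp -> resume, where
// block 'susp' contains a suspend point, the analysis below records that
// 'resume' consumes {entry, susp, resume} and kills {entry, susp}. A value
// defined in 'entry' and used in 'resume' therefore crosses a suspend point
// and must be spilled to the coroutine frame.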
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of
  // the coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Saved Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data is still available
          // on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in the kill
          // set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will be first set
  // with their original insertion field index. After the frame is built, their
  // indexes will be updated into the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << "--------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame; if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put the allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, and then adds a field
  /// for each set, using the largest type in the set as the field type.
  ///
  /// Side effects: because we sort the allocas, the order of the allocas in
  /// the frame may differ from their order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    Align TyAlignment = DL.getABITypeAlign(Ty);
    if (!FieldAlignment) FieldAlignment = TyAlignment;

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add a field for the allocas at the end of this function.
  // However, this function has multiple exits, so we use this RAII helper to
  // avoid redundant code.
  struct RAIIHelper {
    std::function<void()> func;
    RAIIHelper(std::function<void()> &&func) : func(func) {}
    ~RAIIHelper() { func(); }
  } Helper([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all the allocas overlap in the
  // blocks that contain coro.end and in their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle.
  // And it is harmless to skip these patterns, since doing so merely prevents
  // putting the allocas into the same slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto IsAllocaInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front, so that the larger allocas have higher
  // priority to merge, which can potentially save more space. Also, each
  // AllocaSet is then ordered, so we can easily get the largest Alloca in an
  // AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing NonOverlappedAllocaSet that the Alloca does
    // not interfere with. If there is one, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return IsAllocaInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process, but they seem hard
      // to implement and would benefit little.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default target destination for each switch statement we
  // saved above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
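  // For example (illustrative): with allocas %a (64 bytes) and %b (32 bytes)
  // whose live ranges do not overlap, %a and %b land in the same set, the
  // frame gets a single field typed after the larger %a, and accesses to %b
  // are bitcast from that shared slot.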
  // This debug output shows which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
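//
// For example (illustrative only): a switch-ABI coroutine with two suspend
// points that spills a single i32 across a suspend might end up with a frame
// like
//
//   %f.Frame = type { void (%f.Frame*)*,   ; ResumeFnAddr
//                     void (%f.Frame*)*,   ; DestroyFnAddr
//                     i32,                 ; spilled value
//                     i1 }                 ; suspend index (2 suspends -> 1 bit)
//
// although the exact field order is decided by the optimized struct layout.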
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  FrameTypeBuilder B(C, DL);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot,
  // we add the allocas to their fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //   1. updateLayoutIndex can update its index after
  //      `performOptimizedStructLayout`, and
  //   2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    FieldIDType Id = B.addField(S.first->getType(), None);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // In the switch ABI, remember the switch-index field.
    Shape.SwitchLowering.IndexField =
        B.getLayoutFieldIndex(*SwitchIndexFieldId);

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? In that case, we will need to recreate the
//      aliases after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its
//      aliases. In the end, we check if there exist any two basic blocks
//      that cross suspension points. If so, this alloca must be put on the
//      frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some
//      point, either by storing the address somewhere, or by passing the
//      address to a function call that might capture it. If it's ever
//      escaped, this alloca must be put on the frame conservatively.
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call, or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer-based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca is being written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //     %ptr = alloca ..
    //     %addr = alloca ..
    //     store %ptr, %addr
    //     %x = load %addr
    //     ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overriding the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    if (II.getIntrinsicID() != Intrinsic::lifetime_start)
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and the offset in the
  // original Alloca. They need to be recreated after CoroBegin off the frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  bool MayWriteBeforeCoroBegin{false};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (!LifetimeStarts.empty()) {
      for (auto *I : Users)
        for (auto *S : LifetimeStarts)
          if (Checker.isDefinitionAcrossSuspend(*S, I))
            return true;
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (esp. the return object).
    // To fix that, we need to:
    //   1) Potentially treat sret as nocapture in calls.
    //   2) Special-case the return object and put it on the stack.
    //   3) Utilize the lifetime.end intrinsic.
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we set
        // it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// a catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EHPads, must be
// the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}
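
// For example (illustrative IR, value names hypothetical): a block ending in
//   %cs = catchswitch within none [label %handler] unwind to caller
// is rewritten so that the original block ends with
//   %pad = cleanuppad within none []
//   cleanupret from %pad unwind label %split
// and the catchswitch lives on in the new %split block; the spill is then
// emitted just before the cleanupret.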
// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label PostSpill
//
//  PostSpill:
//    whatever
//
//
static Instruction *insertSpills(const FrameDataInfo &FrameData,
                                 coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  DominatorTree DT(*CB->getFunction());
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the frame slot may be shared with another AllocaInst.
      // So we cast the GEP to the type of the AllocaInst here to reuse the
      // frame storage.
      //
      // Note: If we change the strategy for dealing with alignment, we need
      // to refine this casting.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = FramePtr->getNextNode();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the CoroFrame is computed.
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip the PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    Builder.CreateStore(Def, G);

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to
      // reload the spilled value from the coroutine frame. Populates the
      // Value pointer reference provided with the frame GEP.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        CurrentReload = Builder.CreateLoad(
            FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
            E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() by CoroCloner.
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI);
        }
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by rewriting them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // the reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. Because new dbg.declares have been created for these
  // allocas, we also delete the original dbg.declare and replace other uses
  // with undef.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
    if (!DIs.empty())
      DIBuilder(*Alloca->getModule(),
                /*AllowUnresolved*/ false)
          .insertDeclare(G, DIs.front()->getVariable(),
                         DIs.front()->getExpression(),
                         DIs.front()->getDebugLoc(), DIs.front());
    for (auto *DI : FindDbgDeclareUses(Alloca))
      DI->eraseFromParent();
    replaceDbgUsesWithUndef(Alloca);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
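    // For example (illustrative, value names hypothetical): if
    //   %alias = getelementptr i8, i8* %alloca, i64 8
    // was created before CoroBegin, we rebuild it after CoroBegin as a GEP at
    // offset 8 from the alloca's frame slot and redirect the post-CoroBegin
    // uses to it.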
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred, PHINode *Until = nullptr) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (Until == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up. In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB. This
    // happens because the BB list of PHI nodes are usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
// specific handling.
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  //     cleanuppad:
  //         %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
  //         %3 = cleanuppad within none []
  //
  // It will create:
  //
  //     cleanuppad.corodispatch
  //         %2 = phi i8[0, %catchswitch], [1, %catch.1]
  //         %3 = cleanuppad within none []
  //         switch i8 %2, label %unreachable
  //                 [i8 0, label %cleanuppad.from.catchswitch
  //                  i8 1, label %cleanuppad.from.catch.1]
  //     cleanuppad.from.catchswitch:
  //         %4 = phi i32 [%0, %catchswitch]
  //         br %label cleanuppad
  //     cleanuppad.from.catch.1:
  //         %6 = phi i32 [%1, %catch.1]
  //         br %label cleanuppad
  //     cleanuppad:
  //         %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                      [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values to there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Setup the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in a single PHI node.
  //
  //     loop:
  //         %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  //     loop.from.entry:
  //         %n.loop.pre = phi i32 [%n, %entry]
  //         br %label loop
  //     loop.from.loop:
  //         %inc.loop.pre = phi i32 [%inc, %loop]
  //         br %label loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge cloned the original landing pad. We no
    // longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of the value that is across a suspend point, recreate that
// value after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            &*CurrentBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}
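// For illustration only (value names hypothetical): a materializable GEP whose
// use crosses a suspend point is recloned at the use point instead of being
// spilled, assuming its own operand (%base) is available after the suspend:
//
//   Before:
//     entry:
//       %p = getelementptr i32, i32* %base, i64 4
//       ; ... suspend ...
//     resume:
//       %v = load i32, i32* %p
//
//   After:
//     resume:
//       %p = getelementptr i32, i32* %base, i64 4
//       %v = load i32, i32* %p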
// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it ends up alone
// in its own block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't search past them.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}
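// For illustration only (names hypothetical): a coro.alloca.free that is
// directly followed by a suspend needs no stacksave/stackrestore pair, since
// the resumption function's stack is discarded at the suspend anyway:
//
//   %mem = call i8* @llvm.coro.alloca.get(token %alloc)
//   ...
//   call void @llvm.coro.alloca.free(token %alloc)
//   %s = call i8 @llvm.coro.suspend(token none, i1 false)
//
// A free on a path that loops back into the coroutine body, by contrast,
// must restore the stack pointer to avoid unbounded stack growth.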
/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}
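// For illustration only: the "fake intrinsics" built above are calls through
// a null function pointer of the appropriate type. For a swifterror slot
// holding an i8* error value they would print roughly as (names
// hypothetical):
//
//   %err  = call i8* null()            ; emitGetSwiftErrorValue
//   %slot = call i8** null(i8* %err)   ; emitSetSwiftErrorValue
//
// The calls are recorded in Shape.SwiftErrorOps and replaced with real
// swifterror accesses later, when the coroutine is split.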
/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
    // We're likely changing the use list, so use a mutation-safe
    // iteration pattern.
    auto &Use = *UI;
    ++UI;

    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}
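// For illustration only (names hypothetical), rewriting a swifterror call
// with emitSetAndGetSwiftErrorValueAround conceptually turns:
//
//   call void @g(i8** swifterror %slot)
//
// into:
//
//   %v = load i8*, i8** %slot
//   %addr = <set swifterror value to %v>   ; emitSetSwiftErrorValue
//   call void @g(i8** swifterror %addr)
//   %v2 = <get current swifterror value>   ; emitGetSwiftErrorValue
//   store i8* %v2, i8** %slot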
/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// The retcon and retcon.once conventions assume that all spill uses can be
/// sunk after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}
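// For illustration only (names hypothetical): if a use of a frame value
// precedes coro.begin in its block, it is moved below coro.begin so that the
// frame exists when the use executes:
//
//   Before:
//     %u = add i32 %x, 1
//     %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
//
//   After:
//     %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
//     %u = add i32 %x, 1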
/// For each local variable whose users all lie inside a single suspended
/// region, sink its lifetime.start markers to just after the suspend block.
/// Doing so minimizes the lifetime of each variable, hence minimizing the
/// amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers, if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start, GEP and bitcast used by lifetime.start
          // markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to the dominating block when they are
      // only used outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // This may be AI itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // All the lifetime.start markers outside the new region are no
        // longer necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}
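// For illustration only (names hypothetical): when all real uses of %x sit
// after a suspend point, its lifetime.start is sunk past the suspend so the
// alloca's live range no longer crosses it and it need not go on the frame:
//
//   Before:
//     entry:
//       %x = alloca i32
//       %x.cast = bitcast i32* %x to i8*
//       call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast)
//       ; ... suspend ...
//     resume:                 ; single successor of the suspend block
//       br label %use
//     use:
//       store i32 1, i32* %x
//
//   After (the marker now sits in %resume, before its terminator):
//     resume:
//       %x.cast2 = bitcast i32* %x to i8*
//       call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast2)
//       br label %use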
static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    DominatorTree DT(F);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgDeclareInst *DDI) {
  Function *F = DDI->getFunction();
  IRBuilder<> Builder(F->getContext());
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DDI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert it into a DIExpression.
  bool OutermostLoad = true;
  Value *Storage = DDI->getAddress();
  while (Storage) {
    if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location no DW_OP_deref operation for the
      // last direct load from an alloca is necessary. This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!OutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
      OutermostLoad = false;
    } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
      Storage = StInst->getOperand(0);
    } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
      Expr = llvm::salvageDebugInfoImpl(*GEPInst, Expr,
                                        /*WithStackValue=*/false);
      if (!Expr)
        return;
      Storage = GEPInst->getOperand(0);
    } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
      Storage = BCInst->getOperand(0);
    else
      break;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  if (auto Arg = dyn_cast_or_null<llvm::Argument>(Storage)) {
    auto &Cached = DbgPtrAllocaCache[Storage];
    if (!Cached) {
      Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                    Arg->getName() + ".debug");
      Builder.CreateStore(Storage, Cached);
    }
    Storage = Cached;
    // FIXME: LLVM lacks nuanced semantics to differentiate between
    // memory and direct locations at the IR level. The backend will
    // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
    // location. Thus, if there are deref and offset operations in the
    // expression, we need to add a DW_OP_deref at the *start* of the
    // expression to first load the contents of the alloca before
    // adjusting it with the expression.
    if (Expr && Expr->isComplex())
      Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
  }
  auto &VMContext = DDI->getFunction()->getContext();
  DDI->setOperand(
      0, MetadataAsValue::get(VMContext, ValueAsMetadata::get(Storage)));
  DDI->setOperand(2, MetadataAsValue::get(VMContext, Expr));
  if (auto *InsertPt = dyn_cast_or_null<Instruction>(Storage))
    DDI->moveAfter(InsertPt);
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call. (Arguments to the coro.end instruction
    // would otherwise be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Transform multi-edge PHI nodes so that any value feeding into a PHI never
  // has its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I))
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  sinkLifetimeStartMarkers(F, Shape, Checker);
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other non-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame, so
    // there is no need to check for suspend crossings.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }
  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  Shape.FramePtr = insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}