//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if, for a particular value,
// there is a path from definition to use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

static cl::opt<bool> EnableReuseStorageInFrame(
    "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc("Enable the optimization which reuses storage in the coroutine "
             "frame for allocas whose lifetime ranges do not overlap, for "
             "testing purposes"),
    llvm::cl::init(false));

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
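//
// For example (illustrative): in the CFG entry -> A -> B, where A contains a
// suspend point, B's Consumes set ends up as {entry, A, B} and its Kills set
// as {entry, A}; every path from entry or A to B crosses the suspend, so a
// value defined in entry (or in A before the suspend) and used in B must be
// spilled to the frame.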
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'.
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', where at least one of the paths crosses a suspend
//          point.
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
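    // (After suspends are split into their own blocks, an llvm.coro.suspend
    // sits alone in its block, so its consumers, e.g. the switch on its
    // result, actually execute in the single successor.)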
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes in the frame. They will first be
  // set to their original insertion field index; after the frame is built,
  // the indexes are updated to the final layout index.
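  // For example (illustrative), a value inserted as builder field 3 may end
  // up at layout index 1 once performOptimizedStructLayout reorders fields.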
  DenseMap<Value *, uint32_t> FieldIndexMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on the natural alignment of a type when building a
// coroutine frame: if the alignment specified on the alloca instruction
// differs from the natural alignment of the allocated type, we will need to
// insert padding.
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///       if (cond) {
  ///         big_structure a;
  ///         process(a);
  ///         co_await something();
  ///       } else {
  ///         big_structure b;
  ///         process2(b);
  ///         co_await something();
  ///       }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the allocas
  /// into non-overlapping sets, in order to put allocas in the same
  /// non-overlapping set into the same slot in the coroutine frame. It then
  /// adds a field for each set, using the type of the set's largest alloca as
  /// the field type.
  ///
  /// Side effects: because we sort the allocas, their order in the frame may
  /// differ from their order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    Align TyAlignment = DL.getABITypeAlign(Ty);
    if (!FieldAlignment) FieldAlignment = TyAlignment;

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  DenseMap<AllocaInst *, unsigned int> AllocaIndex;
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  // However, this function has multiple exits, so we use this RAII helper to
  // avoid redundant code.
  struct RAIIHelper {
    std::function<void()> func;
    RAIIHelper(std::function<void()> &&func) : func(func) {}
    ~RAIIHelper() { func(); }
  } Helper([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      AllocaIndex[Alloca] = NonOverlappedAllocas.size();
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap
  // in the blocks containing coro.end and their successor blocks.
  // So we skip these blocks when we calculate the live range
  // for each alloca. This should be reasonable, since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
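  //
  // To model this, the loop below temporarily retargets the default
  // destination of each switch fed by a coro.suspend (the path leading
  // towards coro.end) to one of the switch's case successors, so that the
  // StackLifetime analysis never sees a path through coro.end; the original
  // default destinations are restored once the analysis has run.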
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns, since doing so merely prevents putting the allocas into the
  // same slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto *CoroSuspendInst : Shape.CoroSuspends) {
    for (auto *U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front. Larger allocas then have higher priority
  // to be merged, which can potentially save more space. Also, each AllocaSet
  // is kept ordered, so we can easily get the largest alloca in a set.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing non-overlapping set that this alloca does not
    // interfere with. If we find one, insert the alloca into that set.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "Processing alloca set should not be empty");
      bool CouldMerge = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      if (!CouldMerge)
        continue;
      AllocaIndex[Alloca] = AllocaIndex[*AllocaSet.begin()];
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      AllocaIndex[Alloca] = NonOverlappedAllocas.size();
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default destination of each switch statement we modified
  // above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output shows which allocas were merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  FrameTypeBuilder B(C, DL);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // The PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot,
  // we add the allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //  1. updateLayoutIndex can update its index after
  //     `performOptimizedStructLayout`, and
  //  2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    FieldIDType Id = B.addField(S.first->getType(), None);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // In the switch ABI, remember the switch-index field.
    Shape.SwitchLowering.IndexField =
        B.getLayoutFieldIndex(*SwitchIndexFieldId);

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;

  // In the retcon ABI, remember whether the frame is inline in the storage.
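  // (i.e., whether it fits in the caller-provided storage buffer in both size
  // and alignment, so that no separate allocation is needed).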
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//  1. Should this alloca be allocated on the frame instead?
//  2. Could the content of the alloca be modified prior to CoroBegin, which
//     would require copying the data from the alloca to the frame after
//     CoroBegin?
//  3. Are there any aliases created for this alloca prior to CoroBegin but
//     used after CoroBegin? In that case, we will need to recreate the alias
//     after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//  a. The list of all BasicBlocks that use this alloca or any of its aliases.
//     In the end, we check if there exist any two basic blocks that cross
//     suspension points. If so, this alloca must be put on the frame.
//  b. Whether the alloca or any alias of the alloca is escaped at some point,
//     either by storing the address somewhere, or by passing the address to a
//     function call that might capture it. If it is ever escaped, this alloca
//     must be put on the frame conservatively.
//
// To answer question 2, we track potential writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether the instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}

  void visit(Instruction &I) {
    UserBBs.insert(I.getParent());
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it would
    // be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca is being written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //     %ptr = alloca ..
    //     %addr = alloca ..
    //     store %ptr, %addr
    //     %x = load %addr
    //     ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.back();
        StoreAliases.pop_back();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // A BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset into the original alloca. The aliases need to be recreated after
  // CoroBegin off the frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<BasicBlock *, 2> UserBBs{};
  bool MayWriteBeforeCoroBegin{false};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    if (PI.isEscaped())
      return true;

    for (auto *BB1 : UserBBs)
      for (auto *BB2 : UserBBs)
        if (Checker.hasPathCrossingSuspendPoint(BB1, BB2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we set
        // it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
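//
// A rough sketch of the resulting IR (illustrative; block and value names are
// made up):
//
//    pad:
//      %v = phi ...
//      %cp = cleanuppad within %parent []
//      ; spill stores are inserted before the cleanupret
//      cleanupret from %cp unwind label %pad.split
//    pad.split:
//      %cs = catchswitch ...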
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label PostSpill
//
//  PostSpill:
//    whatever
//
//
static Instruction *insertSpills(const FrameDataInfo &FrameData,
                                 coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  DominatorTree DT(*CB->getFunction());

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the frame slot may be shared with another AllocaInst,
      // so we cast the GEP to the type of the AllocaInst.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = FramePtr->getNextNode();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the coroutine frame pointer is computed.
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of an invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    Builder.CreateStore(Def, G);

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen this use block yet, create a load instruction in
      // it to reload the spilled value from the coroutine frame.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        CurrentReload = Builder.CreateLoad(
            FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
            E.first->getName() + Twine(".reload"));
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by rewriting them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // the reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
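  // All remaining alloca uses therefore occur after coro.begin, so it is safe
  // to replace them with frame GEPs unconditionally below.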
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. Because new dbg.declare instructions have been created for
  // these allocas, we also delete the original dbg.declares and replace other
  // uses with undef. Note: We cannot replace the allocas with GEP instructions
  // indiscriminately, as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallPtrSet<BasicBlock *, 4> SeenDbgBBs;
    TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
    DIBuilder DIB(*Alloca->getModule(), /*AllowUnresolved*/ false);
    Instruction *FirstDbgDecl = nullptr;

    if (!DIs.empty()) {
      FirstDbgDecl = DIB.insertDeclare(G, DIs.front()->getVariable(),
                                       DIs.front()->getExpression(),
                                       DIs.front()->getDebugLoc(), DIs.front());
      SeenDbgBBs.insert(DIs.front()->getParent());
    }
    for (auto *DI : FindDbgDeclareUses(Alloca))
      DI->eraseFromParent();
    replaceDbgUsesWithUndef(Alloca);

    for (Instruction *I : UsersToUpdate) {
      I->replaceUsesOfWith(Alloca, G);

      // After cloning, transformations might not guarantee that all uses
      // of this alloca are dominated by the already existing dbg.declares,
      // compromising the debug quality. Instead of writing another
      // transformation to patch each clone, go ahead and early populate
      // basic blocks that use such allocas with more debug info.
      if (SeenDbgBBs.count(I->getParent()))
        continue;

      // If there isn't a prior dbg.declare for this alloca, it probably
      // means the state hasn't changed prior to one of the relevant suspend
      // points for this frame access.
      if (!FirstDbgDecl)
        continue;

      // These instructions are all dominated by the alloca, so insert the
      // dbg.value at the beginning of the BB to enhance the debugging
      // experience and allow values to be inspected as early as possible.
      // Prefer dbg.value over dbg.declare since it better sets expectations
      // that control flow can be later changed by other passes.
      auto *DI = cast<DbgDeclareInst>(FirstDbgDecl);
      BasicBlock *CurrentBlock = I->getParent();
      auto *DerefExpr =
          DIExpression::append(DI->getExpression(), dwarf::DW_OP_deref);
      DIB.insertDbgValueIntrinsic(G, DI->getVariable(), DerefExpr,
                                  DI->getDebugLoc(),
                                  &*CurrentBlock->getFirstInsertionPt());
      SeenDbgBBs.insert(CurrentBlock);
    }
  }
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate it after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with NewPred in all PHINodes in a block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred, PHINode *Until = nullptr) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (Until == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up. In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB. This
    // happens because the BB list of PHI nodes is usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case EH
// specific handling is done.
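// For a cleanuppad successor, the inserted edge block looks roughly like this
// (illustrative; names are made up):
//
//    newbb:
//      %cp = cleanuppad within %parent []
//      cleanupret from %cp unwind label %succ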
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  // cleanuppad:
  //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
  //    %3 = cleanuppad within none []
  //
  // It will create:
  //
  // cleanuppad.corodispatch
  //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
  //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
  //            [i8 0, label %cleanuppad.from.catchswitch
  //             i8 1, label %cleanuppad.from.catch.1]
  // cleanuppad.from.catchswitch:
  //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
  // cleanuppad.from.catch.1:
  //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
  // cleanuppad:
  //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                 [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(pred_begin(CleanupPadBB),
                                     pred_end(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values to there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Setup the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function,
    // after ehAwareSplitEdge has cloned it into the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge cloned the original landing pad; we no longer
    // need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            &*CurrentBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
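// For example, splitting at an instruction in the middle of a block produces
// a new block, named Name, that begins at that instruction; if the
// instruction already begins a single-predecessor block, that block is merely
// renamed.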
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it ends up alone
// in its own block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that the search will never continue past them.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
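  // (If every free is promptly followed by a suspend or a function exit, the
  // native stack frame is about to be torn down anyway, so an explicit
  // stackrestore would be redundant.)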
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
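  // (Calling through a null function pointer of the matching type acts as a
  // placeholder here; the calls recorded in Shape.SwiftErrorOps are expected
  // to be rewritten into real swifterror accesses when the coroutine is
  // split.)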
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
    // We're likely changing the use list, so use a mutation-safe
    // iteration pattern.
    auto &Use = *UI;
    ++UI;

    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// The retcon and retcon.once conventions assume that all spill uses can be
/// sunk after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.back();
    Worklist.pop_back();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
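    // Since each instruction below is moved immediately before the same fixed
    // insertion point, visiting them in this order re-emits them after
    // coro.begin with every definition ahead of its uses.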
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose uses are all confined to one region between
/// suspend points, sink its lifetime.start marker into the block immediately
/// after the suspend. Doing so minimizes the live range of each variable,
/// and hence minimizes the amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // If all users other than lifetime.start markers are dominated by one
        // of the candidate basic blocks and do not cross suspend points, then
        // there is no need to spill the instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start markers, as well as the GEPs and bitcasts
          // used only by lifetime.start markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink the lifetime.start markers to the dominating block when the
      // alloca is otherwise only used inside the region that block dominates.
      if (Valid && Lifetimes.size() != 0) {
        // May be AI itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // The original lifetime.start markers are no longer necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  // Collect lifetime.start info for each alloca.
  using LifetimeStart = SmallPtrSet<Instruction *, 2>;
  llvm::DenseMap<AllocaInst *, std::unique_ptr<LifetimeStart>> LifetimeMap;
  for (Instruction &I : instructions(F)) {
    auto *II = dyn_cast<IntrinsicInst>(&I);
    if (!II || II->getIntrinsicID() != Intrinsic::lifetime_start)
      continue;

    if (auto *OpInst = dyn_cast<Instruction>(II->getOperand(1))) {
      if (auto *AI = dyn_cast<AllocaInst>(OpInst->stripPointerCasts())) {

        if (LifetimeMap.find(AI) == LifetimeMap.end())
          LifetimeMap[AI] = std::make_unique<LifetimeStart>();
        LifetimeMap[AI]->insert(isa<AllocaInst>(OpInst) ? II : OpInst);
      }
    }
  }

  DominatorTree DT(F);
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    bool ShouldLiveOnFrame = false;
    auto Iter = LifetimeMap.find(AI);
    if (Iter != LifetimeMap.end()) {
      // Check against lifetime.start if the instruction has the info.
      for (User *U : I.users()) {
        for (auto *S : *Iter->second)
          if ((ShouldLiveOnFrame = Checker.isDefinitionAcrossSuspend(*S, U)))
            break;
        if (ShouldLiveOnFrame)
          break;
      }
      if (!ShouldLiveOnFrame)
        continue;
    }
    // At this point, either ShouldLiveOnFrame is true or we didn't have
    // lifetime information. We will need to rely on more precise pointer
    // tracking.
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call.
    // (Arguments to the coro.end instruction would otherwise be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Transform multi-edge PHI nodes so that no value feeding into a PHI has
  // its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I))
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  sinkLifetimeStartMarkers(F, Shape, Checker);
  collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame, so
    // there is no need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }
  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  Shape.FramePtr = insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}