//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover, for a particular value,
// whether there is a path from its definition to a use that crosses a
// suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//
// TODO: pack values tightly using liveness info.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

static cl::opt<bool> EnableReuseStorageInFrame(
    "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc("Enable the optimization which would reuse the storage in the "
             "coroutine frame for allocas whose lifetime ranges do not "
             "overlap, for testing purposes"),
    llvm::cl::init(false));

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between the blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the
// question: given two BasicBlocks A and B, is there a path from A to B that
// passes through a suspend point?
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
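// For illustration, consider a minimal (hypothetical) CFG
//
//   entry -> susp -> resume
//
// where block 'susp' contains a coro.suspend. After the fixed-point
// propagation below, Consumes for 'resume' is {entry, susp, resume} and,
// because 'susp' is a suspend block, Kills for 'resume' is {entry, susp}.
// Hence hasPathCrossingSuspendPoint(entry, resume) is true: any value defined
// in 'entry' and used in 'resume' crosses a suspend point and must be
// preserved in the coroutine frame (or rematerialized; see below).
//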
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; make sure S is not in its own kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need to reload
// it from the frame.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will be first set
  // with their original insertion field index. After the frame is built, their
  // indexes will be updated into the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
};
} // namespace
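
// A minimal sketch of the intended two-phase indexing flow (hypothetical
// variable names; the real flow is in buildFrameType below):
//
//   FrameDataInfo FrameData;
//   FrameData.setFieldIndex(V, Id);   // Id as returned by B.addField*()
//   B.finish(FrameTy);                // assigns the final layout indices
//   FrameData.updateLayoutIndex(B);   // Id -> B.getLayoutFieldIndex(Id)
//   uint32_t Idx = FrameData.getFieldIndex(V); // now a struct element index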

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put the allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///       if (cond) {
  ///         big_structure a;
  ///         process(a);
  ///         co_await something();
  ///       } else {
  ///         big_structure b;
  ///         process2(b);
  ///         co_await something();
  ///       }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts into non-overlapping sets, in order to put allocas in the
  /// same non-overlapping set into the same slot in the coroutine frame. It
  /// then adds one field per set, using the largest type in the set as the
  /// field type.
  ///
  /// Side effects: because we sort the allocas, the order of allocas in the
  /// frame may differ from their order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    Align TyAlignment = DL.getABITypeAlign(Ty);
    if (!FieldAlignment)
      FieldAlignment = TyAlignment;

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }
};
} // namespace
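
// For illustration (hypothetical, 64-bit target): two header fields of
// function pointer type are laid out eagerly at offsets 0 and 8 by addField,
// while non-header fields receive OptimizedStructLayoutField::FlexibleOffset
// and are only assigned concrete offsets by performOptimizedStructLayout()
// when finish() runs.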

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  DenseMap<AllocaInst *, unsigned int> AllocaIndex;
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  // However, this function has multiple exits, so we use this RAII helper to
  // avoid redundant code.
  struct RAIIHelper {
    std::function<void()> func;
    RAIIHelper(std::function<void()> &&func) : func(func) {}
    ~RAIIHelper() { func(); }
  } Helper([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      AllocaIndex[Alloca] = NonOverlappedAllocas.size();
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap in the
  // blocks containing coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // of each alloca. This should be reasonable, since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns, since doing so only prevents putting those allocas in the same
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<uint64_t> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "We can't handle scalable types now.");
    return RetSize.getValue();
  };
  // Put larger allocas in the front, so the larger allocas have higher
  // priority to merge, which can potentially save more space. Also, each
  // AllocaSet stays ordered, so we can easily get the largest alloca in an
  // AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing non-overlapping set that the alloca does not
    // interfere with. If we find one, insert the alloca into that set.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() &&
             "Processing alloca set should not be empty.");
      bool CouldMerge = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      if (!CouldMerge)
        continue;
      AllocaIndex[Alloca] = AllocaIndex[*AllocaSet.begin()];
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      AllocaIndex[Alloca] = NonOverlappedAllocas.size();
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default destination of each switch statement that we
  // modified above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output tells us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}
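
// Continuing the example from the comment on addFieldForAllocas above
// (hypothetical layout): with frame slot reuse enabled, 'a' and 'b' end up in
// the same non-overlapping set, so the frame gets a single field typed after
// the larger of the two allocas and both map to the same field index:
//
//   %alternative_paths.Frame = type { ..., %big_structure, ... }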

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
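//
// For example (hypothetical): in the switch ABI, a coroutine with three
// suspend points gets a suspend-index field of type i2, because
// Log2_64_Ceil(3) == 2 (see the IndexBits computation below); the final order
// of the non-header fields is decided by the optimized struct layout, not by
// insertion order.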
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  FrameTypeBuilder B(C, DL);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // The PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may own the same field slot,
  // we add allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    FieldIDType Id = B.addField(S.first->getType(), None);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // In the switch ABI, remember the switch-index field.
    Shape.SwitchLowering.IndexField =
        B.getLayoutFieldIndex(*SwitchIndexFieldId);

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin, but
//      used after CoroBegin? In that case, we will need to recreate the alias
//      after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its
//      aliases. In the end, we check if there exist any two basic blocks that
//      cross suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some point,
//      either by storing the address somewhere, or by using the address in a
//      function call that might capture. If it's ever escaped, this alloca
//      must be put on the frame conservatively.
// To answer question 2, we track the variable MayWriteBeforeCoroBegin.
// Whenever a potential write happens, either through a store instruction, a
// function call or any of the memory intrinsics, we check whether this
// instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
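//
// For illustration, a (hypothetical) pattern for question 3:
//
//   %x = alloca i64
//   %x.i8 = bitcast i64* %x to i8*
//   %a = getelementptr i8, i8* %x.i8, i64 4   ; alias at offset 4
//   %hdl = call i8* @llvm.coro.begin(...)
//   ...                                       ; %a used after a suspend
//
// %a is created before CoroBegin but used after it, so once %x is moved to
// the frame, %a must be recreated after CoroBegin as the frame address of %x
// plus the recorded offset 4.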
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}

  void visit(Instruction &I) {
    UserBBs.insert(I.getParent());
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //   %ptr = alloca ..
    //   %addr = alloca ..
    //   store %ptr, %addr
    //   %x = load %addr
    //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr to have escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.back();
        StoreAliases.pop_back();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overriding the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset into the original alloca. They need to be recreated after
  // CoroBegin off the frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<BasicBlock *, 2> UserBBs{};
  bool MayWriteBeforeCoroBegin{false};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    if (PI.isEscaped())
      return true;

    for (auto *BB1 : UserBBs)
      for (auto *BB2 : UserBBs)
        if (Checker.hasPathCrossingSuspendPoint(BB1, BB2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we set
        // it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must be
// the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//   %hdl = coro.begin(...)
//   whatever
//
// becomes:
//
//   %hdl = coro.begin(...)
//   %FramePtr = bitcast i8* %hdl to %f.frame*
//   br label %AllocaSpillBB
//
// AllocaSpillBB:
//   ; geps corresponding to allocas that were moved to coroutine frame
//   br label %PostSpill
//
// PostSpill:
//   whatever
//
//
static Instruction *insertSpills(const FrameDataInfo &FrameData,
                                 coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  auto *FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
  DominatorTree DT(*CB->getFunction());

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the alloca may share its frame slot with other
      // allocas. So we cast the GEP to the type of the AllocaInst.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };
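
  // For example (hypothetical IR): for `%x = alloca i32, i32 4` placed in
  // frame field 3, the lambda above produces
  //
  //   %x.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
  //                                    i32 0, i32 3, i32 0
  //
  // where the trailing `i32 0` turns the pointer to the [4 x i32] field back
  // into an i32*, matching the original alloca's type.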

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = FramePtr->getNextNode();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after CoroFrame is computed.
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    Builder.CreateStore(Def, G);

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen this use block yet, create a load instruction to
      // reload the spilled value from the coroutine frame.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        CurrentReload = Builder.CreateLoad(
            FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
            E.first->getName() + Twine(".reload"));
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by rewriting them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // the reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. Because new dbg.declares have been created for these
  // allocas, we also delete the original dbg.declare and replace other uses
  // with undef. Note: We cannot replace the alloca with GEP instructions
  // indiscriminately, as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallPtrSet<BasicBlock *, 4> SeenDbgBBs;
    TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
    DIBuilder DIB(*Alloca->getModule(), /*AllowUnresolved*/ false);
    Instruction *FirstDbgDecl = nullptr;

    if (!DIs.empty()) {
      FirstDbgDecl = DIB.insertDeclare(G, DIs.front()->getVariable(),
                                       DIs.front()->getExpression(),
                                       DIs.front()->getDebugLoc(), DIs.front());
      SeenDbgBBs.insert(DIs.front()->getParent());
    }
    for (auto *DI : FindDbgDeclareUses(Alloca))
      DI->eraseFromParent();
    replaceDbgUsesWithUndef(Alloca);

    for (Instruction *I : UsersToUpdate) {
      I->replaceUsesOfWith(Alloca, G);

      // After cloning, transformations might not guarantee that all uses
      // of this alloca are dominated by the already existing dbg.declares,
      // compromising the debug quality. Instead of writing another
      // transformation to patch each clone, go ahead and early populate
      // basic blocks that use such allocas with more debug info.
      if (SeenDbgBBs.count(I->getParent()))
        continue;

      // If there isn't a prior dbg.declare for this alloca, it probably
      // means the state hasn't changed prior to one of the relevant suspend
      // points for this frame access.
      if (!FirstDbgDecl)
        continue;

      // These instructions are all dominated by the alloca, so insert the
      // dbg.value at the beginning of the BB to enhance the debugging
      // experience and allow values to be inspected as early as possible.
      // Prefer dbg.value over dbg.declare since it better sets expectations
      // that control flow can be later changed by other passes.
      auto *DI = cast<DbgDeclareInst>(FirstDbgDecl);
      BasicBlock *CurrentBlock = I->getParent();
      DIB.insertDbgValueIntrinsic(G, DI->getVariable(), DI->getExpression(),
                                  DI->getDebugLoc(),
                                  &*CurrentBlock->getFirstInsertionPt());
      SeenDbgBBs.insert(CurrentBlock);
    }
  }
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // MayWriteBeforeCoroBegin means the alloca may be modified before
      // CoroBegin, so its current contents must be copied into the frame.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}

// Sets the unwind edge of an instruction to a particular successor.
static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
  if (auto *II = dyn_cast<InvokeInst>(TI))
    II->setUnwindDest(Succ);
  else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
    CS->setUnwindDest(Succ);
  else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
    CR->setUnwindDest(Succ);
  else
    llvm_unreachable("unexpected terminator instruction");
}

// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
// block.
static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
                           BasicBlock *NewPred, PHINode *Until = nullptr) {
  unsigned BBIdx = 0;
  for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // We manually update the LandingPadReplacement PHINode and it is the last
    // PHI Node. So, if we find it, we are done.
    if (Until == PN)
      break;

    // Reuse the previous value of BBIdx if it lines up. In cases where we
    // have multiple phi nodes with *lots* of predecessors, this is a speed
    // win because we don't have to scan the PHI looking for TIBB. This
    // happens because the BB list of PHI nodes are usually in the same
    // order.
    if (PN->getIncomingBlock(BBIdx) != OldPred)
      BBIdx = PN->getBasicBlockIndex(OldPred);

    assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
    PN->setIncomingBlock(BBIdx, NewPred);
  }
}

// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
// specific handling.
static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
                                    LandingPadInst *OriginalPad,
                                    PHINode *LandingPadReplacement) {
  auto *PadInst = Succ->getFirstNonPHI();
  if (!LandingPadReplacement && !PadInst->isEHPad())
    return SplitEdge(BB, Succ);

  auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
  setUnwindEdgeTo(BB->getTerminator(), NewBB);
  updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);

  if (LandingPadReplacement) {
    auto *NewLP = OriginalPad->clone();
    auto *Terminator = BranchInst::Create(Succ, NewBB);
    NewLP->insertBefore(Terminator);
    LandingPadReplacement->addIncoming(NewLP, NewBB);
    return NewBB;
  }
  Value *ParentPad = nullptr;
  if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
    ParentPad = FuncletPad->getParentPad();
  else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
    ParentPad = CatchSwitch->getParentPad();
  else
    llvm_unreachable("handling for other EHPads not implemented yet");

  auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
  CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
  return NewBB;
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  //   cleanuppad:
  //     %2 = phi i32 [%0, %catchswitch], [%1, %catch.1]
  //     %3 = cleanuppad within none []
  //
  // It will create:
  //
  //   cleanuppad.corodispatch:
  //     %2 = phi i8 [0, %catchswitch], [1, %catch.1]
  //     %3 = cleanuppad within none []
  //     switch i8 %2, label %unreachable
  //             [i8 0, label %cleanuppad.from.catchswitch
  //              i8 1, label %cleanuppad.from.catch.1]
  //   cleanuppad.from.catchswitch:
  //     %4 = phi i32 [%0, %catchswitch]
  //     br label %cleanuppad
  //   cleanuppad.from.catch.1:
  //     %6 = phi i32 [%1, %catch.1]
  //     br label %cleanuppad
  //   cleanuppad:
  //     %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                  [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(pred_begin(CleanupPadBB),
                                     pred_end(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values to there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Setup the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  //   loop:
  //     %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  //   loop.from.entry:
  //     %n.loop.pre = phi i32 [%n, %entry]
  //     br label %loop
  //   loop.from.loop:
  //     %inc.loop.pre = phi i32 [%inc, %loop]
  //     br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(pred_begin(&BB), pred_end(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge cloned the original landing pad, so we no
    // longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of the value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            &*CurrentBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
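// For example (names hypothetical), splitting %bb at %inst with Name
// "CoroSave" turns
//
// bb:
//    %x = ...
//    %inst = ...
//
// into
//
// bb:
//    %x = ...
//    br label %CoroSave
// CoroSave:
//    %inst = ...
//
// If %inst is already the first instruction of a single-predecessor block,
// the block is simply renamed instead.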
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass them up.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
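  // E.g. for an i8* swifterror value this builds something like (a sketch;
  // the null callee is a placeholder, not a real function):
  //
  //   %slot = call i8** null(i8* %v)
  //
  // The call is recorded in Shape.SwiftErrorOps so that it can be replaced
  // with a real swifterror access at splitting time.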
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
    // We're likely changing the use list, so use a mutation-safe
    // iteration pattern.
    auto &Use = *UI;
    ++UI;

    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilog.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.back();
    Worklist.pop_back();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  std::sort(InsertionList.begin(), InsertionList.end(),
            [&Dom](Instruction *A, Instruction *B) -> bool {
              // If A dominates B, it should precede (<) B.
              return Dom.dominates(A, B);
            });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose uses are all confined to a single suspended
/// region, sink its lifetime.start marker to the block right after the
/// suspend block. Doing so minimizes the live range of each variable, and
/// hence the amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers: if they are all
        // dominated by one of the candidate basic blocks and do not cross
        // suspend points, then there is no need to spill the instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start markers and the GEPs and bitcasts used by
          // them.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to the dominating block when they are
      // only used outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // May be AI itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // The original lifetime.start markers are no longer necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  // Collect lifetime.start info for each alloca.
  using LifetimeStart = SmallPtrSet<Instruction *, 2>;
  llvm::DenseMap<AllocaInst *, std::unique_ptr<LifetimeStart>> LifetimeMap;
  for (Instruction &I : instructions(F)) {
    auto *II = dyn_cast<IntrinsicInst>(&I);
    if (!II || II->getIntrinsicID() != Intrinsic::lifetime_start)
      continue;

    if (auto *OpInst = dyn_cast<Instruction>(II->getOperand(1))) {
      if (auto *AI = dyn_cast<AllocaInst>(OpInst->stripPointerCasts())) {
        if (LifetimeMap.find(AI) == LifetimeMap.end())
          LifetimeMap[AI] = std::make_unique<LifetimeStart>();
        LifetimeMap[AI]->insert(isa<AllocaInst>(OpInst) ? II : OpInst);
      }
    }
  }

  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    bool ShouldLiveOnFrame = false;
    auto Iter = LifetimeMap.find(AI);
    if (Iter != LifetimeMap.end()) {
      // Check against the lifetime.start markers if the alloca has that info.
      for (User *U : I.users()) {
        for (auto *S : *Iter->second)
          if ((ShouldLiveOnFrame = Checker.isDefinitionAcrossSuspend(*S, U)))
            break;
        if (ShouldLiveOnFrame)
          break;
      }
      if (!ShouldLiveOnFrame)
        continue;
    }
    // At this point, either ShouldLiveOnFrame is true or we didn't have
    // lifetime information. We will need to rely on more precise pointer
    // tracking.
    DominatorTree DT(F);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (CoroEndInst *CE : Shape.CoroEnds)
    splitAround(CE, "CoroEnd");

  // Transform multi-edge PHI Nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I))
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  sinkLifetimeStartMarkers(F, Shape, Checker);
  collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The Coroutine Promise is always included in the coroutine frame, so
    // there is no need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }
  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  // Add PromiseAlloca to the Allocas list so that it is processed in
  // insertSpills.
  if (Shape.ABI == coro::ABI::Switch && Shape.SwitchLowering.PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        Shape.SwitchLowering.PromiseAlloca,
        DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  Shape.FramePtr = insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}