//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend block.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

static cl::opt<bool> EnableReuseStorageInFrame(
    "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc("Enable the optimization which would reuse the storage in the "
             "coroutine frame for allocas whose lifetime ranges do not "
             "overlap, for testing purposes"),
    llvm::cl::init(false));

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths will cross a
//          suspend point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
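    // A rough sketch of the IR shape this relies on (block and value names
    // are illustrative):
    //
    //   susp:                                     ; coro.suspend in its own
    //     %sp = call i8 @llvm.coro.suspend(...)   ; block after splitting
    //     br label %susp.cont
    //   susp.cont:                                ; single successor
    //     switch i8 %sp, ...                      ; use of %sp, treated as
    //                                             ; defined here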
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutines can only collect Arguments and Instructions now.");
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond a coro.end is reachable during the initial invocation of
  // the coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
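        // A hand-worked sketch of the net effect (block names illustrative):
        // for a chain entry -> susp -> use, where susp is a suspend block,
        // the fixed point gives Block[use].Consumes = {entry, susp, use} and
        // Block[use].Kills = {entry, susp}, so a value defined in entry and
        // used in use is reported as crossing a suspend point.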
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data are still
          // available on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  uint64_t getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, uint64_t Align) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will first be
  // set to their original insertion field index. After the frame is built,
  // their indexes will be updated to the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They are set after the
  // frame is built.
  DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset on the frame. They are set after the
  // frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
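// For instance (an illustrative case): on a target where i32 is naturally
// 4-byte aligned, `%x = alloca i32, align 64` must still land on a 64-byte
// boundary inside the frame, so finish() below may have to emit an [N x i8]
// padding field in front of it.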
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
      : DL(DL), Context(Context) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put the allocas whose lifetime ranges do not overlap into
  /// one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, in order to put allocas
  /// in the same non-overlapping set into the same slot in the coroutine
  /// frame. Then we add a field for each non-overlapping set, using the
  /// largest type in the set as the field type.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may be different from the order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size=0, we don't need to add a field and it can
    // just point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    Align TyAlignment = DL.getABITypeAlign(Ty);
    if (!FieldAlignment)
      FieldAlignment = TyAlignment;

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

    // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment.value());
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  // However, this function has multiple exits, so we use this helper to
  // avoid redundant code.
  struct RTTIHelper {
    std::function<void()> func;
    RTTIHelper(std::function<void()> &&func) : func(func) {}
    ~RTTIHelper() { func(); }
  } Helper([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end for each
  // alloca, the live range of every alloca overlaps with every other one in
  // the blocks that contain coro.end and their successor blocks. So we choose
  // to skip these blocks when we calculate the live range for each alloca. It
  // should be reasonable since there shouldn't be uses in these blocks and
  // the coroutine frame shouldn't be used outside the coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it merely prevents putting the allocas in the same slot.
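  // The pattern being retargeted below typically looks like this
  // (illustrative IR):
  //
  //   %sp = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %sp, label %suspend.ret [i8 0, label %resume
  //                                      i8 1, label %cleanup]
  //
  // Temporarily pointing the default destination (the path leading to
  // coro.end) at one of the case successors hides the post-coro.end blocks
  // from the liveness calculation; the original destination is restored
  // afterwards.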
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto IsAllocaInterference = [&](const AllocaInst *AI1,
                                  const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front, so that the larger allocas have higher
  // priority to merge, which can potentially save more space. Also, each
  // AllocaSet is ordered, so we can easily get the largest Alloca in an
  // AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find out whether the Alloca does not interfere with any
    // existing NonOverlappedAllocaSet. If so, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set should not be empty");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return IsAllocaInterference(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A should satisfy the alignment requirement for B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process. But it seems hard
      // to handle these strategies with little benefit.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Recover the default target destination for each switch statement we
  // modified.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output tells us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) != DIVarCache.end())
      continue;

    auto DDIs = FindDbgDeclareUses(V);
    auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
      return DDI->getExpression()->getNumElements() == 0;
    });
    if (I != DDIs.end())
      DIVarCache.insert({V, (*I)->getVariable()});
  }
}

/// Create a name for the Type. It uses MDString to store the newly created
/// string, to avoid a memory leak.
static StringRef solveTypeName(Type *Ty) {
  if (Ty->isIntegerTy()) {
    // The longest name in common use may be '__int_128', which has 9
    // characters.
    SmallString<16> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    auto *MDName = MDString::get(Ty->getContext(), OS.str());
    return MDName->getString();
  }

  if (Ty->isFloatingPointTy()) {
    if (Ty->isFloatTy())
      return "__float_";
    if (Ty->isDoubleTy())
      return "__double_";
    return "__floating_type_";
  }

  if (Ty->isPointerTy()) {
    auto *PtrTy = cast<PointerType>(Ty);
    Type *PointeeTy = PtrTy->getElementType();
    auto Name = solveTypeName(PointeeTy);
    if (Name == "UnknownType")
      return "PointerType";
    SmallString<16> Buffer;
    Twine(Name + "_Ptr").toStringRef(Buffer);
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  if (Ty->isStructTy()) {
    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    auto Name = Ty->getStructName();

    SmallString<16> Buffer(Name);
    for_each(Buffer, [](auto &Iter) {
      if (Iter == '.' || Iter == ':')
        Iter = '_';
    });
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  return "UnknownType";
}

static DIType *solveDIType(DIBuilder &Builder, Type *Ty, DataLayout &Layout,
                           DIScope *Scope, unsigned LineNum,
                           DenseMap<Type *, DIType *> &DITypeCache) {
  if (DIType *DT = DITypeCache.lookup(Ty))
    return DT;

  StringRef Name = solveTypeName(Ty);

  DIType *RetType = nullptr;

  if (Ty->isIntegerTy()) {
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isFloatingPointTy()) {
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_float,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isPointerTy()) {
    // Construct a BasicType instead of a PointerType to avoid an infinite
    // search problem.
    // For example, we would be in trouble if we traversed recursively:
    //
    //   struct Node {
    //       Node* ptr;
    //   };
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
        llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
    RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
///   1. For every value in the frame, we search its dbg.declare uses to find
///      the corresponding debug variable for the value. If we can find the
///      debug variable, we get full and accurate debug information.
///   2. If we can't find a debug variable in step 1, we try to build the
///      DIType from the Type directly. We do this in solveDIType. We only
///      handle integer, float, double, pointer, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info, so we don't need to generate debug info for
  // the frame either.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
  if (DIs.empty())
    return;

  DbgDeclareInst *PromiseDDI = DIs.front();
  DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  DILocation *DILoc = PromiseDDI->getDebugLoc().get();
  unsigned LineNum = PromiseDIVariable->getLine();

  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert({ResumeIndex,
                  DBuilder.createBasicType("__resume_fn",
                                           Layout.getTypeSizeInBits(ResumeFnTy),
                                           dwarf::DW_ATE_address)});
  TyCache.insert(
      {DestroyIndex, DBuilder.createBasicType(
                         "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
                         dwarf::DW_ATE_address)});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) == DIVarCache.end())
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from index to the (Align, Offset) pair.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The Align and Offset of the Resume function and Destroy function are
  // fixed.
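  // Note: the hard-coded values below describe the two leading function
  // pointer fields of the switch-ABI frame header and assume 8-byte
  // pointers, i.e. a 64-bit target.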
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names. For example, there
  // may be many i32 and i64 types in one coroutine, so we use i32_0 and
  // i32_1 to disambiguate, since it makes no sense for the names of the
  // fields to conflict with each other.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle types that are not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy =
          solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The Subprogram has a RetainedNodes field that records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame is
  // optimized out`, which would be more precise.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.FramePtr->getNextNode());
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  FrameTypeBuilder B(C, DL);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // The PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot,
  // we add the allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //   1. updateLayoutIndex can update its index after
  //      `performOptimizedStructLayout`, and
  //   2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed-to value in the
    // frame, instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = FieldType->getPointerElementType();
    FieldIDType Id = B.addField(FieldType, None);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
        = (B.getStructSize() <= Id->getStorageSize() &&
           B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin,
//      which would require copying the data from the alloca to the frame
//      after CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin, but
//      used after CoroBegin? In that case, we will need to recreate the
//      alias after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its
//      aliases. In the end, we check if there exist any two basic blocks
//      that cross suspension points. If so, this alloca must be put on the
//      frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some
//      point, either by storing the address somewhere, or by using the
//      address in a function call that might capture. If it's ever escaped,
//      this alloca must be put on the frame conservatively.
//
// To answer question 2, we track through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either
// through a store instruction, a function call or any of the memory
// intrinsics, we check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
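//
// A sketch of the question-3 situation (illustrative IR only):
//
//   %a = alloca [16 x i8]
//   %alias = getelementptr inbounds [16 x i8], [16 x i8]* %a, i64 0, i64 8
//   %hdl = call i8* @llvm.coro.begin(...)
//   ...                                  ; %alias used after a suspend point
//
// Once %a is moved onto the frame, %alias has to be recomputed after
// CoroBegin as a GEP off the frame slot of %a plus the recorded offset of 8.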
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer-based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca is being written
    // into.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //   %ptr = alloca ..
    //   %addr = alloca ..
    //   store %ptr, %addr
    //   %x = load %addr
    //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, without considering %ptr escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // A BitCastInst creates aliases of the memory location being
          // stored into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    if (II.getIntrinsicID() != Intrinsic::lifetime_start)
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset into the original Alloca. They need to be recreated after
  // CoroBegin off the frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  bool MayWriteBeforeCoroBegin{false};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of a lifetime.start intrinsic and
    // a basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses and indirect uses.
    if (!LifetimeStarts.empty()) {
      for (auto *I : Users)
        for (auto *S : LifetimeStarts)
          if (Checker.isDefinitionAcrossSuspend(*S, I))
            return true;
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (especially the return object).
    // To fix that, we need to:
    //   1) Potentially treat sret as nocapture in calls
    //   2) Special-case the return object and put it on the stack
    //   3) Utilize the lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we
        // set it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill there would violate the
// requirement that a catchswitch, like all other EH pads, must be the first
// non-PHI in a block.
//
// So we split away the catchswitch into a separate block and insert in its
// place:
//
//   cleanuppad <InsertPt> cleanupret
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

static void createFramePtr(coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  Shape.FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from the coroutine frame + loads and stores. Create
// an AllocaSpillBB that will become the new entry block for the resume parts
// of the coroutine:
//
//   %hdl = coro.begin(...)
//   whatever
//
// becomes:
//
//   %hdl = coro.begin(...)
//   %FramePtr = bitcast i8* hdl to %f.frame*
//   br label %AllocaSpillBB
//
//   AllocaSpillBB:
//     ; geps corresponding to allocas that were moved to coroutine frame
//     br label PostSpill
//
//   PostSpill:
//     whatever
//
//
static Instruction *insertSpills(const FrameDataInfo &FrameData,
                                 coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Instruction *FramePtr = Shape.FramePtr;
  DominatorTree DT(*CB->getFunction());
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the frame slot of this AllocaInst may be shared with
      // another AllocaInst. So we cast the GEP to the type of the AllocaInst
      // here to reuse the frame storage.
      //
      // Note: If we change the strategy for dealing with alignment, we need
      // to refine this cast.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    bool NeedToCopyArgPtrValue = false;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = FramePtr->getNextNode();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

      if (Arg->hasByValAttr())
        NeedToCopyArgPtrValue = true;

    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the CoroFrame is computed.
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip past PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (NeedToCopyArgPtrValue) {
      // For byval arguments, we need to store the pointed-to value in the
      // frame, instead of the pointer itself.
      auto *Value =
          Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
      Builder.CreateStore(Value, G);
    } else {
      Builder.CreateStore(Def, G);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block yet, reload the spilled value from
      // the coroutine frame: CurrentReload becomes the frame GEP itself for
      // byval arguments, and a load from that GEP otherwise.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (NeedToCopyArgPtrValue)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() in the CoroCloner.
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
        }
      }

      // If we have a single-edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi-edge
      // PHINodes by rewriting them in the rewritePHIs function.)
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // the reload.
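      // E.g. (sketch, for a hypothetical value %x used after a suspend):
      //
      //   %x.reload.addr = getelementptr inbounds %f.frame,
      //                        %f.frame* %FramePtr, i32 0, i32 <n>
      //   %x.reload = load i64, i64* %x.reload.addr
      //   call void @use(i64 %x.reload)   ; was: call void @use(i64 %x)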
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To preserve debuggability, we replace the uses of allocas
  // in dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableIntrinsic *, 4> DIs;
    findDbgUsers(DIs, Alloca);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate it after CoroBegin by applying the offset
    // to the pointer in the frame.
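    // E.g. (sketch, with hypothetical names): an alias such as
    //
    //   %alias = getelementptr i8, i8* %alloca, i64 8   ; before CoroBegin
    //
    // is rebuilt after CoroBegin as
    //
    //   %addr = <frame GEP for %alloca>
    //   %raw  = bitcast <ty>* %addr to i8*
    //   %alias.new = getelementptr i8, i8* %raw, i64 8
    //
    // and the uses of %alias dominated by CoroBegin are redirected to
    // %alias.new.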
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          Type::getInt8Ty(C), FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  //   cleanuppad:
  //     %2 = phi i32 [%0, %catchswitch], [%1, %catch.1]
  //     %3 = cleanuppad within none []
  //
  // It will create:
  //
  //   cleanuppad.corodispatch:
  //     %2 = phi i8 [0, %catchswitch], [1, %catch.1]
  //     %3 = cleanuppad within none []
  //     switch i8 %2, label %unreachable
  //             [i8 0, label %cleanuppad.from.catchswitch
  //              i8 1, label %cleanuppad.from.catch.1]
  //   cleanuppad.from.catchswitch:
  //     %4 = phi i32 [%0, %catchswitch]
  //     br label %cleanuppad
  //   cleanuppad.from.catch.1:
  //     %6 = phi i32 [%1, %catch.1]
  //     br label %cleanuppad
  //   cleanuppad:
  //     %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                  [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values to there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Set up the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  //   loop:
  //     %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  //   loop.from.entry:
  //     %n.loop.pre = phi i32 [%n, %entry]
  //     br label %loop
  //   loop.from.loop:
  //     %inc.loop.pre = phi i32 [%inc, %loop]
  //     br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect
    // the results from all of them.
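    // E.g. (sketch, with hypothetical predecessor blocks %a and %b):
    //
    //   lpad:                                lpad:
    //     %lp = landingpad {i8*, i32}   =>     %lp = phi {i8*, i32}
    //           cleanup                              [%lp.a, %lpad.from.a],
    //                                                [%lp.b, %lpad.from.b]
    //
    // where each inserted edge block receives its own clone of the original
    // landingpad instruction.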
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function
    // after ehAwareSplitEdge has cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // The calls to ehAwareSplitEdge cloned the original landing pad, so we
    // no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that lives across a suspend point, recreate that
// value after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {

        bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
        CurrentBlock = IsInCoroSuspendBlock
                           ? U->getParent()->getSinglePredecessor()
                           : U->getParent();
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            IsInCoroSuspendBlock ? CurrentBlock->getTerminator()
                                 : &*CurrentBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
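// E.g. (sketch) when %I is not the first instruction, splitting "bb" at %I
// with name "Name" gives:
//
//   bb:                        bb:
//     %a = ...                   %a = ...
//     %I = ...          =>       br label %Name
//     ...                      Name:
//                                %I = ...
//                                ...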
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass them up.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst *> LocalAllocas,
                              SmallVectorImpl<Instruction *> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

        // Replace frees with stackrestores. This is safe because
        // alloca.alloc is required to obey a stack discipline, although we
        // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction *> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {V});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE;) {
    // We're likely changing the use list, so use a mutation-safe
    // iteration pattern.
    auto &Use = *UI;
    ++UI;

    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst *> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void)emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void)emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst *, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr())
      continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError())
      continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose users all lie inside a single region
/// between suspend points, we sink its lifetime.start marker to a point
/// after the suspend block. Doing so minimizes the lifetime of each variable,
/// hence minimizing the amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction *I) {
        if (auto *II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers: if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start markers, and GEPs and bitcasts used only
          // by lifetime.start markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to the dominating block when they are
      // only used outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // May be AI itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value * {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // The outside lifetime.start markers are no longer necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    DominatorTree DT(F);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool ReuseFrameSlot) {
  Function *F = DVI->getFunction();
  IRBuilder<> Builder(F->getContext());
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert into a DIExpression.
  bool OutermostLoad = true;
  Value *Storage = DVI->getVariableLocationOp(0);
  Value *OriginalStorage = Storage;
  while (Storage) {
    if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location, no DW_OP_deref operation for the
      // last direct load from an alloca is necessary. This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!OutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
      OutermostLoad = false;
    } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
      Storage = StInst->getOperand(0);
    } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
      SmallVector<Value *> AdditionalValues;
      DIExpression *SalvagedExpr = llvm::salvageDebugInfoImpl(
          *GEPInst, Expr,
          /*WithStackValue=*/false, 0, AdditionalValues);
      // Debug declares cannot currently handle additional location
      // operands.
      if (!SalvagedExpr || !AdditionalValues.empty())
        break;
      Expr = SalvagedExpr;
      Storage = GEPInst->getOperand(0);
    } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
      Storage = BCInst->getOperand(0);
    else
      break;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  //
  // Avoid creating the alloca when it would be eliminated by later
  // optimization passes, which would leave the corresponding
  // dbg.declares invalid.
  if (!ReuseFrameSlot && !EnableReuseStorageInFrame)
    if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
      auto &Cached = DbgPtrAllocaCache[Storage];
      if (!Cached) {
        Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                      Arg->getName() + ".debug");
        Builder.CreateStore(Storage, Cached);
      }
      Storage = Cached;
      // FIXME: LLVM lacks nuanced semantics to differentiate between
      // memory and direct locations at the IR level. The backend will
      // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
      // location.
      // Thus, if there are deref and offset operations in the
      // expression, we need to add a DW_OP_deref at the *start* of the
      // expression to first load the contents of the alloca before
      // adjusting it with the expression.
      if (Expr && Expr->isComplex())
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }

  DVI->replaceVariableLocationOp(OriginalStorage, Storage);
  DVI->setExpression(Expr);
  // It makes no sense to move the dbg.value intrinsic.
  if (!isa<DbgValueInst>(DVI)) {
    if (auto *InsertPt = dyn_cast<Instruction>(Storage))
      DVI->moveAfter(InsertPt);
    else if (isa<Argument>(Storage))
      DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
  }
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call function call. (Arguments to the coro.end
    // instructions would be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Transform multi-edge PHI Nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst *, 4> LocalAllocas;
  SmallVector<Instruction *, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I)) {
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

          // Manually add dbg.value metadata uses of I.
          SmallVector<DbgValueInst *, 16> DVIs;
          findDbgValues(DVIs, &I);
          for (auto *DVI : DVIs)
            if (Checker.isDefinitionAcrossSuspend(I, DVI))
              Spills[&I].push_back(DVI);
        }

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  sinkLifetimeStartMarkers(F, Shape, Checker);
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The Coroutine Promise is always included in the coroutine frame, so
    // there is no need to check it for suspend crossings.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage DbgValueInsts whose value is already in
  // the frame.
  // The dbg.values for allocas are handled specially elsewhere.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
    });
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}
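// For reference, a sketch of the frame that buildCoroutineFrame produces for
// a simple switch-ABI coroutine (field order, the width of the suspend index,
// and the names are illustrative only; the exact layout is chosen by
// buildFrameType and the optimized struct layout):
//
//   %f.Frame = type {
//     void (%f.Frame*)*,   ; resume function pointer
//     void (%f.Frame*)*,   ; destroy function pointer
//     %promise_type,       ; coroutine promise
//     iN,                  ; suspend index (width depends on the number of
//                          ; suspend points)
//     ...                  ; spilled values and allocas
//   }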