//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

static cl::opt<bool> EnableReuseStorageInFrame(
    "reuse-storage-in-coroutine-frame", cl::Hidden,
    cl::desc(
        "Enable the optimization which would reuse the storage in the coroutine \
         frame for allocas whose lifetime ranges do not overlap, for testing purposes"),
    llvm::cl::init(false));

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and their index numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer, for two
// given BasicBlocks A and B, whether there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes:  a bit vector which contains a set of indices of blocks that can
//              reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
88 // End: a boolean indicating whether block 'i' contains a coro.end intrinsic. 89 // 90 namespace { 91 struct SuspendCrossingInfo { 92 BlockToIndexMapping Mapping; 93 94 struct BlockData { 95 BitVector Consumes; 96 BitVector Kills; 97 bool Suspend = false; 98 bool End = false; 99 }; 100 SmallVector<BlockData, SmallVectorThreshold> Block; 101 102 iterator_range<succ_iterator> successors(BlockData const &BD) const { 103 BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]); 104 return llvm::successors(BB); 105 } 106 107 BlockData &getBlockData(BasicBlock *BB) { 108 return Block[Mapping.blockToIndex(BB)]; 109 } 110 111 void dump() const; 112 void dump(StringRef Label, BitVector const &BV) const; 113 114 SuspendCrossingInfo(Function &F, coro::Shape &Shape); 115 116 bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const { 117 size_t const DefIndex = Mapping.blockToIndex(DefBB); 118 size_t const UseIndex = Mapping.blockToIndex(UseBB); 119 120 bool const Result = Block[UseIndex].Kills[DefIndex]; 121 LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName() 122 << " answer is " << Result << "\n"); 123 return Result; 124 } 125 126 bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const { 127 auto *I = cast<Instruction>(U); 128 129 // We rewrote PHINodes, so that only the ones with exactly one incoming 130 // value need to be analyzed. 131 if (auto *PN = dyn_cast<PHINode>(I)) 132 if (PN->getNumIncomingValues() > 1) 133 return false; 134 135 BasicBlock *UseBB = I->getParent(); 136 137 // As a special case, treat uses by an llvm.coro.suspend.retcon or an 138 // llvm.coro.suspend.async as if they were uses in the suspend's single 139 // predecessor: the uses conceptually occur before the suspend. 140 if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) { 141 UseBB = UseBB->getSinglePredecessor(); 142 assert(UseBB && "should have split coro.suspend into its own block"); 143 } 144 145 return hasPathCrossingSuspendPoint(DefBB, UseBB); 146 } 147 148 bool isDefinitionAcrossSuspend(Argument &A, User *U) const { 149 return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U); 150 } 151 152 bool isDefinitionAcrossSuspend(Instruction &I, User *U) const { 153 auto *DefBB = I.getParent(); 154 155 // As a special case, treat values produced by an llvm.coro.suspend.* 156 // as if they were defined in the single successor: the uses 157 // conceptually occur after the suspend. 
158 if (isa<AnyCoroSuspendInst>(I)) { 159 DefBB = DefBB->getSingleSuccessor(); 160 assert(DefBB && "should have split coro.suspend into its own block"); 161 } 162 163 return isDefinitionAcrossSuspend(DefBB, U); 164 } 165 166 bool isDefinitionAcrossSuspend(Value &V, User *U) const { 167 if (auto *Arg = dyn_cast<Argument>(&V)) 168 return isDefinitionAcrossSuspend(*Arg, U); 169 if (auto *Inst = dyn_cast<Instruction>(&V)) 170 return isDefinitionAcrossSuspend(*Inst, U); 171 172 llvm_unreachable( 173 "Coroutine could only collect Argument and Instruction now."); 174 } 175 }; 176 } // end anonymous namespace 177 178 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 179 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label, 180 BitVector const &BV) const { 181 dbgs() << Label << ":"; 182 for (size_t I = 0, N = BV.size(); I < N; ++I) 183 if (BV[I]) 184 dbgs() << " " << Mapping.indexToBlock(I)->getName(); 185 dbgs() << "\n"; 186 } 187 188 LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const { 189 for (size_t I = 0, N = Block.size(); I < N; ++I) { 190 BasicBlock *const B = Mapping.indexToBlock(I); 191 dbgs() << B->getName() << ":\n"; 192 dump(" Consumes", Block[I].Consumes); 193 dump(" Kills", Block[I].Kills); 194 } 195 dbgs() << "\n"; 196 } 197 #endif 198 199 SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape) 200 : Mapping(F) { 201 const size_t N = Mapping.size(); 202 Block.resize(N); 203 204 // Initialize every block so that it consumes itself 205 for (size_t I = 0; I < N; ++I) { 206 auto &B = Block[I]; 207 B.Consumes.resize(N); 208 B.Kills.resize(N); 209 B.Consumes.set(I); 210 } 211 212 // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as 213 // the code beyond coro.end is reachable during initial invocation of the 214 // coroutine. 215 for (auto *CE : Shape.CoroEnds) 216 getBlockData(CE->getParent()).End = true; 217 218 // Mark all suspend blocks and indicate that they kill everything they 219 // consume. Note, that crossing coro.save also requires a spill, as any code 220 // between coro.save and coro.suspend may resume the coroutine and all of the 221 // state needs to be saved by that time. 222 auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) { 223 BasicBlock *SuspendBlock = BarrierInst->getParent(); 224 auto &B = getBlockData(SuspendBlock); 225 B.Suspend = true; 226 B.Kills |= B.Consumes; 227 }; 228 for (auto *CSI : Shape.CoroSuspends) { 229 markSuspendBlock(CSI); 230 if (auto *Save = CSI->getCoroSave()) 231 markSuspendBlock(Save); 232 } 233 234 // Iterate propagating consumes and kills until they stop changing. 235 int Iteration = 0; 236 (void)Iteration; 237 238 bool Changed; 239 do { 240 LLVM_DEBUG(dbgs() << "iteration " << ++Iteration); 241 LLVM_DEBUG(dbgs() << "==============\n"); 242 243 Changed = false; 244 for (size_t I = 0; I < N; ++I) { 245 auto &B = Block[I]; 246 for (BasicBlock *SI : successors(B)) { 247 248 auto SuccNo = Mapping.blockToIndex(SI); 249 250 // Saved Consumes and Kills bitsets so that it is easy to see 251 // if anything changed after propagation. 252 auto &S = Block[SuccNo]; 253 auto SavedConsumes = S.Consumes; 254 auto SavedKills = S.Kills; 255 256 // Propagate Kills and Consumes from block B into its successor S. 257 S.Consumes |= B.Consumes; 258 S.Kills |= B.Kills; 259 260 // If block B is a suspend block, it should propagate kills into the 261 // its successor for every block B consumes. 
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks it
          // consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
324 SmallVector<AllocaInfo, 8> Allocas; 325 326 SmallVector<Value *, 8> getAllDefs() const { 327 SmallVector<Value *, 8> Defs; 328 for (const auto &P : Spills) 329 Defs.push_back(P.first); 330 for (const auto &A : Allocas) 331 Defs.push_back(A.Alloca); 332 return Defs; 333 } 334 335 uint32_t getFieldIndex(Value *V) const { 336 auto Itr = FieldIndexMap.find(V); 337 assert(Itr != FieldIndexMap.end() && 338 "Value does not have a frame field index"); 339 return Itr->second; 340 } 341 342 void setFieldIndex(Value *V, uint32_t Index) { 343 assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) && 344 "Cannot set the index for the same field twice."); 345 FieldIndexMap[V] = Index; 346 } 347 348 uint64_t getAlign(Value *V) const { 349 auto Iter = FieldAlignMap.find(V); 350 assert(Iter != FieldAlignMap.end()); 351 return Iter->second; 352 } 353 354 void setAlign(Value *V, uint64_t Align) { 355 assert(FieldAlignMap.count(V) == 0); 356 FieldAlignMap.insert({V, Align}); 357 } 358 359 uint64_t getOffset(Value *V) const { 360 auto Iter = FieldOffsetMap.find(V); 361 assert(Iter != FieldOffsetMap.end()); 362 return Iter->second; 363 } 364 365 void setOffset(Value *V, uint64_t Offset) { 366 assert(FieldOffsetMap.count(V) == 0); 367 FieldOffsetMap.insert({V, Offset}); 368 } 369 370 // Remap the index of every field in the frame, using the final layout index. 371 void updateLayoutIndex(FrameTypeBuilder &B); 372 373 private: 374 // LayoutIndexUpdateStarted is used to avoid updating the index of any field 375 // twice by mistake. 376 bool LayoutIndexUpdateStarted = false; 377 // Map from values to their slot indexes on the frame. They will be first set 378 // with their original insertion field index. After the frame is built, their 379 // indexes will be updated into the final layout index. 380 DenseMap<Value *, uint32_t> FieldIndexMap; 381 // Map from values to their alignment on the frame. They would be set after 382 // the frame is built. 383 DenseMap<Value *, uint64_t> FieldAlignMap; 384 // Map from values to their offset on the frame. They would be set after 385 // the frame is built. 386 DenseMap<Value *, uint64_t> FieldOffsetMap; 387 }; 388 } // namespace 389 390 #ifndef NDEBUG 391 static void dumpSpills(StringRef Title, const SpillInfo &Spills) { 392 dbgs() << "------------- " << Title << "--------------\n"; 393 for (const auto &E : Spills) { 394 E.first->dump(); 395 dbgs() << " user: "; 396 for (auto *I : E.second) 397 I->dump(); 398 } 399 } 400 401 static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) { 402 dbgs() << "------------- Allocas --------------\n"; 403 for (const auto &A : Allocas) { 404 A.Alloca->dump(); 405 } 406 } 407 #endif 408 409 namespace { 410 using FieldIDType = size_t; 411 // We cannot rely solely on natural alignment of a type when building a 412 // coroutine frame and if the alignment specified on the Alloca instruction 413 // differs from the natural alignment of the alloca type we will need to insert 414 // padding. 
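//
// A rough illustration (not tied to any particular target or test case): an
// `alloca i32, align 8` has a natural type alignment of 4 but a requested
// alignment of 8. If the preceding frame field ends at offset 4, the i32
// cannot simply be placed at offset 4; a padding field (e.g. [4 x i8]) has to
// be inserted so that the value lands at an offset that is a multiple of 8.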
415 class FrameTypeBuilder { 416 private: 417 struct Field { 418 uint64_t Size; 419 uint64_t Offset; 420 Type *Ty; 421 FieldIDType LayoutFieldIndex; 422 Align Alignment; 423 Align TyAlignment; 424 }; 425 426 const DataLayout &DL; 427 LLVMContext &Context; 428 uint64_t StructSize = 0; 429 Align StructAlign; 430 bool IsFinished = false; 431 432 Optional<Align> MaxFrameAlignment; 433 434 SmallVector<Field, 8> Fields; 435 DenseMap<Value*, unsigned> FieldIndexByKey; 436 437 public: 438 FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL, 439 Optional<Align> MaxFrameAlignment) 440 : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {} 441 442 /// Add a field to this structure for the storage of an `alloca` 443 /// instruction. 444 LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI, 445 bool IsHeader = false) { 446 Type *Ty = AI->getAllocatedType(); 447 448 // Make an array type if this is a static array allocation. 449 if (AI->isArrayAllocation()) { 450 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) 451 Ty = ArrayType::get(Ty, CI->getValue().getZExtValue()); 452 else 453 report_fatal_error("Coroutines cannot handle non static allocas yet"); 454 } 455 456 return addField(Ty, AI->getAlign(), IsHeader); 457 } 458 459 /// We want to put the allocas whose lifetime-ranges are not overlapped 460 /// into one slot of coroutine frame. 461 /// Consider the example at:https://bugs.llvm.org/show_bug.cgi?id=45566 462 /// 463 /// cppcoro::task<void> alternative_paths(bool cond) { 464 /// if (cond) { 465 /// big_structure a; 466 /// process(a); 467 /// co_await something(); 468 /// } else { 469 /// big_structure b; 470 /// process2(b); 471 /// co_await something(); 472 /// } 473 /// } 474 /// 475 /// We want to put variable a and variable b in the same slot to 476 /// reduce the size of coroutine frame. 477 /// 478 /// This function use StackLifetime algorithm to partition the AllocaInsts in 479 /// Spills to non-overlapped sets in order to put Alloca in the same 480 /// non-overlapped set into the same slot in the Coroutine Frame. Then add 481 /// field for the allocas in the same non-overlapped set by using the largest 482 /// type as the field type. 483 /// 484 /// Side Effects: Because We sort the allocas, the order of allocas in the 485 /// frame may be different with the order in the source code. 486 void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData, 487 coro::Shape &Shape); 488 489 /// Add a field to this structure. 490 LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment, 491 bool IsHeader = false, 492 bool IsSpillOfValue = false) { 493 assert(!IsFinished && "adding fields to a finished builder"); 494 assert(Ty && "must provide a type for a field"); 495 496 // The field size is always the alloc size of the type. 497 uint64_t FieldSize = DL.getTypeAllocSize(Ty); 498 499 // For an alloca with size=0, we don't need to add a field and they 500 // can just point to any index in the frame. Use index 0. 501 if (FieldSize == 0) { 502 return 0; 503 } 504 505 // The field alignment might not be the type alignment, but we need 506 // to remember the type alignment anyway to build the type. 507 // If we are spilling values we don't need to worry about ABI alignment 508 // concerns. 509 auto ABIAlign = DL.getABITypeAlign(Ty); 510 Align TyAlignment = 511 (IsSpillOfValue && MaxFrameAlignment) 512 ? (*MaxFrameAlignment < ABIAlign ? 
*MaxFrameAlignment : ABIAlign)
            : ABIAlign;
    if (!FieldAlignment) {
      FieldAlignment = TyAlignment;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

      // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment.value());
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlapedAllocas;

  // We need to add a field for the allocas at the end of this function.
  // However, this function has multiple exits, so we use this helper to avoid
  // redundant code.
  struct RTTIHelper {
    std::function<void()> func;
    RTTIHelper(std::function<void()> &&func) : func(func) {}
    ~RTTIHelper() { func(); }
  } Helper([&]() {
    for (auto AllocaList : NonOverlapedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the life ranges of all allocas overlap
  // in the blocks that contain coro.end and in their successor blocks.
  // So we choose to skip these blocks when we calculate the life range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas in the same
  // slot.
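  //
  // To skip those blocks, the code below temporarily retargets the default
  // destination (normally the path towards coro.end) of every switch fed by a
  // coro.suspend to the switch's first non-default successor, runs the
  // StackLifetime analysis, and then restores the original default
  // destinations afterwards. A hypothetical example (block names are made up
  // for this illustration):
  //
  //   switch i8 %suspend.result, label %coro.ret [i8 0, label %resume
  //                                               i8 1, label %cleanup]
  //
  // is analyzed as if the default edge led to %resume rather than %coro.ret.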
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto IsAllocaInferenre = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front so that larger allocas have higher
  // priority to merge, which can potentially save more space. Also, each
  // AllocaSet will be ordered, so we can easily get the largest alloca in an
  // AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing NonOverlappedAllocaSet that the Alloca does not
    // interfere with. If there is one, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlapedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInference = none_of(AllocaSet, [&](auto Iter) {
        return IsAllocaInferenre(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the address
      // of A will also satisfy the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process, but they seem hard
      // to implement for little benefit.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default target destination for each switch statement that we
  // retargeted above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output shows which allocas are merged into one slot.
693 LLVM_DEBUG(for (auto &AllocaSet 694 : NonOverlapedAllocas) { 695 if (AllocaSet.size() > 1) { 696 dbgs() << "In Function:" << F.getName() << "\n"; 697 dbgs() << "Find Union Set " 698 << "\n"; 699 dbgs() << "\tAllocas are \n"; 700 for (auto Alloca : AllocaSet) 701 dbgs() << "\t\t" << *Alloca << "\n"; 702 } 703 }); 704 } 705 706 void FrameTypeBuilder::finish(StructType *Ty) { 707 assert(!IsFinished && "already finished!"); 708 709 // Prepare the optimal-layout field array. 710 // The Id in the layout field is a pointer to our Field for it. 711 SmallVector<OptimizedStructLayoutField, 8> LayoutFields; 712 LayoutFields.reserve(Fields.size()); 713 for (auto &Field : Fields) { 714 LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment, 715 Field.Offset); 716 } 717 718 // Perform layout. 719 auto SizeAndAlign = performOptimizedStructLayout(LayoutFields); 720 StructSize = SizeAndAlign.first; 721 StructAlign = SizeAndAlign.second; 722 723 auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & { 724 return *static_cast<Field *>(const_cast<void*>(LayoutField.Id)); 725 }; 726 727 // We need to produce a packed struct type if there's a field whose 728 // assigned offset isn't a multiple of its natural type alignment. 729 bool Packed = [&] { 730 for (auto &LayoutField : LayoutFields) { 731 auto &F = getField(LayoutField); 732 if (!isAligned(F.TyAlignment, LayoutField.Offset)) 733 return true; 734 } 735 return false; 736 }(); 737 738 // Build the struct body. 739 SmallVector<Type*, 16> FieldTypes; 740 FieldTypes.reserve(LayoutFields.size() * 3 / 2); 741 uint64_t LastOffset = 0; 742 for (auto &LayoutField : LayoutFields) { 743 auto &F = getField(LayoutField); 744 745 auto Offset = LayoutField.Offset; 746 747 // Add a padding field if there's a padding gap and we're either 748 // building a packed struct or the padding gap is more than we'd 749 // get from aligning to the field type's natural alignment. 750 assert(Offset >= LastOffset); 751 if (Offset != LastOffset) { 752 if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset) 753 FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context), 754 Offset - LastOffset)); 755 } 756 757 F.Offset = Offset; 758 F.LayoutFieldIndex = FieldTypes.size(); 759 760 FieldTypes.push_back(F.Ty); 761 LastOffset = Offset + F.Size; 762 } 763 764 Ty->setBody(FieldTypes, Packed); 765 766 #ifndef NDEBUG 767 // Check that the IR layout matches the offsets we expect. 768 auto Layout = DL.getStructLayout(Ty); 769 for (auto &F : Fields) { 770 assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty); 771 assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset); 772 } 773 #endif 774 775 IsFinished = true; 776 } 777 778 static void cacheDIVar(FrameDataInfo &FrameData, 779 DenseMap<Value *, DILocalVariable *> &DIVarCache) { 780 for (auto *V : FrameData.getAllDefs()) { 781 if (DIVarCache.find(V) != DIVarCache.end()) 782 continue; 783 784 auto DDIs = FindDbgDeclareUses(V); 785 auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) { 786 return DDI->getExpression()->getNumElements() == 0; 787 }); 788 if (I != DDIs.end()) 789 DIVarCache.insert({V, (*I)->getVariable()}); 790 } 791 } 792 793 /// Create name for Type. It uses MDString to store new created string to 794 /// avoid memory leak. 795 static StringRef solveTypeName(Type *Ty) { 796 if (Ty->isIntegerTy()) { 797 // The longest name in common may be '__int_128', which has 9 bits. 
798 SmallString<16> Buffer; 799 raw_svector_ostream OS(Buffer); 800 OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth(); 801 auto *MDName = MDString::get(Ty->getContext(), OS.str()); 802 return MDName->getString(); 803 } 804 805 if (Ty->isFloatingPointTy()) { 806 if (Ty->isFloatTy()) 807 return "__float_"; 808 if (Ty->isDoubleTy()) 809 return "__double_"; 810 return "__floating_type_"; 811 } 812 813 if (Ty->isPointerTy()) { 814 auto *PtrTy = cast<PointerType>(Ty); 815 Type *PointeeTy = PtrTy->getElementType(); 816 auto Name = solveTypeName(PointeeTy); 817 if (Name == "UnknownType") 818 return "PointerType"; 819 SmallString<16> Buffer; 820 Twine(Name + "_Ptr").toStringRef(Buffer); 821 auto *MDName = MDString::get(Ty->getContext(), Buffer.str()); 822 return MDName->getString(); 823 } 824 825 if (Ty->isStructTy()) { 826 if (!cast<StructType>(Ty)->hasName()) 827 return "__LiteralStructType_"; 828 829 auto Name = Ty->getStructName(); 830 831 SmallString<16> Buffer(Name); 832 for_each(Buffer, [](auto &Iter) { 833 if (Iter == '.' || Iter == ':') 834 Iter = '_'; 835 }); 836 auto *MDName = MDString::get(Ty->getContext(), Buffer.str()); 837 return MDName->getString(); 838 } 839 840 return "UnknownType"; 841 } 842 843 static DIType *solveDIType(DIBuilder &Builder, Type *Ty, 844 const DataLayout &Layout, DIScope *Scope, 845 unsigned LineNum, 846 DenseMap<Type *, DIType *> &DITypeCache) { 847 if (DIType *DT = DITypeCache.lookup(Ty)) 848 return DT; 849 850 StringRef Name = solveTypeName(Ty); 851 852 DIType *RetType = nullptr; 853 854 if (Ty->isIntegerTy()) { 855 auto BitWidth = cast<IntegerType>(Ty)->getBitWidth(); 856 RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed, 857 llvm::DINode::FlagArtificial); 858 } else if (Ty->isFloatingPointTy()) { 859 RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty), 860 dwarf::DW_ATE_float, 861 llvm::DINode::FlagArtificial); 862 } else if (Ty->isPointerTy()) { 863 // Construct BasicType instead of PointerType to avoid infinite 864 // search problem. 
    // For example, we would be in trouble if we traverse recursively:
    //
    //  struct Node {
    //      Node* ptr;
    //  };
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
        llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
    RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
///   1. For every value in the frame, we search the uses of dbg.declare to
///      find the corresponding debug variable for the value. If we can find
///      the debug variable, we can get full and accurate debug information.
///   2. If we can't get debug information in step 1, we try to build the
///      DIType from the value's Type. We do this in solveDIType. We only
///      handle integer, float, double, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function is not compiled
  // with debug info, so we also don't need to generate debug info for the
  // frame.
927 if (!DIS || !DIS->getUnit() || 928 !dwarf::isCPlusPlus( 929 (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage())) 930 return; 931 932 assert(Shape.ABI == coro::ABI::Switch && 933 "We could only build debug infomation for C++ coroutine now.\n"); 934 935 DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false); 936 937 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca(); 938 assert(PromiseAlloca && 939 "Coroutine with switch ABI should own Promise alloca"); 940 941 TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca); 942 if (DIs.empty()) 943 return; 944 945 DbgDeclareInst *PromiseDDI = DIs.front(); 946 DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable(); 947 DILocalScope *PromiseDIScope = PromiseDIVariable->getScope(); 948 DIFile *DFile = PromiseDIScope->getFile(); 949 DILocation *DILoc = PromiseDDI->getDebugLoc().get(); 950 unsigned LineNum = PromiseDIVariable->getLine(); 951 952 DICompositeType *FrameDITy = DBuilder.createStructType( 953 DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8, 954 Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr, 955 llvm::DINodeArray()); 956 StructType *FrameTy = Shape.FrameTy; 957 SmallVector<Metadata *, 16> Elements; 958 DataLayout Layout = F.getParent()->getDataLayout(); 959 960 DenseMap<Value *, DILocalVariable *> DIVarCache; 961 cacheDIVar(FrameData, DIVarCache); 962 963 unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume; 964 unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy; 965 unsigned IndexIndex = Shape.SwitchLowering.IndexField; 966 967 DenseMap<unsigned, StringRef> NameCache; 968 NameCache.insert({ResumeIndex, "__resume_fn"}); 969 NameCache.insert({DestroyIndex, "__destroy_fn"}); 970 NameCache.insert({IndexIndex, "__coro_index"}); 971 972 Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex), 973 *DestroyFnTy = FrameTy->getElementType(DestroyIndex), 974 *IndexTy = FrameTy->getElementType(IndexIndex); 975 976 DenseMap<unsigned, DIType *> TyCache; 977 TyCache.insert({ResumeIndex, 978 DBuilder.createBasicType("__resume_fn", 979 Layout.getTypeSizeInBits(ResumeFnTy), 980 dwarf::DW_ATE_address)}); 981 TyCache.insert( 982 {DestroyIndex, DBuilder.createBasicType( 983 "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy), 984 dwarf::DW_ATE_address)}); 985 986 /// FIXME: If we fill the field `SizeInBits` with the actual size of 987 /// __coro_index in bits, then __coro_index wouldn't show in the debugger. 988 TyCache.insert({IndexIndex, DBuilder.createBasicType( 989 "__coro_index", 990 (Layout.getTypeSizeInBits(IndexTy) < 8) 991 ? 8 992 : Layout.getTypeSizeInBits(IndexTy), 993 dwarf::DW_ATE_unsigned_char)}); 994 995 for (auto *V : FrameData.getAllDefs()) { 996 if (DIVarCache.find(V) == DIVarCache.end()) 997 continue; 998 999 auto Index = FrameData.getFieldIndex(V); 1000 1001 NameCache.insert({Index, DIVarCache[V]->getName()}); 1002 TyCache.insert({Index, DIVarCache[V]->getType()}); 1003 } 1004 1005 // Cache from index to (Align, Offset Pair) 1006 DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache; 1007 // The Align and Offset of Resume function and Destroy function are fixed. 
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names, e.g., there may be
  // many i32 and i64 types in one coroutine, so we use i32_0 and i32_1 to
  // distinguish them, since it makes no sense for the names of the fields to
  // conflict with each other.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle types that are not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame is
  // optimized out`, which is more precise.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.FramePtr->getNextNode());
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
1094 // }; 1095 static StructType *buildFrameType(Function &F, coro::Shape &Shape, 1096 FrameDataInfo &FrameData) { 1097 LLVMContext &C = F.getContext(); 1098 const DataLayout &DL = F.getParent()->getDataLayout(); 1099 StructType *FrameTy = [&] { 1100 SmallString<32> Name(F.getName()); 1101 Name.append(".Frame"); 1102 return StructType::create(C, Name); 1103 }(); 1104 1105 // We will use this value to cap the alignment of spilled values. 1106 Optional<Align> MaxFrameAlignment; 1107 if (Shape.ABI == coro::ABI::Async) 1108 MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment(); 1109 FrameTypeBuilder B(C, DL, MaxFrameAlignment); 1110 1111 AllocaInst *PromiseAlloca = Shape.getPromiseAlloca(); 1112 Optional<FieldIDType> SwitchIndexFieldId; 1113 1114 if (Shape.ABI == coro::ABI::Switch) { 1115 auto *FramePtrTy = FrameTy->getPointerTo(); 1116 auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy, 1117 /*IsVarArg=*/false); 1118 auto *FnPtrTy = FnTy->getPointerTo(); 1119 1120 // Add header fields for the resume and destroy functions. 1121 // We can rely on these being perfectly packed. 1122 (void)B.addField(FnPtrTy, None, /*header*/ true); 1123 (void)B.addField(FnPtrTy, None, /*header*/ true); 1124 1125 // PromiseAlloca field needs to be explicitly added here because it's 1126 // a header field with a fixed offset based on its alignment. Hence it 1127 // needs special handling and cannot be added to FrameData.Allocas. 1128 if (PromiseAlloca) 1129 FrameData.setFieldIndex( 1130 PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true)); 1131 1132 // Add a field to store the suspend index. This doesn't need to 1133 // be in the header. 1134 unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size())); 1135 Type *IndexType = Type::getIntNTy(C, IndexBits); 1136 1137 SwitchIndexFieldId = B.addField(IndexType, None); 1138 } else { 1139 assert(PromiseAlloca == nullptr && "lowering doesn't support promises"); 1140 } 1141 1142 // Because multiple allocas may own the same field slot, 1143 // we add allocas to field here. 1144 B.addFieldForAllocas(F, FrameData, Shape); 1145 // Add PromiseAlloca to Allocas list so that 1146 // 1. updateLayoutIndex could update its index after 1147 // `performOptimizedStructLayout` 1148 // 2. it is processed in insertSpills. 1149 if (Shape.ABI == coro::ABI::Switch && PromiseAlloca) 1150 // We assume that the promise alloca won't be modified before 1151 // CoroBegin and no alias will be create before CoroBegin. 1152 FrameData.Allocas.emplace_back( 1153 PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false); 1154 // Create an entry for every spilled value. 1155 for (auto &S : FrameData.Spills) { 1156 Type *FieldType = S.first->getType(); 1157 // For byval arguments, we need to store the pointed value in the frame, 1158 // instead of the pointer itself. 1159 if (const Argument *A = dyn_cast<Argument>(S.first)) 1160 if (A->hasByValAttr()) 1161 FieldType = A->getParamByValType(); 1162 FieldIDType Id = 1163 B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/); 1164 FrameData.setFieldIndex(S.first, Id); 1165 } 1166 1167 B.finish(FrameTy); 1168 FrameData.updateLayoutIndex(B); 1169 Shape.FrameAlign = B.getStructAlign(); 1170 Shape.FrameSize = B.getStructSize(); 1171 1172 switch (Shape.ABI) { 1173 case coro::ABI::Switch: { 1174 // In the switch ABI, remember the switch-index field. 
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
        = (B.getStructSize() <= Id->getStorageSize() &&
           B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? In that case, we will need to recreate the alias
//      after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its aliases.
//      In the end, we check if there exist any two basic blocks that cross
//      suspension points. If so, this alloca must be put on the frame.
//   b. Whether the alloca or any alias of the alloca escapes at some point,
//      either because the address is stored somewhere, or because the address
//      is used in a function call that might capture it. If it ever escapes,
//      this alloca must be put on the frame conservatively.
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
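//
// A rough illustration of case 3 (hypothetical IR, value and function names
// are made up for this example):
//
//   %a     = alloca [64 x i8]
//   %alias = getelementptr inbounds [64 x i8], [64 x i8]* %a, i64 0, i64 16
//   %hdl   = call i8* @llvm.coro.begin(...)   ; %alias was created before this
//   ...
//   call void @use(i8* %alias)                ; ... but is used after it
//
// Here the visitor would record %alias with offset 16 so that, once %a is
// placed on the frame, the alias can be recreated as a GEP off the frame slot.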
1240 namespace { 1241 struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> { 1242 using Base = PtrUseVisitor<AllocaUseVisitor>; 1243 AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT, 1244 const CoroBeginInst &CB, const SuspendCrossingInfo &Checker) 1245 : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {} 1246 1247 void visit(Instruction &I) { 1248 Users.insert(&I); 1249 Base::visit(I); 1250 // If the pointer is escaped prior to CoroBegin, we have to assume it would 1251 // be written into before CoroBegin as well. 1252 if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) { 1253 MayWriteBeforeCoroBegin = true; 1254 } 1255 } 1256 // We need to provide this overload as PtrUseVisitor uses a pointer based 1257 // visiting function. 1258 void visit(Instruction *I) { return visit(*I); } 1259 1260 void visitPHINode(PHINode &I) { 1261 enqueueUsers(I); 1262 handleAlias(I); 1263 } 1264 1265 void visitSelectInst(SelectInst &I) { 1266 enqueueUsers(I); 1267 handleAlias(I); 1268 } 1269 1270 void visitStoreInst(StoreInst &SI) { 1271 // Regardless whether the alias of the alloca is the value operand or the 1272 // pointer operand, we need to assume the alloca is been written. 1273 handleMayWrite(SI); 1274 1275 if (SI.getValueOperand() != U->get()) 1276 return; 1277 1278 // We are storing the pointer into a memory location, potentially escaping. 1279 // As an optimization, we try to detect simple cases where it doesn't 1280 // actually escape, for example: 1281 // %ptr = alloca .. 1282 // %addr = alloca .. 1283 // store %ptr, %addr 1284 // %x = load %addr 1285 // .. 1286 // If %addr is only used by loading from it, we could simply treat %x as 1287 // another alias of %ptr, and not considering %ptr being escaped. 1288 auto IsSimpleStoreThenLoad = [&]() { 1289 auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand()); 1290 // If the memory location we are storing to is not an alloca, it 1291 // could be an alias of some other memory locations, which is difficult 1292 // to analyze. 1293 if (!AI) 1294 return false; 1295 // StoreAliases contains aliases of the memory location stored into. 1296 SmallVector<Instruction *, 4> StoreAliases = {AI}; 1297 while (!StoreAliases.empty()) { 1298 Instruction *I = StoreAliases.pop_back_val(); 1299 for (User *U : I->users()) { 1300 // If we are loading from the memory location, we are creating an 1301 // alias of the original pointer. 1302 if (auto *LI = dyn_cast<LoadInst>(U)) { 1303 enqueueUsers(*LI); 1304 handleAlias(*LI); 1305 continue; 1306 } 1307 // If we are overriding the memory location, the pointer certainly 1308 // won't escape. 1309 if (auto *S = dyn_cast<StoreInst>(U)) 1310 if (S->getPointerOperand() == I) 1311 continue; 1312 if (auto *II = dyn_cast<IntrinsicInst>(U)) 1313 if (II->isLifetimeStartOrEnd()) 1314 continue; 1315 // BitCastInst creats aliases of the memory location being stored 1316 // into. 1317 if (auto *BI = dyn_cast<BitCastInst>(U)) { 1318 StoreAliases.push_back(BI); 1319 continue; 1320 } 1321 return false; 1322 } 1323 } 1324 1325 return true; 1326 }; 1327 1328 if (!IsSimpleStoreThenLoad()) 1329 PI.setEscaped(&SI); 1330 } 1331 1332 // All mem intrinsics modify the data. 
1333 void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); } 1334 1335 void visitBitCastInst(BitCastInst &BC) { 1336 Base::visitBitCastInst(BC); 1337 handleAlias(BC); 1338 } 1339 1340 void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) { 1341 Base::visitAddrSpaceCastInst(ASC); 1342 handleAlias(ASC); 1343 } 1344 1345 void visitGetElementPtrInst(GetElementPtrInst &GEPI) { 1346 // The base visitor will adjust Offset accordingly. 1347 Base::visitGetElementPtrInst(GEPI); 1348 handleAlias(GEPI); 1349 } 1350 1351 void visitIntrinsicInst(IntrinsicInst &II) { 1352 if (II.getIntrinsicID() != Intrinsic::lifetime_start) 1353 return Base::visitIntrinsicInst(II); 1354 LifetimeStarts.insert(&II); 1355 } 1356 1357 void visitCallBase(CallBase &CB) { 1358 for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op) 1359 if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op)) 1360 PI.setEscaped(&CB); 1361 handleMayWrite(CB); 1362 } 1363 1364 bool getShouldLiveOnFrame() const { 1365 if (!ShouldLiveOnFrame) 1366 ShouldLiveOnFrame = computeShouldLiveOnFrame(); 1367 return ShouldLiveOnFrame.getValue(); 1368 } 1369 1370 bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; } 1371 1372 DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const { 1373 assert(getShouldLiveOnFrame() && "This method should only be called if the " 1374 "alloca needs to live on the frame."); 1375 for (const auto &P : AliasOffetMap) 1376 if (!P.second) 1377 report_fatal_error("Unable to handle an alias with unknown offset " 1378 "created before CoroBegin."); 1379 return AliasOffetMap; 1380 } 1381 1382 private: 1383 const DominatorTree &DT; 1384 const CoroBeginInst &CoroBegin; 1385 const SuspendCrossingInfo &Checker; 1386 // All alias to the original AllocaInst, created before CoroBegin and used 1387 // after CoroBegin. Each entry contains the instruction and the offset in the 1388 // original Alloca. They need to be recreated after CoroBegin off the frame. 1389 DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffetMap{}; 1390 SmallPtrSet<Instruction *, 4> Users{}; 1391 SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{}; 1392 bool MayWriteBeforeCoroBegin{false}; 1393 1394 mutable llvm::Optional<bool> ShouldLiveOnFrame{}; 1395 1396 bool computeShouldLiveOnFrame() const { 1397 // If lifetime information is available, we check it first since it's 1398 // more precise. We look at every pair of lifetime.start intrinsic and 1399 // every basic block that uses the pointer to see if they cross suspension 1400 // points. The uses cover both direct uses as well as indirect uses. 1401 if (!LifetimeStarts.empty()) { 1402 for (auto *I : Users) 1403 for (auto *S : LifetimeStarts) 1404 if (Checker.isDefinitionAcrossSuspend(*S, I)) 1405 return true; 1406 return false; 1407 } 1408 // FIXME: Ideally the isEscaped check should come at the beginning. 1409 // However there are a few loose ends that need to be fixed first before 1410 // we can do that. We need to make sure we are not over-conservative, so 1411 // that the data accessed in-between await_suspend and symmetric transfer 1412 // is always put on the stack, and also data accessed after coro.end is 1413 // always put on the stack (esp the return object). 
To fix that, we need 1414 // to: 1415 // 1) Potentially treat sret as nocapture in calls 1416 // 2) Special handle the return object and put it on the stack 1417 // 3) Utilize lifetime.end intrinsic 1418 if (PI.isEscaped()) 1419 return true; 1420 1421 for (auto *U1 : Users) 1422 for (auto *U2 : Users) 1423 if (Checker.isDefinitionAcrossSuspend(*U1, U2)) 1424 return true; 1425 1426 return false; 1427 } 1428 1429 void handleMayWrite(const Instruction &I) { 1430 if (!DT.dominates(&CoroBegin, &I)) 1431 MayWriteBeforeCoroBegin = true; 1432 } 1433 1434 bool usedAfterCoroBegin(Instruction &I) { 1435 for (auto &U : I.uses()) 1436 if (DT.dominates(&CoroBegin, U)) 1437 return true; 1438 return false; 1439 } 1440 1441 void handleAlias(Instruction &I) { 1442 // We track all aliases created prior to CoroBegin but used after. 1443 // These aliases may need to be recreated after CoroBegin if the alloca 1444 // need to live on the frame. 1445 if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I)) 1446 return; 1447 1448 if (!IsOffsetKnown) { 1449 AliasOffetMap[&I].reset(); 1450 } else { 1451 auto Itr = AliasOffetMap.find(&I); 1452 if (Itr == AliasOffetMap.end()) { 1453 AliasOffetMap[&I] = Offset; 1454 } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) { 1455 // If we have seen two different possible values for this alias, we set 1456 // it to empty. 1457 AliasOffetMap[&I].reset(); 1458 } 1459 } 1460 } 1461 }; 1462 } // namespace 1463 1464 // We need to make room to insert a spill after initial PHIs, but before 1465 // catchswitch instruction. Placing it before violates the requirement that 1466 // catchswitch, like all other EHPads must be the first nonPHI in a block. 1467 // 1468 // Split away catchswitch into a separate block and insert in its place: 1469 // 1470 // cleanuppad <InsertPt> cleanupret. 1471 // 1472 // cleanupret instruction will act as an insert point for the spill. 1473 static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) { 1474 BasicBlock *CurrentBlock = CatchSwitch->getParent(); 1475 BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch); 1476 CurrentBlock->getTerminator()->eraseFromParent(); 1477 1478 auto *CleanupPad = 1479 CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock); 1480 auto *CleanupRet = 1481 CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock); 1482 return CleanupRet; 1483 } 1484 1485 static void createFramePtr(coro::Shape &Shape) { 1486 auto *CB = Shape.CoroBegin; 1487 IRBuilder<> Builder(CB->getNextNode()); 1488 StructType *FrameTy = Shape.FrameTy; 1489 PointerType *FramePtrTy = FrameTy->getPointerTo(); 1490 Shape.FramePtr = 1491 cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr")); 1492 } 1493 1494 // Replace all alloca and SSA values that are accessed across suspend points 1495 // with GetElementPointer from coroutine frame + loads and stores. Create an 1496 // AllocaSpillBB that will become the new entry block for the resume parts of 1497 // the coroutine: 1498 // 1499 // %hdl = coro.begin(...) 1500 // whatever 1501 // 1502 // becomes: 1503 // 1504 // %hdl = coro.begin(...) 
1505 // %FramePtr = bitcast i8* hdl to %f.frame* 1506 // br label %AllocaSpillBB 1507 // 1508 // AllocaSpillBB: 1509 // ; geps corresponding to allocas that were moved to coroutine frame 1510 // br label PostSpill 1511 // 1512 // PostSpill: 1513 // whatever 1514 // 1515 // 1516 static Instruction *insertSpills(const FrameDataInfo &FrameData, 1517 coro::Shape &Shape) { 1518 auto *CB = Shape.CoroBegin; 1519 LLVMContext &C = CB->getContext(); 1520 IRBuilder<> Builder(C); 1521 StructType *FrameTy = Shape.FrameTy; 1522 Instruction *FramePtr = Shape.FramePtr; 1523 DominatorTree DT(*CB->getFunction()); 1524 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache; 1525 1526 // Create a GEP with the given index into the coroutine frame for the original 1527 // value Orig. Appends an extra 0 index for array-allocas, preserving the 1528 // original type. 1529 auto GetFramePointer = [&](Value *Orig) -> Value * { 1530 FieldIDType Index = FrameData.getFieldIndex(Orig); 1531 SmallVector<Value *, 3> Indices = { 1532 ConstantInt::get(Type::getInt32Ty(C), 0), 1533 ConstantInt::get(Type::getInt32Ty(C), Index), 1534 }; 1535 1536 if (auto *AI = dyn_cast<AllocaInst>(Orig)) { 1537 if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) { 1538 auto Count = CI->getValue().getZExtValue(); 1539 if (Count > 1) { 1540 Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0)); 1541 } 1542 } else { 1543 report_fatal_error("Coroutines cannot handle non static allocas yet"); 1544 } 1545 } 1546 1547 auto GEP = cast<GetElementPtrInst>( 1548 Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices)); 1549 if (isa<AllocaInst>(Orig)) { 1550 // If the type of GEP is not equal to the type of AllocaInst, it implies 1551 // that the AllocaInst may be reused in the Frame slot of other 1552 // AllocaInst. So We cast GEP to the AllocaInst here to re-use 1553 // the Frame storage. 1554 // 1555 // Note: If we change the strategy dealing with alignment, we need to refine 1556 // this casting. 1557 if (GEP->getResultElementType() != Orig->getType()) 1558 return Builder.CreateBitCast(GEP, Orig->getType(), 1559 Orig->getName() + Twine(".cast")); 1560 } 1561 return GEP; 1562 }; 1563 1564 for (auto const &E : FrameData.Spills) { 1565 Value *Def = E.first; 1566 auto SpillAlignment = Align(FrameData.getAlign(Def)); 1567 // Create a store instruction storing the value into the 1568 // coroutine frame. 1569 Instruction *InsertPt = nullptr; 1570 bool NeedToCopyArgPtrValue = false; 1571 if (auto *Arg = dyn_cast<Argument>(Def)) { 1572 // For arguments, we will place the store instruction right after 1573 // the coroutine frame pointer instruction, i.e. bitcast of 1574 // coro.begin from i8* to %f.frame*. 1575 InsertPt = FramePtr->getNextNode(); 1576 1577 // If we're spilling an Argument, make sure we clear 'nocapture' 1578 // from the coroutine function. 1579 Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture); 1580 1581 if (Arg->hasByValAttr()) 1582 NeedToCopyArgPtrValue = true; 1583 1584 } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) { 1585 // Don't spill immediately after a suspend; splitting assumes 1586 // that the suspend will be followed by a branch. 1587 InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI(); 1588 } else { 1589 auto *I = cast<Instruction>(Def); 1590 if (!DT.dominates(CB, I)) { 1591 // If it is not dominated by CoroBegin, then spill should be 1592 // inserted immediately after CoroFrame is computed. 
        InsertPt = FramePtr->getNextNode();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip the PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (NeedToCopyArgPtrValue) {
      // For byval arguments, we need to store the pointed-to value in the
      // frame, instead of the pointer itself.
      auto *Value =
          Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to reload
      // the spilled value from the coroutine frame. Populates the Value
      // pointer reference provided with the frame GEP.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (NeedToCopyArgPtrValue)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() from CoroCloner.
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.ReuseFrameSlot);
        }
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by rewriting them in the rewritePHIs function.)
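      //
      // Illustrative sketch (hypothetical names): after rewritePHIs, such a
      // use looks like
      //   %v = phi i32 [ %def, %bb.from.entry ]
      // and is simply replaced below by the %def.reload load created above.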
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = FramePtr->getParent();

  auto SpillBlock =
      FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return FramePtr;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To preserve debuggability, we replace the uses of allocas
  // in dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableIntrinsic *, 4> DIs;
    findDbgUsers(DIs, Alloca);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(FramePtr->getNextNode());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
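    //
    // As an illustrative sketch (hypothetical IR), an alias such as
    //   %p = getelementptr inbounds i8, i8* %buf.cast, i64 16
    // created before coro.begin is rebuilt below as a GEP at byte offset 16
    // from the alloca's frame slot, and only the uses dominated by coro.begin
    // are redirected to the rebuilt pointer.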
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto *AliasPtr = Builder.CreateGEP(
          Type::getInt8Ty(C), FramePtrRaw,
          ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
  return FramePtr;
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  // cleanuppad:
  //    %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
  //    %3 = cleanuppad within none []
  //
  // It will create:
  //
  // cleanuppad.corodispatch
  //    %2 = phi i8[0, %catchswitch], [1, %catch.1]
  //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
  //            [i8 0, label %cleanuppad.from.catchswitch
  //             i8 1, label %cleanuppad.from.catch.1]
  // cleanuppad.from.catchswitch:
  //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
  // cleanuppad.from.catch.1:
  //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
  // cleanuppad:
  //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                 [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
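  // The dispatcher receives a small integer tag from each predecessor via the
  // PHI created below and switches on it to reach the per-predecessor
  // single-value PHI blocks (see the worked example in the comment above).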
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values to there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Setup the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void cleanupSinglePredPHIs(Function &F) {
  SmallVector<PHINode *, 32> Worklist;
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  // loop:
  //    %n.val = phi i32[%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  //  loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  //  loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function after
    // ehAwareSplitEdge cloned it in the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume as opposed to spilling
// the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {

        bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
        CurrentBlock = U->getParent();
        auto *InsertBlock = IsInCoroSuspendBlock
                                ? CurrentBlock->getSinglePredecessor()
                                : CurrentBlock;
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            IsInCoroSuspendBlock
                ? InsertBlock->getTerminator()
                : &*InsertBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be all alone by itself in a block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass them up.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
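  // Note that the search is depth-limited, so a long straight-line exit path
  // may conservatively be treated as if it could loop back.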
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
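  // The call through a null function pointer below is only a placeholder; it
  // is recorded in Shape.SwiftErrorOps and is expected to be rewritten into
  // real swifterror accesses once the coroutine is split.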
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
    // We're likely changing the use list, so use a mutation-safe
    // iteration pattern.
    auto &Use = *UI;
    ++UI;

    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
///
/// The argument keeps the swifterror flag.
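///
/// Roughly (names here are illustrative, not taken from real IR): a swifterror
/// pointer argument is replaced by a fresh alloca of the pointee type that is
/// initialized to null; each suspend is then bracketed with the placeholder
/// set/get calls emitted above, and each coro.end restores the value with a
/// set call.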
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  auto ValueTy = ArgTy->getElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
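  // This transitively pulls in users of the already-collected instructions
  // that are not themselves dominated by coro.begin, so whole def-use chains
  // get moved below coro.begin as a unit.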
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose users are all contained within a single
/// suspended region, sink its lifetime.start marker to the block right after
/// the suspend. Doing so minimizes the lifetime of each variable, hence
/// minimizing the amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers, if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start, and the GEPs and bitcasts used by
          // lifetime.start markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink the lifetime.start markers to the dominating block when they are
      // only used outside the region.
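      //
      // Illustrative sketch (hypothetical IR): a marker in the entry block,
      //   call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast)
      // is cloned below at the end of the dominating post-suspend block and
      // the original markers are then erased.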
      if (Valid && Lifetimes.size() != 0) {
        // May be AI itself, when the type of AI is i8*
        auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // All the outside lifetime.start markers are no longer necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    DominatorTree DT(F);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool ReuseFrameSlot) {
  Function *F = DVI->getFunction();
  IRBuilder<> Builder(F->getContext());
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert into a DIExpression.
  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
  Value *Storage = DVI->getVariableLocationOp(0);
  Value *OriginalStorage = Storage;
  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location no DW_OP_deref operation for the
      // last direct load from an alloca is necessary. This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!SkipOutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
      Storage = StInst->getOperand(0);
    } else {
      SmallVector<uint64_t, 16> Ops;
      SmallVector<Value *, 0> AdditionalValues;
      Value *Op = llvm::salvageDebugInfoImpl(
          *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
          AdditionalValues);
      if (!Op || !AdditionalValues.empty()) {
        // If salvaging failed or salvaging produced more than one location
        // operand, give up.
        break;
      }
      Storage = Op;
      Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
    }
    SkipOutermostLoad = false;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  //
  // Avoid creating an alloca that would be eliminated by optimization
  // passes, which would leave the corresponding dbg.declares invalid.
  if (!ReuseFrameSlot && !EnableReuseStorageInFrame)
    if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
      auto &Cached = DbgPtrAllocaCache[Storage];
      if (!Cached) {
        Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                      Arg->getName() + ".debug");
        Builder.CreateStore(Storage, Cached);
      }
      Storage = Cached;
      // FIXME: LLVM lacks nuanced semantics to differentiate between
      // memory and direct locations at the IR level. The backend will
      // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
      // location. Thus, if there are deref and offset operations in the
      // expression, we need to add a DW_OP_deref at the *start* of the
      // expression to first load the contents of the alloca before
      // adjusting it with the expression.
      if (Expr && Expr->isComplex())
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }

  DVI->replaceVariableLocationOp(OriginalStorage, Storage);
  DVI->setExpression(Expr);
  // It makes no sense to move the dbg.value intrinsic.
  if (!isa<DbgValueInst>(DVI)) {
    if (auto *InsertPt = dyn_cast<Instruction>(Storage))
      DVI->moveAfter(InsertPt);
    else if (isa<Argument>(Storage))
      DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
  }
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call.
    // (Arguments to the coro.end instructions would be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
  cleanupSinglePredPHIs(F);

  // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I)) {
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));

          // Manually add dbg.value metadata uses of I.
          SmallVector<DbgValueInst *, 16> DVIs;
          findDbgValues(DVIs, &I);
          for (auto *DVI : DVIs)
            if (Checker.isDefinitionAcrossSuspend(I, DVI))
              Spills[&I].push_back(DVI);
        }

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  sinkLifetimeStartMarkers(F, Shape, Checker);
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame; no
    // need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value.
      // The rewrite doesn't invalidate anything in Spills because the other
      // alloca intrinsics have no other operands besides AI, and it doesn't
      // invalidate the iteration because we delay erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only choose to salvage DbgValueInsts whose values are
  // already in the frame. The dbg.values for allocas are handled specially.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
    });
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}