//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of the paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
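//
// Illustrative example (hypothetical CFG, not taken from any test): for a
// coroutine whose body is
//
//   entry -> susp -> cont        ; 'susp' contains a coro.suspend
//
// the fixed-point propagation in the constructor below converges to
//
//   Consumes(cont) = {entry, susp, cont}
//   Kills(cont)    = {entry, susp}
//
// so a value defined in 'entry' and used in 'cont' crosses a suspend point
// and must be spilled to the frame.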
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }
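
  // Typical query pattern (illustrative sketch, not code from this file):
  //
  //   SuspendCrossingInfo Checker(F, Shape);
  //   if (Checker.isDefinitionAcrossSuspend(*Def, U))
  //     ; // Def is live across a suspend point and must be spilled.
  //
  // where Def is an Argument or an Instruction and U is one of its users.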

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutine could only collect Argument and Instruction now.");
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of
  // the state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }
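
  // The coro.save/coro.suspend pair typically looks like this in the IR
  // (illustrative sketch; %hdl and @may_resume are hypothetical names):
  //
  //   %save = call token @llvm.coro.save(i8* %hdl)
  //   call void @may_resume()  ; may resume the coroutine before the suspend
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //
  // Treating the coro.save block as a suspend block forces values live
  // across the call to @may_resume into the frame as well.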

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that S is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need to reload
// it.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  uint64_t getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, uint64_t Align) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will be first set
  // with their original insertion field index. After the frame is built, their
  // indexes will be updated into the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
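//
// For example (hypothetical IR, not from a test):
//
//   %x = alloca i64, align 16
//
// has a natural (ABI) alignment of 8 on typical 64-bit targets, but the
// requested alignment of 16 may force a padding field (e.g. an [N x i8]
// array) in front of the corresponding frame field, depending on the offset
// chosen by the layout below.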
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  Optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   Optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put allocas whose lifetime ranges do not overlap into one
  /// slot of the coroutine frame.
  /// Consider the example at https://bugs.llvm.org/show_bug.cgi?id=45566:
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, puts the allocas of
  /// each non-overlapping set into the same slot in the coroutine frame, and
  /// then adds a field for each set, using the largest type in the set as
  /// the field type.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may differ from the order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false,
                                      bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size = 0, we don't need to add a field; it can
    // just point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    // If we are spilling values we don't need to worry about ABI alignment
    // concerns.
    auto ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment =
        (IsSpillOfValue && MaxFrameAlignment)
            ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
            : ABIAlign;
    if (!FieldAlignment) {
      FieldAlignment = TyAlignment;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

      // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment.value());
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all the allocas overlap in
  // the blocks that contain coro.end and in their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. This should be reasonable since there shouldn't be
  // uses in these blocks and the coroutine frame shouldn't be used outside
  // the coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However,
  // this case seems too complex to handle. And it is harmless to skip these
  // patterns since doing so just prevents putting the allocas into the same
  // slot.
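  //
  // The pattern being temporarily rewritten typically looks like this
  // (illustrative sketch of the switch-ABI lowering of a suspend point):
  //
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %s, label %suspend [i8 0, label %resume
  //                                 i8 1, label %cleanup]
  //
  // Redirecting the default destination to the resume block (the successor
  // at index 1) hides the path to coro.end from the liveness computation.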
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front, so that they have higher priority to
  // be merged, which can potentially save more space. This also keeps each
  // AllocaSet ordered, so we can easily get the largest alloca in an
  // AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing non-overlapped AllocaSet that the alloca
    // does not interfere with. If one exists, insert the alloca into it.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A satisfies the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies for handling the
      // alignment information during merging, but they seem hard to
      // implement for little benefit.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default target destination for each switch statement
  // recorded above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
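  // Illustrative merge decision (hypothetical sizes/alignments): given
  // alloca A (32 bytes, align 8) and alloca B (16 bytes, align 4) with
  // disjoint live ranges, B joins A's set because 8 % 4 == 0, and the slot
  // keeps A's type (the largest member's) as the field type, so both share
  // one 32-byte field.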

  // This debug output tells us which allocas are merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) != DIVarCache.end())
      continue;

    auto DDIs = FindDbgDeclareUses(V);
    auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
      return DDI->getExpression()->getNumElements() == 0;
    });
    if (I != DDIs.end())
      DIVarCache.insert({V, (*I)->getVariable()});
  }
}

/// Create a name for Type. It uses MDString to store the newly created string
/// to avoid a memory leak.
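///
/// Illustrative results (as produced by the branches below; the pointer case
/// applies to non-opaque pointers only):
///   i32                    -> "__int_32"
///   double                 -> "__double_"
///   %"class.std::string"*  -> "class_std__string_Ptr"
///   %struct.Node           -> "struct_Node"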
static StringRef solveTypeName(Type *Ty) {
  if (Ty->isIntegerTy()) {
    // The longest name in common may be '__int_128', which has 9 characters.
    SmallString<16> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    auto *MDName = MDString::get(Ty->getContext(), OS.str());
    return MDName->getString();
  }

  if (Ty->isFloatingPointTy()) {
    if (Ty->isFloatTy())
      return "__float_";
    if (Ty->isDoubleTy())
      return "__double_";
    return "__floating_type_";
  }

  if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
    if (PtrTy->isOpaque())
      return "PointerType";
    Type *PointeeTy = PtrTy->getNonOpaquePointerElementType();
    auto Name = solveTypeName(PointeeTy);
    if (Name == "UnknownType")
      return "PointerType";
    SmallString<16> Buffer;
    Twine(Name + "_Ptr").toStringRef(Buffer);
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  if (Ty->isStructTy()) {
    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    auto Name = Ty->getStructName();

    SmallString<16> Buffer(Name);
    for_each(Buffer, [](auto &Iter) {
      if (Iter == '.' || Iter == ':')
        Iter = '_';
    });
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  return "UnknownType";
}

static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
                           const DataLayout &Layout, DIScope *Scope,
                           unsigned LineNum,
                           DenseMap<Type *, DIType *> &DITypeCache) {
  if (DIType *DT = DITypeCache.lookup(Ty))
    return DT;

  StringRef Name = solveTypeName(Ty);

  DIType *RetType = nullptr;

  if (Ty->isIntegerTy()) {
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isFloatingPointTy()) {
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_float,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isPointerTy()) {
    // Construct a BasicType instead of a PointerType to avoid an infinite
    // search problem.
    // For example, we would be in trouble if we traverse recursively:
    //
    //    struct Node {
    //        Node* ptr;
    //    };
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
        llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
    RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
///   1. For every value in the frame, we search its uses for a dbg.declare
///      to find the corresponding debug variable for the value. If we can
///      find the debug variable, we get full and accurate debug information.
///   2. If we can't get debug information in step 1, we try to build the
///      DIType from the value's Type. We do this in solveDIType. We only
///      handle integer, floating-point, pointer, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, the function was not compiled with
  // debug info, so we don't need to generate debug info for the frame
  // either.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We could only build debug information for C++ coroutines now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
  if (DIs.empty())
    return;

  DbgDeclareInst *PromiseDDI = DIs.front();
  DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  DILocation *DILoc = PromiseDDI->getDebugLoc().get();
  unsigned LineNum = PromiseDIVariable->getLine();

  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert({ResumeIndex,
                  DBuilder.createBasicType("__resume_fn",
                                           Layout.getTypeSizeInBits(ResumeFnTy),
                                           dwarf::DW_ATE_address)});
  TyCache.insert(
      {DestroyIndex, DBuilder.createBasicType(
                         "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
                         dwarf::DW_ATE_address)});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) == DIVarCache.end())
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from index to (Align, Offset) pair.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The align and offset of the resume function and destroy function
  // pointers are fixed (the constants below assume 8-byte pointers).
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid clashing type names. For example, there
  // may be many i32 and i64 types in one coroutine, and we would use i32_0
  // and i32_1 to disambiguate them, since it makes no sense for the names
  // of the fields to conflict with each other.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle a type which is not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The subprogram has a RetainedNodes field that records the debug
  // variables it contains, so we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame is
  // optimized out`, and the latter is more precise.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.getInsertPtAfterFramePtr());
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  // We will use this value to cap the alignment of spilled values.
  Optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header. For example, 3 suspend points need
    // Log2_64_Ceil(3) == 2 bits, i.e. an i2 index field.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot,
  // we add allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //   1. updateLayoutIndex can update its index after
  //      `performOptimizedStructLayout`, and
  //   2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed-to value in the
    // frame, instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id =
        B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin,
//      which would require copying the data from the alloca to the frame
//      after CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? In that case, we will need to recreate the
//      alias after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its
//      aliases. In the end, we check if there exist any two basic blocks
//      that cross suspension points. If so, this alloca must be put on the
//      frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some
//      point, either by storing the address somewhere, or by passing the
//      address to a function call that might capture it. If it's ever
//      escaped, this alloca must be put on the frame conservatively.
//
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either
// through a store instruction, a function call or any of the memory
// intrinsics, we check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
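//
// Illustrative IR pattern for question 3 (hypothetical, not from a test):
//
//   %a   = alloca i64
//   %p   = bitcast i64* %a to i8*                   ; alias, offset 0
//   %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
//   ...
//   store i8 1, i8* %p                              ; alias used after begin
//
// If %a ends up on the frame, %p must be recreated after coro.begin as a
// pointer into the frame slot at the recorded offset.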
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca has been written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //     %ptr = alloca ..
    //     %addr = alloca ..
    //     store %ptr, %addr
    //     %x = load %addr
    //     ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overriding the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    // When we find that the lifetime markers refer to a
    // subrange of the original alloca, we ignore the lifetime
    // markers to avoid misleading the analysis.
    if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
        !Offset.isZero())
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases to the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset into the original alloca. They need to be recreated after
  // CoroBegin off the frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      for (auto *I : Users)
        for (auto *S : LifetimeStarts)
          if (Checker.isDefinitionAcrossSuspend(*S, I))
            return true;
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (esp. the return object).
    // To fix that, we need to:
    //   1) Potentially treat sret as nocapture in calls
    //   2) Special handle the return object and put it on the stack
    //   3) Utilize lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we
        // set it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// a catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EHPads, must be
// the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

static void createFramePtr(coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  Shape.FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label PostSpill
//
//  PostSpill:
//    whatever
//
//
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Value *FramePtr = Shape.FramePtr;
  DominatorTree DT(*CB->getFunction());
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the frame slot of this AllocaInst may be shared with
      // other AllocaInsts. So we cast the GEP to the type of the AllocaInst
      // here to reuse the frame storage.
      //
      // Note: If we change the strategy for dealing with alignment, we need
      // to refine this cast.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = Shape.getInsertPtAfterFramePtr();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

      if (Arg->hasByValAttr())
        ByValTy = Arg->getParamByValType();
    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the coroutine frame pointer is computed.
  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = Shape.getInsertPtAfterFramePtr();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

      if (Arg->hasByValAttr())
        ByValTy = Arg->getParamByValType();
    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the coroutine frame pointer is computed.
        InsertPt = Shape.getInsertPtAfterFramePtr();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (ByValTy) {
      // For byval arguments, we need to store the pointed-to value in the
      // frame, instead of the pointer itself.
      auto *Value = Builder.CreateLoad(ByValTy, Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, create a load instruction to
      // reload the spilled value from the coroutine frame. For byval
      // arguments, the frame GEP itself stands in for the reload.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (ByValTy)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() in the CoroCloner.
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
        }
      }

      // Salvage debug info on any dbg.addr that we see. We do not insert them
      // into each block where we have a use, though.
      if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
        coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
      }

      // If we have a single-edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi-edge
      // PHINodes by rewriting them in the rewritePHIs function.)
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // the reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }
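
  // To summarize the rewrite above (an illustrative sketch; field index and
  // types are made up): a value
  //
  //   %v = call i32 @f()        ; defined on one side of a suspend
  //
  // gets a store right after its definition,
  //
  //   %v.spill.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
  //                   i32 0, i32 2
  //   store i32 %v, i32* %v.spill.addr
  //
  // and each block that uses it on the other side of the suspend reloads it:
  //
  //   %v.reload = load i32, i32* %v.reload.addr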

  BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();

  auto SpillBlock = FramePtrBB->splitBasicBlock(
      Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To maintain debuggability, we replace the uses of allocas
  // for dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableIntrinsic *, 4> DIs;
    findDbgUsers(DIs, Alloca);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
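    // E.g. (an illustrative sketch): for "%a = alloca i32" that may have
    // been written to before coro.begin, the copy emitted above is
    //
    //   %val = load i32, i32* %a
    //   store i32 %val, i32* <%a's frame slot>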
    // For each alias of Alloca created before CoroBegin but used after
    // CoroBegin, we recreate it after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto &Value = Alias.second.getValue();
      auto ITy = IntegerType::get(C, Value.getBitWidth());
      auto *AliasPtr = Builder.CreateGEP(Type::getInt8Ty(C), FramePtrRaw,
                                         ConstantInt::get(ITy, Value));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  // cleanuppad:
  //    %2 = phi i32 [%0, %catchswitch], [%1, %catch.1]
  //    %3 = cleanuppad within none []
  //
  // It will create:
  //
  // cleanuppad.corodispatch:
  //    %2 = phi i8 [0, %catchswitch], [1, %catch.1]
  //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
  //            [i8 0, label %cleanuppad.from.catchswitch
  //             i8 1, label %cleanuppad.from.catch.1]
  //
  // cleanuppad.from.catchswitch:
  //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
  //
  // cleanuppad.from.catch.1:
  //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
  //
  // cleanuppad:
  //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                 [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case of switching on an invalid value in the
  // dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();
  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Set up the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void cleanupSinglePredPHIs(Function &F) {
  SmallVector<PHINode *, 32> Worklist;
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}
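
// E.g. (an illustrative sketch): cleanupSinglePredPHIs folds away a trivial
// PHI such as
//
//   %x = phi i32 [ %v, %pred ]
//
// by replacing every use of %x with %v, since a single-incoming PHI carries
// no information.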
static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  // loop:
  //    %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function,
    // after ehAwareSplitEdge has cloned it into the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}
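
// For example (an illustrative sketch): if
//
//   %sum = add i32 %a, %b
//
// is used on the other side of a suspend, it is cheaper to clone the add into
// the using block than to give %sum a frame slot; its operands may still be
// spilled, but %sum itself is not.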
// For every use of the value that is across a suspend point, recreate that
// value after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {

        bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
        CurrentBlock = U->getParent();
        auto *InsertBlock = IsInCoroSuspendBlock
                                ? CurrentBlock->getSinglePredecessor()
                                : CurrentBlock;
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            IsInCoroSuspendBlock ? InsertBlock->getTerminator()
                                 : &*InsertBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be alone in its own block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass them up.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}
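
/// E.g. (an illustrative sketch; intrinsic signatures abbreviated): in
///
///   %t = call token @llvm.coro.alloca.alloc(i64 %n, i32 8)
///   %p = call i8* @llvm.coro.alloca.get(token %t)
///   ...                ; no suspend on any path from here to the free
///   call void @llvm.coro.alloca.free(token %t)
///
/// the allocation is "local" and can be lowered to a plain stack allocation
/// instead of a frame slot.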
/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst *> LocalAllocas,
                              SmallVectorImpl<Instruction *> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something similar.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

        // Replace frees with stackrestores. This is safe because
        // alloca.alloc is required to obey a stack discipline, although we
        // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}
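
/// E.g. (an illustrative sketch): when a stack save is needed, the lowering
/// above produces
///
///   %save = call i8* @llvm.stacksave()
///   %mem  = alloca i8, i64 %size
///   ...
///   call void @llvm.stackrestore(i8* %save)   ; at each coro.alloca.free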
/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction *> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this one last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {V});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}
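
/// E.g. (an illustrative sketch; the null callees are the "fake intrinsics"
/// recorded in Shape.SwiftErrorOps): around a call using the slot %err.slot,
/// the rewrite produces roughly
///
///   %v0   = load %swift.error*, %swift.error** %err.slot
///   %slot = call %swift.error** null(%swift.error* %v0)    ; "set"
///   call void @fn(%swift.error** swifterror %slot, ...)
///   %v1   = call %swift.error* null()                      ; "get"
///   store %swift.error* %v1, %swift.error** %err.slot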
/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilog.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst *> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  // swifterror arguments are required to have pointer-to-pointer type,
  // so create a pointer-typed alloca with opaque pointers.
  auto ValueTy = ArgTy->isOpaque() ? PointerType::getUnqual(F.getContext())
                                   : ArgTy->getNonOpaquePointerElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void)emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void)emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst *, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr())
      continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError())
      continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
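  // E.g. (an illustrative sketch): for
  //
  //   %gep = getelementptr ..., %some.def      ; precedes coro.begin
  //   %hdl = call i8* @llvm.coro.begin(...)
  //
  // %gep (and, transitively, its users) is queued here and later moved to
  // just after coro.begin, so every use of a frame value follows the frame
  // pointer.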
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose users all live inside a single suspended
/// region, sink its lifetime.start marker to a point after the suspend block.
/// Doing so minimizes the lifetime of each variable, and hence minimizes the
/// amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction *I) {
        if (auto *II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers: if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start markers, and the GEPs and bitcasts used by
          // lifetime.start markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
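      // E.g. (an illustrative sketch): for
      //
      //   entry:
      //     %a = alloca i64
      //     call void @llvm.lifetime.start.p0i8(i64 8, i8* %a.cast)
      //   await.ready:              ; all uses of %a live here, after a
      //     ...                     ; suspend
      //
      // the lifetime.start is re-emitted in await.ready (the block that
      // dominates all uses), so %a no longer appears live across the suspend
      // and needs no frame slot.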
      // Sink lifetime.start markers to the dominating block when they are
      // only used outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // This may be AI itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value * {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // All the lifetime.start markers outside the region are no longer
        // necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    DominatorTree DT(F);
    // The code that uses the lifetime.start intrinsic does not work for
    // functions with loops without an exit. Disable it on ABIs we know to
    // generate such code.
    bool ShouldUseLifetimeStartInfo =
        (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
         Shape.ABI != coro::ABI::RetconOnce);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker,
                             ShouldUseLifetimeStartInfo};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}
void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
  Function *F = DVI->getFunction();
  IRBuilder<> Builder(F->getContext());
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert it into a DIExpression.
  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
  Value *Storage = DVI->getVariableLocationOp(0);
  Value *OriginalStorage = Storage;
  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location, no DW_OP_deref operation for the
      // last direct load from an alloca is necessary. This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!SkipOutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
      Storage = StInst->getOperand(0);
    } else {
      SmallVector<uint64_t, 16> Ops;
      SmallVector<Value *, 0> AdditionalValues;
      Value *Op = llvm::salvageDebugInfoImpl(
          *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
          AdditionalValues);
      if (!Op || !AdditionalValues.empty()) {
        // If salvaging failed or salvaging produced more than one location
        // operand, give up.
        break;
      }
      Storage = Op;
      Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
    }
    SkipOutermostLoad = false;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  //
  // Avoid creating the alloca when optimizing: it would be eliminated by
  // optimization passes, and the corresponding dbg.declares would become
  // invalid.
  if (!OptimizeFrame)
    if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
      auto &Cached = DbgPtrAllocaCache[Storage];
      if (!Cached) {
        Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                      Arg->getName() + ".debug");
        Builder.CreateStore(Storage, Cached);
      }
      Storage = Cached;
      // FIXME: LLVM lacks nuanced semantics to differentiate between
      // memory and direct locations at the IR level. The backend will
      // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
      // location. Thus, if there are deref and offset operations in the
      // expression, we need to add a DW_OP_deref at the *start* of the
      // expression to first load the contents of the alloca before
      // adjusting it with the expression.
      if (Expr && Expr->isComplex())
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }

  DVI->replaceVariableLocationOp(OriginalStorage, Storage);
  DVI->setExpression(Expr);
  // We only hoist dbg.declare today, since it doesn't make sense to hoist
  // dbg.value or dbg.addr: they do not have the same function-wide
  // guarantees that dbg.declare does.
  if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
    if (auto *II = dyn_cast<InvokeInst>(Storage))
      DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
    else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
      DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
    else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
      assert(!InsertPt->isTerminator() &&
             "unexpected terminator used as a debug storage value");
      DVI->moveAfter(InsertPt);
    } else if (isa<Argument>(Storage))
      DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
  }
}
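
// For example (an illustrative sketch): a dbg.declare whose location is a
// frame reload address such as
//
//   %x.reload.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
//                    i32 0, i32 4
//
// can be rewritten to describe the variable in terms of %FramePtr, with the
// GEP folded into the DIExpression (e.g. a DW_OP_plus_uconst of the field
// offset), so the variable remains visible in every coro-split fragment.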
void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call function call. (Arguments to the coro.end
    // instructions would be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
  cleanupSinglePredPHIs(F);

  // Transform multi-edge PHI Nodes, so that any value feeding into a PHI
  // never has its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst *, 4> LocalAllocas;
  SmallVector<Instruction *, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I)) {
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));
        }

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
      Shape.ABI != coro::ABI::RetconOnce)
    sinkLifetimeStartMarkers(F, Shape, Checker);

  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process it as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage a DbgValueInst whose value is already in
  // the frame. The dbg.values for allocas are handled specially elsewhere.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
    });
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}