//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover if for a particular value
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides two way mapping between the blocks and numbers.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains data that allows us to answer the question
// of whether, given two BasicBlocks A and B, there is a path from A to B that
// passes through a suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'.
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', where at least one of the paths crosses a suspend
//          point.
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
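//
// As an illustrative sketch (block names are hypothetical): for the chain
//   entry -> susp -> resume, where 'susp' contains a suspend point,
// the propagation below yields Consumes(resume) = {entry, susp, resume} and
// a Kills(resume) set that contains 'entry', so a value defined in 'entry'
// and used in 'resume' must be spilled to the frame.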
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // We rewrote PHINodes, so that only the ones with exactly one incoming
    // value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutine could only collect Argument and Instruction now.");
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends as
  // the code beyond coro.end is reachable during initial invocation of the
  // coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during initial invocation
          // of the coroutine while all the data are still available on the
          // stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block; we need to make sure that it is not in its own
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  uint64_t getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, uint64_t Align) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will be first set
  // with their original insertion field index. After the frame is built, their
  // indexes will be updated into the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldAlignMap;
  // Map from values to their offset on the frame. They would be set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the Alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
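//
// For example (a hypothetical case): an `alloca i32, align 64` occupies only
// four bytes, but the frame layout below must still honor the requested
// 64-byte alignment, which may require a padding field in front of it.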
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  Optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value *, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   Optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put the allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///   cppcoro::task<void> alternative_paths(bool cond) {
  ///     if (cond) {
  ///       big_structure a;
  ///       process(a);
  ///       co_await something();
  ///     } else {
  ///       big_structure b;
  ///       process2(b);
  ///       co_await something();
  ///     }
  ///   }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, so that Allocas in the
  /// same non-overlapping set can share a slot in the coroutine frame. It
  /// then adds a field for each set, using the largest alloca's type as the
  /// field type.
  ///
  /// Side effects: because we sort the allocas, the order of allocas in the
  /// frame may differ from their order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
                                      bool IsHeader = false,
                                      bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size=0, we don't need to add a field and it
    // can just point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    // If we are spilling values we don't need to worry about ABI alignment
    // concerns.
    auto ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment =
        (IsSpillOfValue && MaxFrameAlignment)
            ? (*MaxFrameAlignment < ABIAlign ? *MaxFrameAlignment : ABIAlign)
            : ABIAlign;
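    // For example (hypothetical numbers): when spilling a value whose ABI
    // alignment is 32 into an async context whose maximum frame alignment is
    // 16, TyAlignment is capped at 16.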
    if (!FieldAlignment) {
      FieldAlignment = TyAlignment;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

      // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment.value());
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap in the
  // blocks containing coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // of each alloca. This should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it merely prevents putting the allocas into the same
  // slot.
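  //
  // A typical pattern looks like (illustrative IR, names hypothetical):
  //
  //   %s = call i8 @llvm.coro.suspend(token %save, i1 false)
  //   switch i8 %s, label %suspend [i8 0, label %resume
  //                                 i8 1, label %cleanup]
  //
  // Temporarily retargeting the default destination (%suspend, which leads to
  // coro.end) at the first case destination keeps the coro.end blocks out of
  // the liveness computation below; the original destination is restored
  // afterwards.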
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto *CoroSuspendInst : Shape.CoroSuspends) {
    for (auto *U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front. This way the larger allocas have higher
  // priority to merge, which can potentially save more space. It also keeps
  // each AllocaSet ordered, so we can easily get the largest Alloca in an
  // AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing non-overlapping set whose members do not
    // interfere with the Alloca. If we find one, insert the Alloca into
    // that set.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "AllocaSet being processed is empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A should satisfy the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies to handle the
      // alignment information during the merging process, but they seem hard
      // to implement and of little benefit.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the saved default target destination for each switch statement.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output shows which allocas were merged into a single slot.
  LLVM_DEBUG(for (auto &AllocaSet : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set\n";
      dbgs() << "\tAllocas are \n";
      for (auto *Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void *>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type *, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) != DIVarCache.end())
      continue;

    auto DDIs = FindDbgDeclareUses(V);
    auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
      return DDI->getExpression()->getNumElements() == 0;
    });
    if (I != DDIs.end())
      DIVarCache.insert({V, (*I)->getVariable()});
  }
}

/// Create a name for Type. It uses MDString to store the newly created string
/// to avoid memory leaks.
static StringRef solveTypeName(Type *Ty) {
  if (Ty->isIntegerTy()) {
    // The longest name in common may be '__int_128', which has 9 characters.
    SmallString<16> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    auto *MDName = MDString::get(Ty->getContext(), OS.str());
    return MDName->getString();
  }

  if (Ty->isFloatingPointTy()) {
    if (Ty->isFloatTy())
      return "__float_";
    if (Ty->isDoubleTy())
      return "__double_";
    return "__floating_type_";
  }

  if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
    if (PtrTy->isOpaque())
      return "PointerType";
    Type *PointeeTy = PtrTy->getNonOpaquePointerElementType();
    auto Name = solveTypeName(PointeeTy);
    if (Name == "UnknownType")
      return "PointerType";
    SmallString<16> Buffer;
    Twine(Name + "_Ptr").toStringRef(Buffer);
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  if (Ty->isStructTy()) {
    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    auto Name = Ty->getStructName();

    SmallString<16> Buffer(Name);
    for_each(Buffer, [](auto &Iter) {
      if (Iter == '.' || Iter == ':')
        Iter = '_';
    });
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  return "UnknownType";
}

static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
                           const DataLayout &Layout, DIScope *Scope,
                           unsigned LineNum,
                           DenseMap<Type *, DIType *> &DITypeCache) {
  if (DIType *DT = DITypeCache.lookup(Ty))
    return DT;

  StringRef Name = solveTypeName(Ty);

  DIType *RetType = nullptr;

  if (Ty->isIntegerTy()) {
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isFloatingPointTy()) {
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_float,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isPointerTy()) {
    // Construct a BasicType instead of a PointerType to avoid an infinite
    // search problem.
    // For example, we would be in trouble if we traversed recursively:
    //
    //   struct Node {
    //     Node* ptr;
    //   };
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
        llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
    RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
///   1. For every value in the frame, we search its dbg.declare uses to find
///      the corresponding debug variable for the value. If we can find the
///      debug variable, we get full and accurate debug information.
///   2. If step 1 fails, we fall back to building a DIType from the LLVM
///      type alone (see solveDIType). We only handle integer, floating-point,
///      pointer, and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info, so we don't need to generate debug info for
  // the frame either.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
  if (DIs.empty())
    return;

  DbgDeclareInst *PromiseDDI = DIs.front();
  DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  DILocation *DILoc = PromiseDDI->getDebugLoc().get();
  unsigned LineNum = PromiseDIVariable->getLine();

  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert({ResumeIndex,
                  DBuilder.createBasicType("__resume_fn",
                                           Layout.getTypeSizeInBits(ResumeFnTy),
                                           dwarf::DW_ATE_address)});
  TyCache.insert(
      {DestroyIndex, DBuilder.createBasicType(
                         "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
                         dwarf::DW_ATE_address)});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) == DIVarCache.end())
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from index to the (Align, Offset) pair.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The Align and Offset of the Resume and Destroy functions are fixed.
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid duplicate type names, e.g., there may be
  // many i32 and i64 fields in one coroutine frame, so we use names like
  // i32_0 and i32_1 to distinguish them, since conflicting field names make
  // no sense.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle type which is not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The DISubprogram has a RetainedNodes field that records the debug
  // variables it contains, so we need to add __coro_frame to it.
  //
  // If we don't add __coro_frame to the RetainedNodes, users may get
  // `no symbol __coro_frame in context` rather than the more precise
  // `__coro_frame is optimized out`.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.getInsertPtAfterFramePtr());
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  // We will use this value to cap the alignment of spilled values.
  Optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // The PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot,
  // we add allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //   1. updateLayoutIndex can update its index after
  //      `performOptimizedStructLayout`, and
  //   2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed value in the frame,
    // instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id =
        B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? If so, we will need to recreate the aliases
//      after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of its
//      aliases. In the end, we check if there exist any two basic blocks
//      that cross suspension points. If so, this alloca must be put on the
//      frame.
//   b. Whether the alloca or any alias of the alloca escapes at some point,
//      either because the address is stored somewhere, or because the
//      address is used in a function call that might capture it. If it ever
//      escapes, this alloca must be put on the frame conservatively.
// To answer question 2, we track writes through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call, or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is a known limitation; an ideal
// solution would likely require a significant redesign.
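//
// As an illustrative sketch of question 3 (IR names hypothetical):
//
//   %a = alloca [8 x i32]
//   %p = getelementptr [8 x i32], [8 x i32]* %a, i64 0, i64 2  ; offset 8
//   %hdl = call i8* @llvm.coro.begin(...)
//   store i32 1, i32* %p                  ; alias used after coro.begin
//
// Once %a is moved into the frame, %p must be recreated after coro.begin as
// a GEP off the frame slot of %a plus the recorded offset of 8 bytes.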
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it would
    // be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca may be written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //   %ptr = alloca ..
    //   %addr = alloca ..
    //   store %ptr, %addr
    //   %x = load %addr
    //   ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overwriting the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // A BitCastInst creates aliases of the memory location being
          // stored into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    // When a lifetime marker refers to a subrange of the original alloca,
    // ignore it to avoid misleading the analysis.
    if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
        !Offset.isZero())
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return ShouldLiveOnFrame.getValue();
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst that are created before CoroBegin
  // and used after CoroBegin. Each entry contains the instruction and the
  // offset into the original Alloca. They need to be recreated after
  // CoroBegin off the frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      for (auto *I : Users)
        for (auto *S : LifetimeStarts)
          if (Checker.isDefinitionAcrossSuspend(*S, I))
            return true;
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in-between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (especially the return object).
    // To fix that, we need to:
    //   1) Potentially treat sret as nocapture in calls.
    //   2) Specially handle the return object and put it on the stack.
    //   3) Utilize the lifetime.end intrinsic.
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after it.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
        // If we have seen two different possible values for this alias, we set
        // it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// a catchswitch instruction. Placing the spill before the catchswitch would
// violate the requirement that a catchswitch, like all other EH pads, must
// be the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//   cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

static void createFramePtr(coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  Shape.FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//   %hdl = coro.begin(...)
//   whatever
//
// becomes:
//
//   %hdl = coro.begin(...)
//   %FramePtr = bitcast i8* hdl to %f.frame*
//   br label %AllocaSpillBB
//
// AllocaSpillBB:
//   ; geps corresponding to allocas that were moved to coroutine frame
//   br label PostSpill
//
// PostSpill:
//   whatever
//
//
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Value *FramePtr = Shape.FramePtr;
  DominatorTree DT(*CB->getFunction());
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (isa<AllocaInst>(Orig)) {
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the frame slot of this AllocaInst may be shared with
      // another AllocaInst. So we cast the GEP to the type of the AllocaInst
      // here to reuse the frame storage.
      //
      // Note: if we change the strategy for dealing with alignment, we need
      // to refine this cast.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };

  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = Shape.getInsertPtAfterFramePtr();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

      if (Arg->hasByValAttr())
        ByValTy = Arg->getParamByValType();
    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the coroutine frame pointer is computed.
        InsertPt = Shape.getInsertPtAfterFramePtr();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip the PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (ByValTy) {
      // For byval arguments, we need to store the pointed-to value in the
      // frame instead of the pointer itself.
      auto *Value = Builder.CreateLoad(ByValTy, Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block, reload the spilled value from the
      // coroutine frame; for byval arguments the frame GEP itself acts as the
      // reloaded value.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (ByValTy)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() by the CoroCloner.
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
        }
      }

      // Salvage debug info on any dbg.addr that we see. We do not insert them
      // into each block where we have a use, though.
      if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
        coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
      }

      // If we have a single-edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi-edge
      // PHINodes by rewriting them in the rewritePHIs function.)
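      // For example (a sketch with hypothetical names), a single-edge PHI
      //   %val = phi i32 [ %x, %x.from.block ]
      // is erased and its uses are redirected to the reload
      //   %x.reload = load i32, i32* %x.reload.addr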
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // the reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();

  auto SpillBlock = FramePtrBB->splitBasicBlock(
      Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To retain debuggability, we replace the uses of allocas
  // for dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableIntrinsic *, 4> DIs;
    findDbgUsers(DIs, Alloca);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
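    // A sketch of the rewrite below (hypothetical names and offset): an alias
    //   %alias = getelementptr i8, i8* %alloca, i64 8
    // created before coro.begin is recreated from the frame slot as
    //   %raw = bitcast %T* %alloca.reload.addr to i8*
    //   %alias.frame = getelementptr i8, i8* %raw, i64 8
    // and the uses dominated by coro.begin are redirected to it.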
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto &Value = Alias.second.getValue();
      auto ITy = IntegerType::get(C, Value.getBitWidth());
      auto *AliasPtr = Builder.CreateGEP(Type::getInt8Ty(C), FramePtrRaw,
                                         ConstantInt::get(ITy, Value));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
}

// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  // cleanuppad:
  //    %2 = phi i32 [%0, %catchswitch], [%1, %catch.1]
  //    %3 = cleanuppad within none []
  //
  // It will create:
  //
  // cleanuppad.corodispatch:
  //    %2 = phi i8 [0, %catchswitch], [1, %catch.1]
  //    %3 = cleanuppad within none []
  //    switch i8 %2, label %unreachable
  //            [i8 0, label %cleanuppad.from.catchswitch
  //             i8 1, label %cleanuppad.from.catch.1]
  // cleanuppad.from.catchswitch:
  //    %4 = phi i32 [%0, %catchswitch]
  //    br label %cleanuppad
  // cleanuppad.from.catch.1:
  //    %6 = phi i32 [%1, %catch.1]
  //    br label %cleanuppad
  // cleanuppad:
  //    %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                 [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case of switching on an invalid value in the
  // dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Set up the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void cleanupSinglePredPHIs(Function &F) {
  SmallVector<PHINode *, 32> Worklist;
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}

static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  // loop:
  //    %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  // loop.from.entry:
  //    %n.loop.pre = phi i32 [%n, %entry]
  //    br label %loop
  // loop.from.loop:
  //    %inc.loop.pre = phi i32 [%inc, %loop]
  //    br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
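        // The shape being handled here is, as a sketch (hypothetical names):
        //
        //   %cs = catchswitch within none [label %handler] unwind label %bb
        //
        // where %bb begins with PHIs followed by this cleanuppad; the
        // dispatcher built by rewritePHIsForCleanupPad absorbs those PHIs.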
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function,
    // after ehAwareSplitEdge has cloned it into the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to the ehAwareSplitEdge function cloned the original landing pad.
    // We no longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that lives across a suspend point, recreate that
// value after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {

        bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
        CurrentBlock = U->getParent();
        auto *InsertBlock = IsInCoroSuspendBlock
                                ? CurrentBlock->getSinglePredecessor()
                                : CurrentBlock;
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            IsInCoroSuspendBlock ?
                InsertBlock->getTerminator()
                : &*InsertBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}

// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it
// will be alone in its own block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock *, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't pass through them.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0)
    return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB))
    return true;

  // Recurse into the successors.
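  // As a sketch of the intent (hypothetical blocks): from a block %free that
  // frees a coro.alloca, a path such as %free -> %cleanup -> %suspend counts
  // as leaving immediately, while any path that has not reached a suspend
  // within the depth bound is conservatively assumed to loop back.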
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI)
      continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst *> LocalAllocas,
                              SmallVectorImpl<Instruction *> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(Align(AI->getAlignment()));

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

        // Replace frees with stackrestores. This is safe because
        // alloca.alloc is required to obey a stack discipline, although we
        // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}

/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction *> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
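  // A sketch of the IR this emits (assuming ValueTy is i8*); the call to a
  // null callee is a placeholder that is rewritten into real swifterror
  // accesses when the coroutine is split:
  //
  //   %err = call i8* null()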
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {V});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}

/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
///
/// The argument keeps the swifterror flag.
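///
/// A sketch of the resulting prologue (hypothetical names, assuming the
/// argument %err has type %swift.error**):
///
///   %err.alloca = alloca %swift.error*
///   store %swift.error* null, %swift.error** %err.alloca
///   ; former uses of %err now use %err.alloca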
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst *> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  // swifterror arguments are required to have pointer-to-pointer type,
  // so create a pointer-typed alloca with opaque pointers.
  auto ValueTy = ArgTy->isOpaque() ? PointerType::getUnqual(F.getContext())
                                   : ArgTy->getNonOpaquePointerElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void)emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void)emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst *, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr())
      continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError())
      continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
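  // The net effect, as a sketch (hypothetical names): a pre-coro.begin user
  // chain such as
  //   %gep = getelementptr ... %def
  //   %hdl = call i8* @llvm.coro.begin(...)
  // is reordered so %gep (and everything transitively using it) is moved to
  // just after coro.begin, keeping the instructions in dominance order.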
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}

/// For each local variable whose users are all inside a single suspended
/// region, sink its lifetime.start markers to the block right after the
/// suspend block. Doing so minimizes the lifetime of each variable, hence
/// minimizing the amount of data we end up putting on the frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction *I) {
        if (auto *II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers: if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start markers, and the GEPs and bitcasts used by
          // them.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to the dominating block when they are
      // only used outside the region.
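      // A sketch of the sinking (hypothetical names): a marker in the entry
      // block,
      //   call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.cast)
      // is replaced by a clone inserted before the terminator of the block
      // that follows the suspend, so %x is no longer considered live across
      // the suspend point.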
      if (Valid && Lifetimes.size() != 0) {
        // This may be AI itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value * {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // All the lifetime.start markers outside the region are no longer
        // necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}

static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    DominatorTree DT(F);
    // The code that uses lifetime.start intrinsics does not work for
    // functions with loops without exits. Disable it on ABIs known to
    // generate such code.
    bool ShouldUseLifetimeStartInfo =
        (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
         Shape.ABI != coro::ABI::RetconOnce);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker,
                             ShouldUseLifetimeStartInfo};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
  Function *F = DVI->getFunction();
  IRBuilder<> Builder(F->getContext());
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert into a DIExpression.
  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
  Value *Storage = DVI->getVariableLocationOp(0);
  Value *OriginalStorage = Storage;
  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location, no DW_OP_deref operation for the
      // last direct load from an alloca is necessary. This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!SkipOutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
      Storage = StInst->getOperand(0);
    } else {
      SmallVector<uint64_t, 16> Ops;
      SmallVector<Value *, 0> AdditionalValues;
      Value *Op = llvm::salvageDebugInfoImpl(
          *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
          AdditionalValues);
      if (!Op || !AdditionalValues.empty()) {
        // If salvaging failed or salvaging produced more than one location
        // operand, give up.
        break;
      }
      Storage = Op;
      Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
    }
    SkipOutermostLoad = false;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  //
  // Avoid creating an alloca that would be eliminated by optimization
  // passes; the corresponding dbg.declares would then be invalid.
  if (!OptimizeFrame)
    if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
      auto &Cached = DbgPtrAllocaCache[Storage];
      if (!Cached) {
        Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                      Arg->getName() + ".debug");
        Builder.CreateStore(Storage, Cached);
      }
      Storage = Cached;
      // FIXME: LLVM lacks nuanced semantics to differentiate between
      // memory and direct locations at the IR level. The backend will
      // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
      // location. Thus, if there are deref and offset operations in the
      // expression, we need to add a DW_OP_deref at the *start* of the
      // expression to first load the contents of the alloca before
      // adjusting it with the expression.
      if (Expr && Expr->isComplex())
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }

  DVI->replaceVariableLocationOp(OriginalStorage, Storage);
  DVI->setExpression(Expr);
  // We only hoist dbg.declare today, since it doesn't make sense to hoist
  // dbg.value or dbg.addr: they do not have the same function-wide
  // guarantees that dbg.declare does.
  if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
    if (auto *II = dyn_cast<InvokeInst>(Storage))
      DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
    else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
      DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
    else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
      assert(!InsertPt->isTerminator() &&
             "Unexpected terminator that could return a storage value.");
      DVI->moveAfter(InsertPt);
    } else if (isa<Argument>(Storage))
      DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
  }
}

void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call function call. (Arguments to the coro.end
    // instructions would be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
  cleanupSinglePredPHIs(F);

  // Transform multi-edge PHI Nodes, so that any value feeding into a PHI will
  // never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst *, 4> LocalAllocas;
  SmallVector<Instruction *, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I)) {
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));
        }

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
      Shape.ABI != coro::ABI::RetconOnce)
    sinkLifetimeStartMarkers(F, Shape, Checker);

  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
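  // The eventual effect, as a sketch (hypothetical names, realized later by
  // insertSpills): an argument %n used after a suspend gets a frame slot,
  //   %n.spill.addr = getelementptr inbounds %f.frame,
  //                       %f.frame* %FramePtr, i32 0, i32 <idx>
  //   store i32 %n, i32* %n.spill.addr
  // and each use after the suspend is rewritten to use an %n.reload load.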
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The Coroutine Promise is always included in the coroutine frame, so
    // there is no need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage those DbgValueInsts whose values are
  // already in the frame. The dbg.values for allocas are handled separately.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    llvm::for_each(DVIs, [&](DbgValueInst *DVI) {
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
    });
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}