//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains classes used to discover whether, for a particular value,
// there is a path from its definition to a use that crosses a suspend point.
//
// Using the information discovered we form a Coroutine Frame structure to
// contain those values. All uses of those values are replaced with appropriate
// GEP + load from the coroutine frame. At the point of the definition we spill
// the value into the coroutine frame.
//===----------------------------------------------------------------------===//

#include "CoroInternal.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/OptimizedStructLayout.h"
#include "llvm/Support/circular_raw_ostream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>

using namespace llvm;

// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
// "coro-frame", which results in leaner debug spew.
#define DEBUG_TYPE "coro-suspend-crossing"

enum { SmallVectorThreshold = 32 };

// Provides a two-way mapping between basic blocks and their indices.
namespace {
class BlockToIndexMapping {
  SmallVector<BasicBlock *, SmallVectorThreshold> V;

public:
  size_t size() const { return V.size(); }

  BlockToIndexMapping(Function &F) {
    for (BasicBlock &BB : F)
      V.push_back(&BB);
    llvm::sort(V);
  }

  size_t blockToIndex(BasicBlock *BB) const {
    auto *I = llvm::lower_bound(V, BB);
    assert(I != V.end() && *I == BB && "BlockToIndexMapping: Unknown block");
    return I - V.begin();
  }

  BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
};
} // end anonymous namespace

// The SuspendCrossingInfo maintains the data needed to answer whether, given
// two BasicBlocks A and B, there is a path from A to B that passes through a
// suspend point.
//
// For every basic block 'i' it maintains a BlockData that consists of:
//   Consumes: a bit vector which contains a set of indices of blocks that can
//             reach block 'i'
//   Kills: a bit vector which contains a set of indices of blocks that can
//          reach block 'i', but at least one of those paths crosses a suspend
//          point
//   Suspend: a boolean indicating whether block 'i' contains a suspend point.
//   End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
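//
// As an illustrative sketch (block names are invented, not from a real test):
// for a linear CFG entry -> susp -> resume, where susp contains the suspend
// point, the fixed point computed below gives resume a Consumes set of
// {entry, susp, resume} and a Kills set of {entry, susp}. Hence a value
// defined in entry and used in resume crosses a suspend and must be spilled.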
//
namespace {
struct SuspendCrossingInfo {
  BlockToIndexMapping Mapping;

  struct BlockData {
    BitVector Consumes;
    BitVector Kills;
    bool Suspend = false;
    bool End = false;
  };
  SmallVector<BlockData, SmallVectorThreshold> Block;

  iterator_range<succ_iterator> successors(BlockData const &BD) const {
    BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
    return llvm::successors(BB);
  }

  BlockData &getBlockData(BasicBlock *BB) {
    return Block[Mapping.blockToIndex(BB)];
  }

  void dump() const;
  void dump(StringRef Label, BitVector const &BV) const;

  SuspendCrossingInfo(Function &F, coro::Shape &Shape);

  bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
    size_t const DefIndex = Mapping.blockToIndex(DefBB);
    size_t const UseIndex = Mapping.blockToIndex(UseBB);

    bool const Result = Block[UseIndex].Kills[DefIndex];
    LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
                      << " answer is " << Result << "\n");
    return Result;
  }

  bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
    auto *I = cast<Instruction>(U);

    // PHINodes have already been rewritten, so that only the ones with exactly
    // one incoming value need to be analyzed.
    if (auto *PN = dyn_cast<PHINode>(I))
      if (PN->getNumIncomingValues() > 1)
        return false;

    BasicBlock *UseBB = I->getParent();

    // As a special case, treat uses by an llvm.coro.suspend.retcon or an
    // llvm.coro.suspend.async as if they were uses in the suspend's single
    // predecessor: the uses conceptually occur before the suspend.
    if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
      UseBB = UseBB->getSinglePredecessor();
      assert(UseBB && "should have split coro.suspend into its own block");
    }

    return hasPathCrossingSuspendPoint(DefBB, UseBB);
  }

  bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
    return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
  }

  bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
    auto *DefBB = I.getParent();

    // As a special case, treat values produced by an llvm.coro.suspend.*
    // as if they were defined in the single successor: the uses
    // conceptually occur after the suspend.
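    // For illustration (block names invented): if %v is produced by a suspend
    // instruction terminating block susp, and resume is its single successor,
    // then a use of %v in resume is not treated as crossing that suspend,
    // since %v only materializes on the resume path.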
    if (isa<AnyCoroSuspendInst>(I)) {
      DefBB = DefBB->getSingleSuccessor();
      assert(DefBB && "should have split coro.suspend into its own block");
    }

    return isDefinitionAcrossSuspend(DefBB, U);
  }

  bool isDefinitionAcrossSuspend(Value &V, User *U) const {
    if (auto *Arg = dyn_cast<Argument>(&V))
      return isDefinitionAcrossSuspend(*Arg, U);
    if (auto *Inst = dyn_cast<Instruction>(&V))
      return isDefinitionAcrossSuspend(*Inst, U);

    llvm_unreachable(
        "Coroutines can only collect Arguments and Instructions for now.");
  }
};
} // end anonymous namespace

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
                                                BitVector const &BV) const {
  dbgs() << Label << ":";
  for (size_t I = 0, N = BV.size(); I < N; ++I)
    if (BV[I])
      dbgs() << " " << Mapping.indexToBlock(I)->getName();
  dbgs() << "\n";
}

LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
  for (size_t I = 0, N = Block.size(); I < N; ++I) {
    BasicBlock *const B = Mapping.indexToBlock(I);
    dbgs() << B->getName() << ":\n";
    dump("   Consumes", Block[I].Consumes);
    dump("      Kills", Block[I].Kills);
  }
  dbgs() << "\n";
}
#endif

SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
    : Mapping(F) {
  const size_t N = Mapping.size();
  Block.resize(N);

  // Initialize every block so that it consumes itself.
  for (size_t I = 0; I < N; ++I) {
    auto &B = Block[I];
    B.Consumes.resize(N);
    B.Kills.resize(N);
    B.Consumes.set(I);
  }

  // Mark all CoroEnd blocks. We do not propagate Kills beyond coro.ends, as
  // the code beyond coro.end is reachable during the initial invocation of
  // the coroutine.
  for (auto *CE : Shape.CoroEnds)
    getBlockData(CE->getParent()).End = true;

  // Mark all suspend blocks and indicate that they kill everything they
  // consume. Note that crossing coro.save also requires a spill, as any code
  // between coro.save and coro.suspend may resume the coroutine and all of the
  // state needs to be saved by that time.
  auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
    BasicBlock *SuspendBlock = BarrierInst->getParent();
    auto &B = getBlockData(SuspendBlock);
    B.Suspend = true;
    B.Kills |= B.Consumes;
  };
  for (auto *CSI : Shape.CoroSuspends) {
    markSuspendBlock(CSI);
    if (auto *Save = CSI->getCoroSave())
      markSuspendBlock(Save);
  }

  // Iterate propagating consumes and kills until they stop changing.
  int Iteration = 0;
  (void)Iteration;

  bool Changed;
  do {
    LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
    LLVM_DEBUG(dbgs() << "==============\n");

    Changed = false;
    for (size_t I = 0; I < N; ++I) {
      auto &B = Block[I];
      for (BasicBlock *SI : successors(B)) {

        auto SuccNo = Mapping.blockToIndex(SI);

        // Save the Consumes and Kills bitsets so that it is easy to see
        // if anything changed after propagation.
        auto &S = Block[SuccNo];
        auto SavedConsumes = S.Consumes;
        auto SavedKills = S.Kills;

        // Propagate Kills and Consumes from block B into its successor S.
        S.Consumes |= B.Consumes;
        S.Kills |= B.Kills;

        // If block B is a suspend block, it should propagate kills into its
        // successor for every block B consumes.
        if (B.Suspend) {
          S.Kills |= B.Consumes;
        }
        if (S.Suspend) {
          // If block S is a suspend block, it should kill all of the blocks
          // it consumes.
          S.Kills |= S.Consumes;
        } else if (S.End) {
          // If block S is an end block, it should not propagate kills as the
          // blocks following coro.end() are reached during the initial
          // invocation of the coroutine while all the data are still
          // available on the stack or in the registers.
          S.Kills.reset();
        } else {
          // This is reached when block S is neither a suspend block nor a
          // coro.end block, and we need to make sure that it is not in the
          // kill set.
          S.Kills.reset(SuccNo);
        }

        // See if anything changed.
        Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);

        if (S.Kills != SavedKills) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
                            << "\n");
          LLVM_DEBUG(dump("S.Kills", S.Kills));
          LLVM_DEBUG(dump("SavedKills", SavedKills));
        }
        if (S.Consumes != SavedConsumes) {
          LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
          LLVM_DEBUG(dump("S.Consume", S.Consumes));
          LLVM_DEBUG(dump("SavedCons", SavedConsumes));
        }
      }
    }
  } while (Changed);
  LLVM_DEBUG(dump());
}

#undef DEBUG_TYPE // "coro-suspend-crossing"
#define DEBUG_TYPE "coro-frame"

namespace {
class FrameTypeBuilder;
// Mapping from the to-be-spilled value to all the users that need a reload.
using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
struct AllocaInfo {
  AllocaInst *Alloca;
  DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
  bool MayWriteBeforeCoroBegin;
  AllocaInfo(AllocaInst *Alloca,
             DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
             bool MayWriteBeforeCoroBegin)
      : Alloca(Alloca), Aliases(std::move(Aliases)),
        MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
};
struct FrameDataInfo {
  // All the values (that are not allocas) that need to be spilled to the
  // frame.
  SpillInfo Spills;
  // Allocas contains all values defined as allocas that need to live in the
  // frame.
  SmallVector<AllocaInfo, 8> Allocas;

  SmallVector<Value *, 8> getAllDefs() const {
    SmallVector<Value *, 8> Defs;
    for (const auto &P : Spills)
      Defs.push_back(P.first);
    for (const auto &A : Allocas)
      Defs.push_back(A.Alloca);
    return Defs;
  }

  uint32_t getFieldIndex(Value *V) const {
    auto Itr = FieldIndexMap.find(V);
    assert(Itr != FieldIndexMap.end() &&
           "Value does not have a frame field index");
    return Itr->second;
  }

  void setFieldIndex(Value *V, uint32_t Index) {
    assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
           "Cannot set the index for the same field twice.");
    FieldIndexMap[V] = Index;
  }

  Align getAlign(Value *V) const {
    auto Iter = FieldAlignMap.find(V);
    assert(Iter != FieldAlignMap.end());
    return Iter->second;
  }

  void setAlign(Value *V, Align AL) {
    assert(FieldAlignMap.count(V) == 0);
    FieldAlignMap.insert({V, AL});
  }

  uint64_t getDynamicAlign(Value *V) const {
    auto Iter = FieldDynamicAlignMap.find(V);
    assert(Iter != FieldDynamicAlignMap.end());
    return Iter->second;
  }

  void setDynamicAlign(Value *V, uint64_t Align) {
    assert(FieldDynamicAlignMap.count(V) == 0);
    FieldDynamicAlignMap.insert({V, Align});
  }

  uint64_t getOffset(Value *V) const {
    auto Iter = FieldOffsetMap.find(V);
    assert(Iter != FieldOffsetMap.end());
    return Iter->second;
  }

  void setOffset(Value *V, uint64_t Offset) {
    assert(FieldOffsetMap.count(V) == 0);
    FieldOffsetMap.insert({V, Offset});
  }

  // Remap the index of every field in the frame, using the final layout index.
  void updateLayoutIndex(FrameTypeBuilder &B);

private:
  // LayoutIndexUpdateStarted is used to avoid updating the index of any field
  // twice by mistake.
  bool LayoutIndexUpdateStarted = false;
  // Map from values to their slot indexes on the frame. They will first be
  // set to their original insertion field index. After the frame is built,
  // their indexes will be updated to the final layout index.
  DenseMap<Value *, uint32_t> FieldIndexMap;
  // Map from values to their alignment on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, Align> FieldAlignMap;
  DenseMap<Value *, uint64_t> FieldDynamicAlignMap;
  // Map from values to their offset on the frame. They are set after
  // the frame is built.
  DenseMap<Value *, uint64_t> FieldOffsetMap;
};
} // namespace

#ifndef NDEBUG
static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
  dbgs() << "------------- " << Title << " --------------\n";
  for (const auto &E : Spills) {
    E.first->dump();
    dbgs() << "   user: ";
    for (auto *I : E.second)
      I->dump();
  }
}

static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
  dbgs() << "------------- Allocas --------------\n";
  for (const auto &A : Allocas) {
    A.Alloca->dump();
  }
}
#endif

namespace {
using FieldIDType = size_t;
// We cannot rely solely on natural alignment of a type when building a
// coroutine frame: if the alignment specified on the alloca instruction
// differs from the natural alignment of the alloca type, we will need to
// insert padding.
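//
// For illustration (numbers made up): an `alloca i64, align 64` yields a
// field whose alignment (64) exceeds the natural alignment of i64 (8). If the
// optimized layout leaves a gap before such a field, finish() below fills the
// gap with an explicit [N x i8] padding array, and the struct may need to be
// marked packed.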
class FrameTypeBuilder {
private:
  struct Field {
    uint64_t Size;
    uint64_t Offset;
    Type *Ty;
    FieldIDType LayoutFieldIndex;
    Align Alignment;
    Align TyAlignment;
    uint64_t DynamicAlignBuffer;
  };

  const DataLayout &DL;
  LLVMContext &Context;
  uint64_t StructSize = 0;
  Align StructAlign;
  bool IsFinished = false;

  Optional<Align> MaxFrameAlignment;

  SmallVector<Field, 8> Fields;
  DenseMap<Value*, unsigned> FieldIndexByKey;

public:
  FrameTypeBuilder(LLVMContext &Context, const DataLayout &DL,
                   Optional<Align> MaxFrameAlignment)
      : DL(DL), Context(Context), MaxFrameAlignment(MaxFrameAlignment) {}

  /// Add a field to this structure for the storage of an `alloca`
  /// instruction.
  LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
                                               bool IsHeader = false) {
    Type *Ty = AI->getAllocatedType();

    // Make an array type if this is a static array allocation.
    if (AI->isArrayAllocation()) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
      else
        report_fatal_error("Coroutines cannot handle non static allocas yet");
    }

    return addField(Ty, AI->getAlign(), IsHeader);
  }

  /// We want to put allocas whose lifetime ranges do not overlap
  /// into one slot of the coroutine frame.
  /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
  ///
  ///     cppcoro::task<void> alternative_paths(bool cond) {
  ///         if (cond) {
  ///             big_structure a;
  ///             process(a);
  ///             co_await something();
  ///         } else {
  ///             big_structure b;
  ///             process2(b);
  ///             co_await something();
  ///         }
  ///     }
  ///
  /// We want to put variable a and variable b in the same slot to
  /// reduce the size of the coroutine frame.
  ///
  /// This function uses the StackLifetime algorithm to partition the
  /// AllocaInsts in Spills into non-overlapping sets, in order to put allocas
  /// in the same non-overlapping set into the same slot in the coroutine
  /// frame. Then we add a field for each non-overlapping set, using the
  /// largest type as the field type.
  ///
  /// Side Effects: Because we sort the allocas, the order of allocas in the
  /// frame may be different from the order in the source code.
  void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
                          coro::Shape &Shape);

  /// Add a field to this structure.
  LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign MaybeFieldAlignment,
                                      bool IsHeader = false,
                                      bool IsSpillOfValue = false) {
    assert(!IsFinished && "adding fields to a finished builder");
    assert(Ty && "must provide a type for a field");

    // The field size is always the alloc size of the type.
    uint64_t FieldSize = DL.getTypeAllocSize(Ty);

    // For an alloca with size = 0, we don't need to add a field and it
    // can just point to any index in the frame. Use index 0.
    if (FieldSize == 0) {
      return 0;
    }

    // The field alignment might not be the type alignment, but we need
    // to remember the type alignment anyway to build the type.
    // If we are spilling values we don't need to worry about ABI alignment
    // concerns.
    Align ABIAlign = DL.getABITypeAlign(Ty);
    Align TyAlignment = ABIAlign;
    if (IsSpillOfValue && MaxFrameAlignment && *MaxFrameAlignment < ABIAlign)
      TyAlignment = *MaxFrameAlignment;
    Align FieldAlignment = MaybeFieldAlignment.value_or(TyAlignment);

    // The field alignment could be bigger than the max frame alignment, in
    // which case we request additional storage to be able to dynamically
    // align the pointer.
    uint64_t DynamicAlignBuffer = 0;
    if (MaxFrameAlignment && (FieldAlignment > *MaxFrameAlignment)) {
      DynamicAlignBuffer =
          offsetToAlignment(MaxFrameAlignment->value(), FieldAlignment);
      FieldAlignment = *MaxFrameAlignment;
      FieldSize = FieldSize + DynamicAlignBuffer;
    }

    // Lay out header fields immediately.
    uint64_t Offset;
    if (IsHeader) {
      Offset = alignTo(StructSize, FieldAlignment);
      StructSize = Offset + FieldSize;

      // Everything else has a flexible offset.
    } else {
      Offset = OptimizedStructLayoutField::FlexibleOffset;
    }

    Fields.push_back({FieldSize, Offset, Ty, 0, FieldAlignment, TyAlignment,
                      DynamicAlignBuffer});
    return Fields.size() - 1;
  }

  /// Finish the layout and set the body on the given type.
  void finish(StructType *Ty);

  uint64_t getStructSize() const {
    assert(IsFinished && "not yet finished!");
    return StructSize;
  }

  Align getStructAlign() const {
    assert(IsFinished && "not yet finished!");
    return StructAlign;
  }

  FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id].LayoutFieldIndex;
  }

  Field getLayoutField(FieldIDType Id) const {
    assert(IsFinished && "not yet finished!");
    return Fields[Id];
  }
};
} // namespace

void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
  auto Updater = [&](Value *I) {
    auto Field = B.getLayoutField(getFieldIndex(I));
    setFieldIndex(I, Field.LayoutFieldIndex);
    setAlign(I, Field.Alignment);
    uint64_t dynamicAlign =
        Field.DynamicAlignBuffer
            ? Field.DynamicAlignBuffer + Field.Alignment.value()
            : 0;
    setDynamicAlign(I, dynamicAlign);
    setOffset(I, Field.Offset);
  };
  LayoutIndexUpdateStarted = true;
  for (auto &S : Spills)
    Updater(S.first);
  for (const auto &A : Allocas)
    Updater(A.Alloca);
  LayoutIndexUpdateStarted = false;
}

void FrameTypeBuilder::addFieldForAllocas(const Function &F,
                                          FrameDataInfo &FrameData,
                                          coro::Shape &Shape) {
  using AllocaSetType = SmallVector<AllocaInst *, 4>;
  SmallVector<AllocaSetType, 4> NonOverlappedAllocas;

  // We need to add fields for the allocas at the end of this function.
  auto AddFieldForAllocasAtExit = make_scope_exit([&]() {
    for (auto AllocaList : NonOverlappedAllocas) {
      auto *LargestAI = *AllocaList.begin();
      FieldIDType Id = addFieldForAlloca(LargestAI);
      for (auto *Alloca : AllocaList)
        FrameData.setFieldIndex(Alloca, Id);
    }
  });

  if (!Shape.OptimizeFrame) {
    for (const auto &A : FrameData.Allocas) {
      AllocaInst *Alloca = A.Alloca;
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
    return;
  }

  // Because there are paths from the lifetime.start to coro.end
  // for each alloca, the live ranges of all allocas overlap
  // in the blocks that contain coro.end and their successor blocks.
  // So we choose to skip these blocks when we calculate the live range
  // for each alloca. It should be reasonable since there shouldn't be uses
  // in these blocks and the coroutine frame shouldn't be used outside the
  // coroutine body.
  //
  // Note that the user of coro.suspend may not be a SwitchInst. However, this
  // case seems too complex to handle. And it is harmless to skip these
  // patterns since it just prevents putting the allocas in the same
  // slot.
  DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
  for (auto CoroSuspendInst : Shape.CoroSuspends) {
    for (auto U : CoroSuspendInst->users()) {
      if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
        auto *SWI = const_cast<SwitchInst *>(ConstSWI);
        DefaultSuspendDest[SWI] = SWI->getDefaultDest();
        SWI->setDefaultDest(SWI->getSuccessor(1));
      }
    }
  }

  auto ExtractAllocas = [&]() {
    AllocaSetType Allocas;
    Allocas.reserve(FrameData.Allocas.size());
    for (const auto &A : FrameData.Allocas)
      Allocas.push_back(A.Alloca);
    return Allocas;
  };
  StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
                                      StackLifetime::LivenessType::May);
  StackLifetimeAnalyzer.run();
  auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
    return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
        StackLifetimeAnalyzer.getLiveRange(AI2));
  };
  auto GetAllocaSize = [&](const AllocaInfo &A) {
    Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
    assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
    assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
    return RetSize->getFixedSize();
  };
  // Put larger allocas in the front, so the larger allocas have higher
  // priority to merge, which can potentially save more space. Also, each
  // AllocaSet will be ordered, so we can easily get the largest alloca in
  // an AllocaSet.
  sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
    return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
  });
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    bool Merged = false;
    // Try to find an existing NonOverlappedAllocaSet that the alloca does
    // not interfere with. If one exists, insert the alloca into that
    // NonOverlappedAllocaSet.
    for (auto &AllocaSet : NonOverlappedAllocas) {
      assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
      bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
        return DoAllocasInterfere(Alloca, Iter);
      });
      // If the alignment of A is a multiple of the alignment of B, the
      // address of A should satisfy the alignment requirement of B.
      //
      // There may be other, more fine-grained strategies for handling the
      // alignment information during the merging process, but they seem hard
      // to handle and of little benefit.
      bool Alignable = [&]() -> bool {
        auto *LargestAlloca = *AllocaSet.begin();
        return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
               0;
      }();
      bool CouldMerge = NoInterference && Alignable;
      if (!CouldMerge)
        continue;
      AllocaSet.push_back(Alloca);
      Merged = true;
      break;
    }
    if (!Merged) {
      NonOverlappedAllocas.emplace_back(AllocaSetType(1, Alloca));
    }
  }
  // Restore the default target destination for each switch statement
  // we saved above.
  for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
    SwitchInst *SWI = SwitchAndDefaultDest.first;
    BasicBlock *DestBB = SwitchAndDefaultDest.second;
    SWI->setDefaultDest(DestBB);
  }
  // This debug output tells us which allocas were merged into one slot.
  LLVM_DEBUG(for (auto &AllocaSet
                  : NonOverlappedAllocas) {
    if (AllocaSet.size() > 1) {
      dbgs() << "In Function:" << F.getName() << "\n";
      dbgs() << "Find Union Set "
             << "\n";
      dbgs() << "\tAllocas are \n";
      for (auto Alloca : AllocaSet)
        dbgs() << "\t\t" << *Alloca << "\n";
    }
  });
}

void FrameTypeBuilder::finish(StructType *Ty) {
  assert(!IsFinished && "already finished!");

  // Prepare the optimal-layout field array.
  // The Id in the layout field is a pointer to our Field for it.
  SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
  LayoutFields.reserve(Fields.size());
  for (auto &Field : Fields) {
    LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
                              Field.Offset);
  }

  // Perform layout.
  auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
  StructSize = SizeAndAlign.first;
  StructAlign = SizeAndAlign.second;

  auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
    return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
  };

  // We need to produce a packed struct type if there's a field whose
  // assigned offset isn't a multiple of its natural type alignment.
  bool Packed = [&] {
    for (auto &LayoutField : LayoutFields) {
      auto &F = getField(LayoutField);
      if (!isAligned(F.TyAlignment, LayoutField.Offset))
        return true;
    }
    return false;
  }();

  // Build the struct body.
  SmallVector<Type*, 16> FieldTypes;
  FieldTypes.reserve(LayoutFields.size() * 3 / 2);
  uint64_t LastOffset = 0;
  for (auto &LayoutField : LayoutFields) {
    auto &F = getField(LayoutField);

    auto Offset = LayoutField.Offset;

    // Add a padding field if there's a padding gap and we're either
    // building a packed struct or the padding gap is more than we'd
    // get from aligning to the field type's natural alignment.
    assert(Offset >= LastOffset);
    if (Offset != LastOffset) {
      if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
        FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
                                            Offset - LastOffset));
    }

    F.Offset = Offset;
    F.LayoutFieldIndex = FieldTypes.size();

    FieldTypes.push_back(F.Ty);
    if (F.DynamicAlignBuffer) {
      FieldTypes.push_back(
          ArrayType::get(Type::getInt8Ty(Context), F.DynamicAlignBuffer));
    }
    LastOffset = Offset + F.Size;
  }

  Ty->setBody(FieldTypes, Packed);

#ifndef NDEBUG
  // Check that the IR layout matches the offsets we expect.
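  // (DataLayout recomputes the offsets from the struct body we just set, so
  // any disagreement here would indicate a bug in the padding logic above.)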
  auto Layout = DL.getStructLayout(Ty);
  for (auto &F : Fields) {
    assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
    assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
  }
#endif

  IsFinished = true;
}

static void cacheDIVar(FrameDataInfo &FrameData,
                       DenseMap<Value *, DILocalVariable *> &DIVarCache) {
  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) != DIVarCache.end())
      continue;

    auto DDIs = FindDbgDeclareUses(V);
    auto *I = llvm::find_if(DDIs, [](DbgDeclareInst *DDI) {
      return DDI->getExpression()->getNumElements() == 0;
    });
    if (I != DDIs.end())
      DIVarCache.insert({V, (*I)->getVariable()});
  }
}

/// Create a name for the Type. It uses MDString to store the newly created
/// string, to avoid memory leaks.
static StringRef solveTypeName(Type *Ty) {
  if (Ty->isIntegerTy()) {
    // The longest name in common use may be '__int_128', which has 9
    // characters.
    SmallString<16> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << "__int_" << cast<IntegerType>(Ty)->getBitWidth();
    auto *MDName = MDString::get(Ty->getContext(), OS.str());
    return MDName->getString();
  }

  if (Ty->isFloatingPointTy()) {
    if (Ty->isFloatTy())
      return "__float_";
    if (Ty->isDoubleTy())
      return "__double_";
    return "__floating_type_";
  }

  if (auto *PtrTy = dyn_cast<PointerType>(Ty)) {
    if (PtrTy->isOpaque())
      return "PointerType";
    Type *PointeeTy = PtrTy->getNonOpaquePointerElementType();
    auto Name = solveTypeName(PointeeTy);
    if (Name == "UnknownType")
      return "PointerType";
    SmallString<16> Buffer;
    Twine(Name + "_Ptr").toStringRef(Buffer);
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  if (Ty->isStructTy()) {
    if (!cast<StructType>(Ty)->hasName())
      return "__LiteralStructType_";

    auto Name = Ty->getStructName();

    SmallString<16> Buffer(Name);
    for (auto &Iter : Buffer)
      if (Iter == '.' || Iter == ':')
        Iter = '_';
    auto *MDName = MDString::get(Ty->getContext(), Buffer.str());
    return MDName->getString();
  }

  return "UnknownType";
}

static DIType *solveDIType(DIBuilder &Builder, Type *Ty,
                           const DataLayout &Layout, DIScope *Scope,
                           unsigned LineNum,
                           DenseMap<Type *, DIType *> &DITypeCache) {
  if (DIType *DT = DITypeCache.lookup(Ty))
    return DT;

  StringRef Name = solveTypeName(Ty);

  DIType *RetType = nullptr;

  if (Ty->isIntegerTy()) {
    auto BitWidth = cast<IntegerType>(Ty)->getBitWidth();
    RetType = Builder.createBasicType(Name, BitWidth, dwarf::DW_ATE_signed,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isFloatingPointTy()) {
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_float,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isPointerTy()) {
    // Construct a BasicType instead of a PointerType to avoid an infinite
    // search problem.
    // For example, we would be in trouble if we traversed recursively:
    //
    //    struct Node {
    //        Node* ptr;
    //    };
    RetType = Builder.createBasicType(Name, Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  } else if (Ty->isStructTy()) {
    auto *DIStruct = Builder.createStructType(
        Scope, Name, Scope->getFile(), LineNum, Layout.getTypeSizeInBits(Ty),
        Layout.getPrefTypeAlignment(Ty), llvm::DINode::FlagArtificial, nullptr,
        llvm::DINodeArray());

    auto *StructTy = cast<StructType>(Ty);
    SmallVector<Metadata *, 16> Elements;
    for (unsigned I = 0; I < StructTy->getNumElements(); I++) {
      DIType *DITy = solveDIType(Builder, StructTy->getElementType(I), Layout,
                                 Scope, LineNum, DITypeCache);
      assert(DITy);
      Elements.push_back(Builder.createMemberType(
          Scope, DITy->getName(), Scope->getFile(), LineNum,
          DITy->getSizeInBits(), DITy->getAlignInBits(),
          Layout.getStructLayout(StructTy)->getElementOffsetInBits(I),
          llvm::DINode::FlagArtificial, DITy));
    }

    Builder.replaceArrays(DIStruct, Builder.getOrCreateArray(Elements));

    RetType = DIStruct;
  } else {
    LLVM_DEBUG(dbgs() << "Unresolved Type: " << *Ty << "\n";);
    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);
    OS << Name.str() << "_" << Layout.getTypeSizeInBits(Ty);
    RetType = Builder.createBasicType(OS.str(), Layout.getTypeSizeInBits(Ty),
                                      dwarf::DW_ATE_address,
                                      llvm::DINode::FlagArtificial);
  }

  DITypeCache.insert({Ty, RetType});
  return RetType;
}

/// Build artificial debug info for C++ coroutine frames to allow users to
/// inspect the contents of the frame directly.
///
/// Create debug information for the coroutine frame with the debug name
/// "__coro_frame". The debug information for the fields of the coroutine
/// frame is constructed in the following way:
///     1. For all the values in the frame, we search the uses of dbg.declare
///        to find the corresponding debug variables for the values. If we can
///        find a debug variable, we get full and accurate debug information.
///     2. If we can't get debug information in step 1, we can only try to
///        build the DIType from the Type. We do this in solveDIType. We only
///        handle integer, float, double, pointer and struct types for now.
static void buildFrameDebugInfo(Function &F, coro::Shape &Shape,
                                FrameDataInfo &FrameData) {
  DISubprogram *DIS = F.getSubprogram();
  // If there is no DISubprogram for F, it implies the function was not
  // compiled with debug info. So we also don't need to generate debug info
  // for the frame.
  if (!DIS || !DIS->getUnit() ||
      !dwarf::isCPlusPlus(
          (dwarf::SourceLanguage)DIS->getUnit()->getSourceLanguage()))
    return;

  assert(Shape.ABI == coro::ABI::Switch &&
         "We can only build debug information for C++ coroutines now.\n");

  DIBuilder DBuilder(*F.getParent(), /*AllowUnresolved*/ false);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  assert(PromiseAlloca &&
         "Coroutine with switch ABI should own Promise alloca");

  TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(PromiseAlloca);
  if (DIs.empty())
    return;

  DbgDeclareInst *PromiseDDI = DIs.front();
  DILocalVariable *PromiseDIVariable = PromiseDDI->getVariable();
  DILocalScope *PromiseDIScope = PromiseDIVariable->getScope();
  DIFile *DFile = PromiseDIScope->getFile();
  DILocation *DILoc = PromiseDDI->getDebugLoc().get();
  unsigned LineNum = PromiseDIVariable->getLine();

  DICompositeType *FrameDITy = DBuilder.createStructType(
      DIS, "__coro_frame_ty", DFile, LineNum, Shape.FrameSize * 8,
      Shape.FrameAlign.value() * 8, llvm::DINode::FlagArtificial, nullptr,
      llvm::DINodeArray());
  StructType *FrameTy = Shape.FrameTy;
  SmallVector<Metadata *, 16> Elements;
  DataLayout Layout = F.getParent()->getDataLayout();

  DenseMap<Value *, DILocalVariable *> DIVarCache;
  cacheDIVar(FrameData, DIVarCache);

  unsigned ResumeIndex = coro::Shape::SwitchFieldIndex::Resume;
  unsigned DestroyIndex = coro::Shape::SwitchFieldIndex::Destroy;
  unsigned IndexIndex = Shape.SwitchLowering.IndexField;

  DenseMap<unsigned, StringRef> NameCache;
  NameCache.insert({ResumeIndex, "__resume_fn"});
  NameCache.insert({DestroyIndex, "__destroy_fn"});
  NameCache.insert({IndexIndex, "__coro_index"});

  Type *ResumeFnTy = FrameTy->getElementType(ResumeIndex),
       *DestroyFnTy = FrameTy->getElementType(DestroyIndex),
       *IndexTy = FrameTy->getElementType(IndexIndex);

  DenseMap<unsigned, DIType *> TyCache;
  TyCache.insert({ResumeIndex,
                  DBuilder.createBasicType("__resume_fn",
                                           Layout.getTypeSizeInBits(ResumeFnTy),
                                           dwarf::DW_ATE_address)});
  TyCache.insert(
      {DestroyIndex, DBuilder.createBasicType(
                         "__destroy_fn", Layout.getTypeSizeInBits(DestroyFnTy),
                         dwarf::DW_ATE_address)});

  /// FIXME: If we fill the field `SizeInBits` with the actual size of
  /// __coro_index in bits, then __coro_index wouldn't show up in the debugger.
  TyCache.insert({IndexIndex, DBuilder.createBasicType(
                                  "__coro_index",
                                  (Layout.getTypeSizeInBits(IndexTy) < 8)
                                      ? 8
                                      : Layout.getTypeSizeInBits(IndexTy),
                                  dwarf::DW_ATE_unsigned_char)});

  for (auto *V : FrameData.getAllDefs()) {
    if (DIVarCache.find(V) == DIVarCache.end())
      continue;

    auto Index = FrameData.getFieldIndex(V);

    NameCache.insert({Index, DIVarCache[V]->getName()});
    TyCache.insert({Index, DIVarCache[V]->getType()});
  }

  // Cache from index to (Align, Offset) pair.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> OffsetCache;
  // The align and offset of the resume function and the destroy function
  // are fixed.
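  // Note: the constants below hard-code 8-byte function pointers (align 8,
  // at offsets 0 and 8), mirroring the two header fields added first in
  // buildFrameType.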
  OffsetCache.insert({ResumeIndex, {8, 0}});
  OffsetCache.insert({DestroyIndex, {8, 8}});
  OffsetCache.insert(
      {IndexIndex,
       {Shape.SwitchLowering.IndexAlign, Shape.SwitchLowering.IndexOffset}});

  for (auto *V : FrameData.getAllDefs()) {
    auto Index = FrameData.getFieldIndex(V);

    OffsetCache.insert(
        {Index, {FrameData.getAlign(V).value(), FrameData.getOffset(V)}});
  }

  DenseMap<Type *, DIType *> DITypeCache;
  // This counter is used to avoid repeated type names. E.g., there could be
  // many i32 and i64 types in one coroutine, and we would use i32_0 and
  // i32_1 to distinguish them, since it makes no sense for the names of the
  // fields to conflict with each other.
  unsigned UnknownTypeNum = 0;
  for (unsigned Index = 0; Index < FrameTy->getNumElements(); Index++) {
    if (OffsetCache.find(Index) == OffsetCache.end())
      continue;

    std::string Name;
    uint64_t SizeInBits;
    uint32_t AlignInBits;
    uint64_t OffsetInBits;
    DIType *DITy = nullptr;

    Type *Ty = FrameTy->getElementType(Index);
    assert(Ty->isSized() && "We can't handle a type which is not sized.\n");
    SizeInBits = Layout.getTypeSizeInBits(Ty).getFixedSize();
    AlignInBits = OffsetCache[Index].first * 8;
    OffsetInBits = OffsetCache[Index].second * 8;

    if (NameCache.find(Index) != NameCache.end()) {
      Name = NameCache[Index].str();
      DITy = TyCache[Index];
    } else {
      DITy = solveDIType(DBuilder, Ty, Layout, FrameDITy, LineNum, DITypeCache);
      assert(DITy && "SolveDIType shouldn't return nullptr.\n");
      Name = DITy->getName().str();
      Name += "_" + std::to_string(UnknownTypeNum);
      UnknownTypeNum++;
    }

    Elements.push_back(DBuilder.createMemberType(
        FrameDITy, Name, DFile, LineNum, SizeInBits, AlignInBits, OffsetInBits,
        llvm::DINode::FlagArtificial, DITy));
  }

  DBuilder.replaceArrays(FrameDITy, DBuilder.getOrCreateArray(Elements));

  auto *FrameDIVar = DBuilder.createAutoVariable(PromiseDIScope, "__coro_frame",
                                                 DFile, LineNum, FrameDITy,
                                                 true, DINode::FlagArtificial);
  assert(FrameDIVar->isValidLocationForIntrinsic(PromiseDDI->getDebugLoc()));

  // The subprogram has a RetainedNodes field which records the debug
  // variables it contains. So we need to add __coro_frame to its
  // RetainedNodes.
  //
  // If we don't add __coro_frame to the RetainedNodes, the user may get
  // `no symbol __coro_frame in context` rather than `__coro_frame is
  // optimized out`, with the latter being more precise.
  if (auto *SubProgram = dyn_cast<DISubprogram>(PromiseDIScope)) {
    auto RetainedNodes = SubProgram->getRetainedNodes();
    SmallVector<Metadata *, 32> RetainedNodesVec(RetainedNodes.begin(),
                                                 RetainedNodes.end());
    RetainedNodesVec.push_back(FrameDIVar);
    SubProgram->replaceOperandWith(
        7, (MDTuple::get(F.getContext(), RetainedNodesVec)));
  }

  DBuilder.insertDeclare(Shape.FramePtr, FrameDIVar,
                         DBuilder.createExpression(), DILoc,
                         Shape.getInsertPtAfterFramePtr());
}

// Build a struct that will keep state for an active coroutine.
//   struct f.frame {
//     ResumeFnTy ResumeFnAddr;
//     ResumeFnTy DestroyFnAddr;
//     int ResumeIndex;
//     ... promise (if present) ...
//     ... spills ...
//   };
static StructType *buildFrameType(Function &F, coro::Shape &Shape,
                                  FrameDataInfo &FrameData) {
  LLVMContext &C = F.getContext();
  const DataLayout &DL = F.getParent()->getDataLayout();
  StructType *FrameTy = [&] {
    SmallString<32> Name(F.getName());
    Name.append(".Frame");
    return StructType::create(C, Name);
  }();

  // We will use this value to cap the alignment of spilled values.
  Optional<Align> MaxFrameAlignment;
  if (Shape.ABI == coro::ABI::Async)
    MaxFrameAlignment = Shape.AsyncLowering.getContextAlignment();
  FrameTypeBuilder B(C, DL, MaxFrameAlignment);

  AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
  Optional<FieldIDType> SwitchIndexFieldId;

  if (Shape.ABI == coro::ABI::Switch) {
    auto *FramePtrTy = FrameTy->getPointerTo();
    auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
                                   /*IsVarArg=*/false);
    auto *FnPtrTy = FnTy->getPointerTo();

    // Add header fields for the resume and destroy functions.
    // We can rely on these being perfectly packed.
    (void)B.addField(FnPtrTy, None, /*header*/ true);
    (void)B.addField(FnPtrTy, None, /*header*/ true);

    // The PromiseAlloca field needs to be explicitly added here because it's
    // a header field with a fixed offset based on its alignment. Hence it
    // needs special handling and cannot be added to FrameData.Allocas.
    if (PromiseAlloca)
      FrameData.setFieldIndex(
          PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));

    // Add a field to store the suspend index. This doesn't need to
    // be in the header.
    unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
    Type *IndexType = Type::getIntNTy(C, IndexBits);

    SwitchIndexFieldId = B.addField(IndexType, None);
  } else {
    assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
  }

  // Because multiple allocas may share the same field slot,
  // we add the allocas to fields here.
  B.addFieldForAllocas(F, FrameData, Shape);
  // Add PromiseAlloca to the Allocas list so that
  //   1. updateLayoutIndex can update its index after
  //      `performOptimizedStructLayout`,
  //   2. it is processed in insertSpills.
  if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
    // We assume that the promise alloca won't be modified before
    // CoroBegin and that no alias will be created before CoroBegin.
    FrameData.Allocas.emplace_back(
        PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
  // Create an entry for every spilled value.
  for (auto &S : FrameData.Spills) {
    Type *FieldType = S.first->getType();
    // For byval arguments, we need to store the pointed-to value in the
    // frame, instead of the pointer itself.
    if (const Argument *A = dyn_cast<Argument>(S.first))
      if (A->hasByValAttr())
        FieldType = A->getParamByValType();
    FieldIDType Id =
        B.addField(FieldType, None, false /*header*/, true /*IsSpillOfValue*/);
    FrameData.setFieldIndex(S.first, Id);
  }

  B.finish(FrameTy);
  FrameData.updateLayoutIndex(B);
  Shape.FrameAlign = B.getStructAlign();
  Shape.FrameSize = B.getStructSize();

  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In the switch ABI, remember the switch-index field.
    auto IndexField = B.getLayoutField(*SwitchIndexFieldId);
    Shape.SwitchLowering.IndexField = IndexField.LayoutFieldIndex;
    Shape.SwitchLowering.IndexAlign = IndexField.Alignment.value();
    Shape.SwitchLowering.IndexOffset = IndexField.Offset;

    // Also round the frame size up to a multiple of its alignment, as is
    // generally expected in C/C++.
    Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
    break;
  }

  // In the retcon ABI, remember whether the frame is inline in the storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    auto Id = Shape.getRetconCoroId();
    Shape.RetconLowering.IsFrameInlineInStorage
      = (B.getStructSize() <= Id->getStorageSize() &&
         B.getStructAlign() <= Id->getStorageAlignment());
    break;
  }
  case coro::ABI::Async: {
    Shape.AsyncLowering.FrameOffset =
        alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
    // Also make the final context size a multiple of the context alignment to
    // make allocation easier for allocators.
    Shape.AsyncLowering.ContextSize =
        alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
                Shape.AsyncLowering.getContextAlignment());
    if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
      report_fatal_error(
          "The alignment requirement of frame variables cannot be higher than "
          "the alignment of the async function context");
    }
    break;
  }
  }

  return FrameTy;
}

// We use a pointer use visitor to track how an alloca is being used.
// The goal is to be able to answer the following three questions:
//   1. Should this alloca be allocated on the frame instead?
//   2. Could the content of the alloca be modified prior to CoroBegin, which
//      would require copying the data from the alloca to the frame after
//      CoroBegin?
//   3. Are there any aliases created for this alloca prior to CoroBegin but
//      used after CoroBegin? In that case, we will need to recreate the
//      aliases after CoroBegin based off the frame.
//
// To answer question 1, we track two things:
//   a. The list of all BasicBlocks that use this alloca or any of the aliases
//      of the alloca. In the end, we check if there exist any two basic
//      blocks that cross suspension points. If so, this alloca must be put on
//      the frame.
//   b. Whether the alloca or any alias of the alloca is escaped at some
//      point, either by storing the address somewhere, or by using the
//      address in a function call that might capture. If it's ever escaped,
//      this alloca must be put on the frame conservatively.
//
// To answer question 2, we track it through the variable
// MayWriteBeforeCoroBegin. Whenever a potential write happens, either through
// a store instruction, a function call or any of the memory intrinsics, we
// check whether this instruction is prior to CoroBegin.
//
// To answer question 3, we track the offsets of all aliases created for the
// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
// to be able to represent the case when the offset is unknown (e.g. when you
// have a PHINode that takes in different offset values). We cannot handle
// unknown offsets and will assert. This is the potential issue left out. An
// ideal solution would likely require a significant redesign.
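//
// As an illustrative sketch (names invented): if, before CoroBegin, the
// program computes
//     %alias = getelementptr i8, i8* %a, i64 16
// and %alias is used again after CoroBegin, we record {%alias -> 16} so that
// an equivalent GEP off the frame can be recreated after CoroBegin. A PHI
// merging two different offsets would be recorded as an unknown (None)
// offset, which we cannot handle.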
namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
  using Base = PtrUseVisitor<AllocaUseVisitor>;
  AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
                   const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
                   bool ShouldUseLifetimeStartInfo)
      : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
        ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}

  void visit(Instruction &I) {
    Users.insert(&I);
    Base::visit(I);
    // If the pointer is escaped prior to CoroBegin, we have to assume it
    // would be written into before CoroBegin as well.
    if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
      MayWriteBeforeCoroBegin = true;
    }
  }
  // We need to provide this overload as PtrUseVisitor uses a pointer-based
  // visiting function.
  void visit(Instruction *I) { return visit(*I); }

  void visitPHINode(PHINode &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitSelectInst(SelectInst &I) {
    enqueueUsers(I);
    handleAlias(I);
  }

  void visitStoreInst(StoreInst &SI) {
    // Regardless of whether the alias of the alloca is the value operand or
    // the pointer operand, we need to assume the alloca is being written to.
    handleMayWrite(SI);

    if (SI.getValueOperand() != U->get())
      return;

    // We are storing the pointer into a memory location, potentially escaping.
    // As an optimization, we try to detect simple cases where it doesn't
    // actually escape, for example:
    //     %ptr = alloca ..
    //     %addr = alloca ..
    //     store %ptr, %addr
    //     %x = load %addr
    //     ..
    // If %addr is only used by loading from it, we could simply treat %x as
    // another alias of %ptr, and not consider %ptr as escaped.
    auto IsSimpleStoreThenLoad = [&]() {
      auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
      // If the memory location we are storing to is not an alloca, it
      // could be an alias of some other memory location, which is difficult
      // to analyze.
      if (!AI)
        return false;
      // StoreAliases contains aliases of the memory location stored into.
      SmallVector<Instruction *, 4> StoreAliases = {AI};
      while (!StoreAliases.empty()) {
        Instruction *I = StoreAliases.pop_back_val();
        for (User *U : I->users()) {
          // If we are loading from the memory location, we are creating an
          // alias of the original pointer.
          if (auto *LI = dyn_cast<LoadInst>(U)) {
            enqueueUsers(*LI);
            handleAlias(*LI);
            continue;
          }
          // If we are overriding the memory location, the pointer certainly
          // won't escape.
          if (auto *S = dyn_cast<StoreInst>(U))
            if (S->getPointerOperand() == I)
              continue;
          if (auto *II = dyn_cast<IntrinsicInst>(U))
            if (II->isLifetimeStartOrEnd())
              continue;
          // BitCastInst creates aliases of the memory location being stored
          // into.
          if (auto *BI = dyn_cast<BitCastInst>(U)) {
            StoreAliases.push_back(BI);
            continue;
          }
          return false;
        }
      }

      return true;
    };

    if (!IsSimpleStoreThenLoad())
      PI.setEscaped(&SI);
  }

  // All mem intrinsics modify the data.
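  // For example, a memcpy whose destination aliases the alloca counts as a
  // potential write; if it occurs before CoroBegin, the contents must be
  // copied into the frame afterwards.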
  void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }

  void visitBitCastInst(BitCastInst &BC) {
    Base::visitBitCastInst(BC);
    handleAlias(BC);
  }

  void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
    Base::visitAddrSpaceCastInst(ASC);
    handleAlias(ASC);
  }

  void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
    // The base visitor will adjust Offset accordingly.
    Base::visitGetElementPtrInst(GEPI);
    handleAlias(GEPI);
  }

  void visitIntrinsicInst(IntrinsicInst &II) {
    // When we find that the lifetime markers refer to a
    // subrange of the original alloca, ignore the lifetime
    // markers to avoid misleading the analysis.
    if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
        !Offset.isZero())
      return Base::visitIntrinsicInst(II);
    LifetimeStarts.insert(&II);
  }

  void visitCallBase(CallBase &CB) {
    for (unsigned Op = 0, OpCount = CB.arg_size(); Op < OpCount; ++Op)
      if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
        PI.setEscaped(&CB);
    handleMayWrite(CB);
  }

  bool getShouldLiveOnFrame() const {
    if (!ShouldLiveOnFrame)
      ShouldLiveOnFrame = computeShouldLiveOnFrame();
    return *ShouldLiveOnFrame;
  }

  bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }

  DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
    assert(getShouldLiveOnFrame() && "This method should only be called if the "
                                     "alloca needs to live on the frame.");
    for (const auto &P : AliasOffsetMap)
      if (!P.second)
        report_fatal_error("Unable to handle an alias with unknown offset "
                           "created before CoroBegin.");
    return AliasOffsetMap;
  }

private:
  const DominatorTree &DT;
  const CoroBeginInst &CoroBegin;
  const SuspendCrossingInfo &Checker;
  // All aliases of the original AllocaInst, created before CoroBegin and used
  // after CoroBegin. Each entry contains the instruction and the offset into
  // the original alloca. They need to be recreated after CoroBegin off the
  // frame.
  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
  SmallPtrSet<Instruction *, 4> Users{};
  SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
  bool MayWriteBeforeCoroBegin{false};
  bool ShouldUseLifetimeStartInfo{true};

  mutable llvm::Optional<bool> ShouldLiveOnFrame{};

  bool computeShouldLiveOnFrame() const {
    // If lifetime information is available, we check it first since it's
    // more precise. We look at every pair of lifetime.start intrinsic and
    // every basic block that uses the pointer to see if they cross suspension
    // points. The uses cover both direct uses as well as indirect uses.
    if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
      for (auto *I : Users)
        for (auto *S : LifetimeStarts)
          if (Checker.isDefinitionAcrossSuspend(*S, I))
            return true;
      return false;
    }
    // FIXME: Ideally the isEscaped check should come at the beginning.
    // However there are a few loose ends that need to be fixed first before
    // we can do that. We need to make sure we are not over-conservative, so
    // that the data accessed in between await_suspend and symmetric transfer
    // is always put on the stack, and also data accessed after coro.end is
    // always put on the stack (esp. the return object).
    // To fix that, we need to:
    //   1) Potentially treat sret as nocapture in calls
    //   2) Special handle the return object and put it on the stack
    //   3) Utilize lifetime.end intrinsic
    if (PI.isEscaped())
      return true;

    for (auto *U1 : Users)
      for (auto *U2 : Users)
        if (Checker.isDefinitionAcrossSuspend(*U1, U2))
          return true;

    return false;
  }

  void handleMayWrite(const Instruction &I) {
    if (!DT.dominates(&CoroBegin, &I))
      MayWriteBeforeCoroBegin = true;
  }

  bool usedAfterCoroBegin(Instruction &I) {
    for (auto &U : I.uses())
      if (DT.dominates(&CoroBegin, U))
        return true;
    return false;
  }

  void handleAlias(Instruction &I) {
    // We track all aliases created prior to CoroBegin but used after.
    // These aliases may need to be recreated after CoroBegin if the alloca
    // needs to live on the frame.
    if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
      return;

    if (!IsOffsetKnown) {
      AliasOffsetMap[&I].reset();
    } else {
      auto Itr = AliasOffsetMap.find(&I);
      if (Itr == AliasOffsetMap.end()) {
        AliasOffsetMap[&I] = Offset;
      } else if (Itr->second && *Itr->second != Offset) {
        // If we have seen two different possible values for this alias, we
        // set it to empty.
        AliasOffsetMap[&I].reset();
      }
    }
  }
};
} // namespace

// We need to make room to insert a spill after the initial PHIs, but before
// the catchswitch instruction. Placing the spill before the catchswitch
// violates the requirement that the catchswitch, like all other EHPads, must
// be the first non-PHI in a block.
//
// Split away the catchswitch into a separate block and insert in its place:
//
//    cleanuppad <InsertPt> cleanupret.
//
// The cleanupret instruction will act as an insert point for the spill.
static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
  BasicBlock *CurrentBlock = CatchSwitch->getParent();
  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
  CurrentBlock->getTerminator()->eraseFromParent();

  auto *CleanupPad =
      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
  auto *CleanupRet =
      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
  return CleanupRet;
}

static void createFramePtr(coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  Shape.FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//     %hdl = coro.begin(...)
//     whatever
//
// becomes:
//
//     %hdl = coro.begin(...)
static void createFramePtr(coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  IRBuilder<> Builder(CB->getNextNode());
  StructType *FrameTy = Shape.FrameTy;
  PointerType *FramePtrTy = FrameTy->getPointerTo();
  Shape.FramePtr =
      cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
}

// Replace all alloca and SSA values that are accessed across suspend points
// with GetElementPointer from coroutine frame + loads and stores. Create an
// AllocaSpillBB that will become the new entry block for the resume parts of
// the coroutine:
//
//    %hdl = coro.begin(...)
//    whatever
//
// becomes:
//
//    %hdl = coro.begin(...)
//    %FramePtr = bitcast i8* hdl to %f.frame*
//    br label %AllocaSpillBB
//
//  AllocaSpillBB:
//    ; geps corresponding to allocas that were moved to coroutine frame
//    br label %PostSpill
//
//  PostSpill:
//    whatever
//
//
static void insertSpills(const FrameDataInfo &FrameData, coro::Shape &Shape) {
  auto *CB = Shape.CoroBegin;
  LLVMContext &C = CB->getContext();
  IRBuilder<> Builder(C);
  StructType *FrameTy = Shape.FrameTy;
  Value *FramePtr = Shape.FramePtr;
  DominatorTree DT(*CB->getFunction());
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;

  // Create a GEP with the given index into the coroutine frame for the
  // original value Orig. Appends an extra 0 index for array-allocas,
  // preserving the original type.
  auto GetFramePointer = [&](Value *Orig) -> Value * {
    FieldIDType Index = FrameData.getFieldIndex(Orig);
    SmallVector<Value *, 3> Indices = {
        ConstantInt::get(Type::getInt32Ty(C), 0),
        ConstantInt::get(Type::getInt32Ty(C), Index),
    };

    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto Count = CI->getValue().getZExtValue();
        if (Count > 1) {
          Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
        }
      } else {
        report_fatal_error("Coroutines cannot handle non-static allocas yet");
      }
    }

    auto GEP = cast<GetElementPtrInst>(
        Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
    if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
      if (FrameData.getDynamicAlign(Orig) != 0) {
        assert(FrameData.getDynamicAlign(Orig) == AI->getAlign().value());
        auto *M = AI->getModule();
        auto *IntPtrTy = M->getDataLayout().getIntPtrType(AI->getType());
        auto *PtrValue = Builder.CreatePtrToInt(GEP, IntPtrTy);
        auto *AlignMask =
            ConstantInt::get(IntPtrTy, AI->getAlign().value() - 1);
        PtrValue = Builder.CreateAdd(PtrValue, AlignMask);
        PtrValue = Builder.CreateAnd(PtrValue, Builder.CreateNot(AlignMask));
        return Builder.CreateIntToPtr(PtrValue, AI->getType());
      }
      // If the type of the GEP is not equal to the type of the AllocaInst,
      // it implies that the frame slot may be shared with another AllocaInst.
      // So we cast the GEP to the type of the AllocaInst here to reuse the
      // frame storage.
      //
      // Note: If we change the strategy dealing with alignment, we need to
      // refine this casting.
      if (GEP->getResultElementType() != Orig->getType())
        return Builder.CreateBitCast(GEP, Orig->getType(),
                                     Orig->getName() + Twine(".cast"));
    }
    return GEP;
  };
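  // For illustration, the dynamic-alignment path above rounds the field
  // address up with integer arithmetic. For a hypothetical 16-byte alignment
  // it emits roughly:
  //
  //   %0 = ptrtoint i32** %addr to i64
  //   %1 = add i64 %0, 15          ; align - 1
  //   %2 = and i64 %1, -16         ; ~(align - 1)
  //   %3 = inttoptr i64 %2 to i32**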
  for (auto const &E : FrameData.Spills) {
    Value *Def = E.first;
    auto SpillAlignment = Align(FrameData.getAlign(Def));
    // Create a store instruction storing the value into the
    // coroutine frame.
    Instruction *InsertPt = nullptr;
    Type *ByValTy = nullptr;
    if (auto *Arg = dyn_cast<Argument>(Def)) {
      // For arguments, we will place the store instruction right after
      // the coroutine frame pointer instruction, i.e. the bitcast of
      // coro.begin from i8* to %f.frame*.
      InsertPt = Shape.getInsertPtAfterFramePtr();

      // If we're spilling an Argument, make sure we clear 'nocapture'
      // from the coroutine function.
      Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

      if (Arg->hasByValAttr())
        ByValTy = Arg->getParamByValType();
    } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
      // Don't spill immediately after a suspend; splitting assumes
      // that the suspend will be followed by a branch.
      InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
    } else {
      auto *I = cast<Instruction>(Def);
      if (!DT.dominates(CB, I)) {
        // If it is not dominated by CoroBegin, then the spill should be
        // inserted immediately after the coroutine frame pointer is computed.
        InsertPt = Shape.getInsertPtAfterFramePtr();
      } else if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we are spilling the result of the invoke instruction, split
        // the normal edge and insert the spill in the new block.
        auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
        InsertPt = NewBB->getTerminator();
      } else if (isa<PHINode>(I)) {
        // Skip the PHINodes and EH pad instructions.
        BasicBlock *DefBlock = I->getParent();
        if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
          InsertPt = splitBeforeCatchSwitch(CSI);
        else
          InsertPt = &*DefBlock->getFirstInsertionPt();
      } else {
        assert(!I->isTerminator() && "unexpected terminator");
        // For all other values, the spill is placed immediately after
        // the definition.
        InsertPt = I->getNextNode();
      }
    }

    auto Index = FrameData.getFieldIndex(Def);
    Builder.SetInsertPoint(InsertPt);
    auto *G = Builder.CreateConstInBoundsGEP2_32(
        FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
    if (ByValTy) {
      // For byval arguments, we need to store the pointed value in the frame,
      // instead of the pointer itself.
      auto *Value = Builder.CreateLoad(ByValTy, Def);
      Builder.CreateAlignedStore(Value, G, SpillAlignment);
    } else {
      Builder.CreateAlignedStore(Def, G, SpillAlignment);
    }

    BasicBlock *CurrentBlock = nullptr;
    Value *CurrentReload = nullptr;
    for (auto *U : E.second) {
      // If we have not seen the use block yet, create a load instruction in
      // it to reload the spilled value from the coroutine frame.
      if (CurrentBlock != U->getParent()) {
        CurrentBlock = U->getParent();
        Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());

        auto *GEP = GetFramePointer(E.first);
        GEP->setName(E.first->getName() + Twine(".reload.addr"));
        if (ByValTy)
          CurrentReload = GEP;
        else
          CurrentReload = Builder.CreateAlignedLoad(
              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
              SpillAlignment, E.first->getName() + Twine(".reload"));

        TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
        for (DbgDeclareInst *DDI : DIs) {
          bool AllowUnresolved = false;
          // This dbg.declare is preserved for all coro-split function
          // fragments. It will be unreachable in the main function, and
          // processed by coro::salvageDebugInfo() from CoroCloner.
          DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
              .insertDeclare(CurrentReload, DDI->getVariable(),
                             DDI->getExpression(), DDI->getDebugLoc(),
                             &*Builder.GetInsertPoint());
          // This dbg.declare is for the main function entry point. It
          // will be deleted in all coro-split functions.
          coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);
        }
      }

      // Salvage debug info on any dbg.addr that we see. We do not insert them
      // into each block where we have a use though.
      if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
        coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
      }

      // If we have a single edge PHINode, remove it and replace it with a
      // reload from the coroutine frame. (We already took care of multi edge
      // PHINodes by rewriting them in the rewritePHIs function).
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentReload);
        PN->eraseFromParent();
        continue;
      }

      // Replace all uses of CurrentValue in the current instruction with
      // the reload.
      U->replaceUsesOfWith(Def, CurrentReload);
    }
  }

  BasicBlock *FramePtrBB = Shape.getInsertPtAfterFramePtr()->getParent();

  auto SpillBlock = FramePtrBB->splitBasicBlock(
      Shape.getInsertPtAfterFramePtr(), "AllocaSpillBB");
  SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
  Shape.AllocaSpillBlock = SpillBlock;

  // retcon and retcon.once lowering assumes all uses have been sunk.
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async) {
    // If we found any allocas, replace all of their remaining uses with GEPs.
    Builder.SetInsertPoint(&SpillBlock->front());
    for (const auto &P : FrameData.Allocas) {
      AllocaInst *Alloca = P.Alloca;
      auto *G = GetFramePointer(Alloca);

      // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
      // here, as we are changing the location of the instruction.
      G->takeName(Alloca);
      Alloca->replaceAllUsesWith(G);
      Alloca->eraseFromParent();
    }
    return;
  }

  // If we found any allocas, replace all of their remaining uses with GEP
  // instructions. To preserve debuggability, we replace the uses of allocas
  // for dbg.declares and dbg.values with the reload from the frame.
  // Note: We cannot replace the alloca with GEP instructions indiscriminately,
  // as some of the uses may not be dominated by CoroBegin.
  Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
  SmallVector<Instruction *, 4> UsersToUpdate;
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    UsersToUpdate.clear();
    for (User *U : Alloca->users()) {
      auto *I = cast<Instruction>(U);
      if (DT.dominates(CB, I))
        UsersToUpdate.push_back(I);
    }
    if (UsersToUpdate.empty())
      continue;
    auto *G = GetFramePointer(Alloca);
    G->setName(Alloca->getName() + Twine(".reload.addr"));

    SmallVector<DbgVariableIntrinsic *, 4> DIs;
    findDbgUsers(DIs, Alloca);
    for (auto *DVI : DIs)
      DVI->replaceUsesOfWith(Alloca, G);

    for (Instruction *I : UsersToUpdate)
      I->replaceUsesOfWith(Alloca, G);
  }
  Builder.SetInsertPoint(Shape.getInsertPtAfterFramePtr());
  for (const auto &A : FrameData.Allocas) {
    AllocaInst *Alloca = A.Alloca;
    if (A.MayWriteBeforeCoroBegin) {
      // isEscaped really means potentially modified before CoroBegin.
      if (Alloca->isArrayAllocation())
        report_fatal_error(
            "Coroutines cannot handle copying of array allocas yet");

      auto *G = GetFramePointer(Alloca);
      auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
      Builder.CreateStore(Value, G);
    }
    // For each alias to Alloca created before CoroBegin but used after
    // CoroBegin, we recreate them after CoroBegin by applying the offset
    // to the pointer in the frame.
    for (const auto &Alias : A.Aliases) {
      auto *FramePtr = GetFramePointer(Alloca);
      auto *FramePtrRaw =
          Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
      auto &Value = *Alias.second;
      auto ITy = IntegerType::get(C, Value.getBitWidth());
      auto *AliasPtr = Builder.CreateGEP(Type::getInt8Ty(C), FramePtrRaw,
                                         ConstantInt::get(ITy, Value));
      auto *AliasPtrTyped =
          Builder.CreateBitCast(AliasPtr, Alias.first->getType());
      Alias.first->replaceUsesWithIf(
          AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
    }
  }
}
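// For illustration, spilling a hypothetical value %x that lives in frame
// field 2 produces code like (simplified):
//
//   %x.spill.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
//                                 i32 0, i32 2
//   store i32 %x, i32* %x.spill.addr
//   ...
// ; in each block that uses %x on the other side of a suspend:
//   %x.reload.addr = getelementptr inbounds %f.frame, %f.frame* %FramePtr,
//                                  i32 0, i32 2
//   %x.reload = load i32, i32* %x.reload.addr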
// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
// PHI in InsertedBB.
static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
                                         BasicBlock *InsertedBB,
                                         BasicBlock *PredBB,
                                         PHINode *UntilPHI = nullptr) {
  auto *PN = cast<PHINode>(&SuccBB->front());
  do {
    int Index = PN->getBasicBlockIndex(InsertedBB);
    Value *V = PN->getIncomingValue(Index);
    PHINode *InputV = PHINode::Create(
        V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
        &InsertedBB->front());
    InputV->addIncoming(V, PredBB);
    PN->setIncomingValue(Index, InputV);
    PN = dyn_cast<PHINode>(PN->getNextNode());
  } while (PN != UntilPHI);
}

// Rewrites the PHI Nodes in a cleanuppad.
static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
                                     CleanupPadInst *CleanupPad) {
  // For every incoming edge to a CleanupPad we will create a new block holding
  // all incoming values in single-value PHI nodes. We will then create another
  // block to act as a dispatcher (as all unwind edges for related EH blocks
  // must be the same).
  //
  //   cleanuppad:
  //     %2 = phi i32 [%0, %catchswitch], [%1, %catch.1]
  //     %3 = cleanuppad within none []
  //
  // It will create:
  //
  //   cleanuppad.corodispatch:
  //     %2 = phi i8 [0, %catchswitch], [1, %catch.1]
  //     %3 = cleanuppad within none []
  //     switch i8 %2, label %unreachable
  //             [i8 0, label %cleanuppad.from.catchswitch
  //              i8 1, label %cleanuppad.from.catch.1]
  //   cleanuppad.from.catchswitch:
  //     %4 = phi i32 [%0, %catchswitch]
  //     br label %cleanuppad
  //   cleanuppad.from.catch.1:
  //     %6 = phi i32 [%1, %catch.1]
  //     br label %cleanuppad
  //   cleanuppad:
  //     %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
  //                  [%6, %cleanuppad.from.catch.1]

  // Unreachable BB, in case switching on an invalid value in the dispatcher.
  auto *UnreachBB = BasicBlock::Create(
      CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
  IRBuilder<> Builder(UnreachBB);
  Builder.CreateUnreachable();

  // Create a new cleanuppad which will be the dispatcher.
  auto *NewCleanupPadBB =
      BasicBlock::Create(CleanupPadBB->getContext(),
                         CleanupPadBB->getName() + Twine(".corodispatch"),
                         CleanupPadBB->getParent(), CleanupPadBB);
  Builder.SetInsertPoint(NewCleanupPadBB);
  auto *SwitchType = Builder.getInt8Ty();
  auto *SetDispatchValuePN =
      Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
  CleanupPad->removeFromParent();
  CleanupPad->insertAfter(SetDispatchValuePN);
  auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
                                                pred_size(CleanupPadBB));

  int SwitchIndex = 0;
  SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
  for (BasicBlock *Pred : Preds) {
    // Create a new cleanuppad and move the PHI values there.
    auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
                                      CleanupPadBB->getName() +
                                          Twine(".from.") + Pred->getName(),
                                      CleanupPadBB->getParent(), CleanupPadBB);
    updatePhiNodes(CleanupPadBB, Pred, CaseBB);
    CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
                    Pred->getName());
    Builder.SetInsertPoint(CaseBB);
    Builder.CreateBr(CleanupPadBB);
    movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);

    // Update this Pred to the new unwind point.
    setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);

    // Set up the switch in the dispatcher.
    auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
    SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
    SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
    SwitchIndex++;
  }
}

static void cleanupSinglePredPHIs(Function &F) {
  SmallVector<PHINode *, 32> Worklist;
  for (auto &BB : F) {
    for (auto &Phi : BB.phis()) {
      if (Phi.getNumIncomingValues() == 1) {
        Worklist.push_back(&Phi);
      } else
        break;
    }
  }
  while (!Worklist.empty()) {
    auto *Phi = Worklist.pop_back_val();
    auto *OriginalValue = Phi->getIncomingValue(0);
    Phi->replaceAllUsesWith(OriginalValue);
  }
}
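// For illustration (hypothetical IR): a single-predecessor PHI such as
//
//   %v = phi i32 [ %x, %pred ]
//
// carries no merge information, so every use of %v is simply rewritten to
// use %x directly.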
static void rewritePHIs(BasicBlock &BB) {
  // For every incoming edge we will create a block holding all
  // incoming values in single-value PHI nodes.
  //
  //   loop:
  //     %n.val = phi i32 [%n, %entry], [%inc, %loop]
  //
  // It will create:
  //
  //   loop.from.entry:
  //     %n.loop.pre = phi i32 [%n, %entry]
  //     br label %loop
  //   loop.from.loop:
  //     %inc.loop.pre = phi i32 [%inc, %loop]
  //     br label %loop
  //
  // After this rewrite, further analysis will ignore any phi nodes with more
  // than one incoming edge.

  // TODO: Simplify PHINodes in the basic block to remove duplicate
  // predecessors.

  // Special case for CleanupPad: all EH blocks must have the same unwind edge
  // so we need to create an additional "dispatcher" block.
  if (auto *CleanupPad =
          dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
    SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
    for (BasicBlock *Pred : Preds) {
      if (CatchSwitchInst *CS =
              dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
        // CleanupPad with a CatchSwitch predecessor: therefore this is an
        // unwind destination that needs to be handled specially.
        assert(CS->getUnwindDest() == &BB);
        (void)CS;
        rewritePHIsForCleanupPad(&BB, CleanupPad);
        return;
      }
    }
  }

  LandingPadInst *LandingPad = nullptr;
  PHINode *ReplPHI = nullptr;
  if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
    // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
    // We replace the original landing pad with a PHINode that will collect the
    // results from all of them.
    ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
    ReplPHI->takeName(LandingPad);
    LandingPad->replaceAllUsesWith(ReplPHI);
    // We will erase the original landing pad at the end of this function,
    // after ehAwareSplitEdge has cloned it into the transition blocks.
  }

  SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
  for (BasicBlock *Pred : Preds) {
    auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
    IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());

    // Stop the moving of values at ReplPHI, as this is either null or the PHI
    // that replaced the landing pad.
    movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
  }

  if (LandingPad) {
    // Calls to ehAwareSplitEdge cloned the original landing pad, so we no
    // longer need it.
    LandingPad->eraseFromParent();
  }
}

static void rewritePHIs(Function &F) {
  SmallVector<BasicBlock *, 8> WorkList;

  for (BasicBlock &BB : F)
    if (auto *PN = dyn_cast<PHINode>(&BB.front()))
      if (PN->getNumIncomingValues() > 1)
        WorkList.push_back(&BB);

  for (BasicBlock *BB : WorkList)
    rewritePHIs(*BB);
}

// Check for instructions that we can recreate on resume, as opposed to
// spilling the result into the coroutine frame.
static bool materializable(Instruction &V) {
  return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
         isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
}

// Check for structural coroutine intrinsics that should not be spilled into
// the coroutine frame.
static bool isCoroutineStructureIntrinsic(Instruction &I) {
  return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
         isa<CoroSuspendInst>(&I);
}

// For every use of a value that crosses a suspend point, recreate that value
// after the suspend point.
static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
                                              const SpillInfo &Spills) {
  for (const auto &E : Spills) {
    Value *Def = E.first;
    BasicBlock *CurrentBlock = nullptr;
    Instruction *CurrentMaterialization = nullptr;
    for (Instruction *U : E.second) {
      // If we have not seen this block, materialize the value.
      if (CurrentBlock != U->getParent()) {

        bool IsInCoroSuspendBlock = isa<AnyCoroSuspendInst>(U);
        CurrentBlock = U->getParent();
        auto *InsertBlock = IsInCoroSuspendBlock
                                ? CurrentBlock->getSinglePredecessor()
                                : CurrentBlock;
        CurrentMaterialization = cast<Instruction>(Def)->clone();
        CurrentMaterialization->setName(Def->getName());
        CurrentMaterialization->insertBefore(
            IsInCoroSuspendBlock ? InsertBlock->getTerminator()
                                 : &*InsertBlock->getFirstInsertionPt());
      }
      if (auto *PN = dyn_cast<PHINode>(U)) {
        assert(PN->getNumIncomingValues() == 1 &&
               "unexpected number of incoming "
               "values in the PHINode");
        PN->replaceAllUsesWith(CurrentMaterialization);
        PN->eraseFromParent();
        continue;
      }
      // Replace all uses of Def in the current instruction with the
      // CurrentMaterialization for the block.
      U->replaceUsesOfWith(Def, CurrentMaterialization);
    }
  }
}
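// For illustration (hypothetical IR): instead of spilling a cheap address
// computation %p to the frame, its definition is cloned on the resume side
// of the suspend, in front of its use:
//
//   %p = getelementptr inbounds %T, %T* %base, i32 0, i32 1
//   %s = call i8 @llvm.coro.suspend(...)
//   ...
//   use(%p)
//
// becomes
//
//   %s = call i8 @llvm.coro.suspend(...)
//   ...
//   %p1 = getelementptr inbounds %T, %T* %base, i32 0, i32 1
//   use(%p1)
//
// Any operands the clone needs (here %base) may then be spilled instead,
// which is why the caller repeats this rewrite a few times.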
// Splits the block at a particular instruction unless it is the first
// instruction in the block with a single predecessor.
static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
  auto *BB = I->getParent();
  if (&BB->front() == I) {
    if (BB->getSinglePredecessor()) {
      BB->setName(Name);
      return BB;
    }
  }
  return BB->splitBasicBlock(I, Name);
}

// Split above and below a particular instruction so that it ends up alone
// in its own block.
static void splitAround(Instruction *I, const Twine &Name) {
  splitBlockIfNotFirst(I, Name);
  splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
}

static bool isSuspendBlock(BasicBlock *BB) {
  return isa<AnyCoroSuspendInst>(BB->front());
}

typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;

/// Does control flow starting at the given block ever reach a suspend
/// instruction before reaching a block in VisitedOrFreeBBs?
static bool isSuspendReachableFrom(BasicBlock *From,
                                   VisitedBlocksSet &VisitedOrFreeBBs) {
  // Eagerly try to add this block to the visited set. If it's already
  // there, stop recursing; this path doesn't reach a suspend before
  // either looping or reaching a freeing block.
  if (!VisitedOrFreeBBs.insert(From).second)
    return false;

  // We assume that we'll already have split suspends into their own blocks.
  if (isSuspendBlock(From))
    return true;

  // Recurse on the successors.
  for (auto Succ : successors(From)) {
    if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
      return true;
  }

  return false;
}

/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
/// suspend point?
static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
  // Seed the visited set with all the basic blocks containing a free
  // so that we won't search past them.
  VisitedBlocksSet VisitedOrFreeBBs;
  for (auto User : AI->users()) {
    if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
      VisitedOrFreeBBs.insert(FI->getParent());
  }

  return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
}

/// After we split the coroutine, will the given basic block be along
/// an obvious exit path for the resumption function?
static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
                                              unsigned depth = 3) {
  // If we've bottomed out our depth count, stop searching and assume
  // that the path might loop back.
  if (depth == 0) return false;

  // If this is a suspend block, we're about to exit the resumption function.
  if (isSuspendBlock(BB)) return true;

  // Recurse into the successors.
  for (auto Succ : successors(BB)) {
    if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
      return false;
  }

  // If none of the successors leads back in a loop, we're on an exit/abort.
  return true;
}

static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
  // Look for a free that isn't sufficiently obviously followed by
  // either a suspend or a termination, i.e. something that will leave
  // the coro resumption frame.
  for (auto U : AI->users()) {
    auto FI = dyn_cast<CoroAllocaFreeInst>(U);
    if (!FI) continue;

    if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
      return true;
  }

  // If we never found one, we don't need a stack save.
  return false;
}

/// Turn each of the given local allocas into a normal (dynamic) alloca
/// instruction.
static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
                              SmallVectorImpl<Instruction*> &DeadInsts) {
  for (auto AI : LocalAllocas) {
    auto M = AI->getModule();
    IRBuilder<> Builder(AI);

    // Save the stack depth. Try to avoid doing this if the stackrestore
    // is going to immediately precede a return or something.
    Value *StackSave = nullptr;
    if (localAllocaNeedsStackSave(AI))
      StackSave = Builder.CreateCall(
          Intrinsic::getDeclaration(M, Intrinsic::stacksave));

    // Allocate memory.
    auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
    Alloca->setAlignment(AI->getAlignment());

    for (auto U : AI->users()) {
      // Replace gets with the allocation.
      if (isa<CoroAllocaGetInst>(U)) {
        U->replaceAllUsesWith(Alloca);

      // Replace frees with stackrestores. This is safe because
      // alloca.alloc is required to obey a stack discipline, although we
      // don't enforce that structurally.
      } else {
        auto FI = cast<CoroAllocaFreeInst>(U);
        if (StackSave) {
          Builder.SetInsertPoint(FI);
          Builder.CreateCall(
              Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
              StackSave);
        }
      }
      DeadInsts.push_back(cast<Instruction>(U));
    }

    DeadInsts.push_back(AI);
  }
}
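// For illustration, a hypothetical (simplified) lowering of a local
// coro.alloca that needs a stack save:
//
//   %save = call i8* @llvm.stacksave()
//   %mem  = alloca i8, i64 %size
//   ...                                       ; each coro.alloca.get uses %mem
//   call void @llvm.stackrestore(i8* %save)   ; at each coro.alloca.free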
/// Turn the given coro.alloca.alloc call into a dynamic allocation.
/// This happens during the all-instructions iteration, so it must not
/// delete the call.
static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<Instruction*> &DeadInsts) {
  IRBuilder<> Builder(AI);
  auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);

  for (User *U : AI->users()) {
    if (isa<CoroAllocaGetInst>(U)) {
      U->replaceAllUsesWith(Alloc);
    } else {
      auto FI = cast<CoroAllocaFreeInst>(U);
      Builder.SetInsertPoint(FI);
      Shape.emitDealloc(Builder, Alloc, nullptr);
    }
    DeadInsts.push_back(cast<Instruction>(U));
  }

  // Push this on last so that it gets deleted after all the others.
  DeadInsts.push_back(AI);

  // Return the new allocation value so that we can check for needed spills.
  return cast<Instruction>(Alloc);
}

/// Get the current swifterror value.
static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(ValueTy, {}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, {});
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the given value as the current swifterror value.
///
/// Returns a slot that can be used as a swifterror slot.
static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
                                     coro::Shape &Shape) {
  // Make a fake function pointer as a sort of intrinsic.
  auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
                                {V->getType()}, false);
  auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());

  auto Call = Builder.CreateCall(FnTy, Fn, { V });
  Shape.SwiftErrorOps.push_back(Call);

  return Call;
}

/// Set the swifterror value from the given alloca before a call,
/// then put it back in the alloca afterwards.
///
/// Returns an address that will stand in for the swifterror slot
/// until splitting.
static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
                                                 AllocaInst *Alloca,
                                                 coro::Shape &Shape) {
  auto ValueTy = Alloca->getAllocatedType();
  IRBuilder<> Builder(Call);

  // Load the current value from the alloca and set it as the
  // swifterror value.
  auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
  auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);

  // Move to after the call. Since swifterror only has a guaranteed
  // value on normal exits, we can ignore implicit and explicit unwind
  // edges.
  if (isa<CallInst>(Call)) {
    Builder.SetInsertPoint(Call->getNextNode());
  } else {
    auto Invoke = cast<InvokeInst>(Call);
    Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
  }

  // Get the current swifterror value and store it to the alloca.
  auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
  Builder.CreateStore(ValueAfterCall, Alloca);

  return Addr;
}
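// For illustration, a hypothetical (simplified) rewrite around a call that
// takes a swifterror argument. The null callees are the fake "intrinsics"
// recorded in Shape.SwiftErrorOps and resolved during splitting:
//
//   %v    = load %swift.error*, %swift.error** %alloca
//   %slot = call %swift.error** null(%swift.error* %v)   ; fake "set"
//   call void @foo(%swift.error** swifterror %slot)
//   %v2   = call %swift.error* null()                    ; fake "get"
//   store %swift.error* %v2, %swift.error** %alloca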
/// Eliminate a formerly-swifterror alloca by inserting the get/set
/// intrinsics and attempting to MemToReg the alloca away.
static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
                                      coro::Shape &Shape) {
  for (Use &Use : llvm::make_early_inc_range(Alloca->uses())) {
    // swifterror values can only be used in very specific ways.
    // We take advantage of that here.
    auto User = Use.getUser();
    if (isa<LoadInst>(User) || isa<StoreInst>(User))
      continue;

    assert(isa<CallInst>(User) || isa<InvokeInst>(User));
    auto Call = cast<Instruction>(User);

    auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);

    // Use the returned slot address as the call argument.
    Use.set(Addr);
  }

  // All the uses should be loads and stores now.
  assert(isAllocaPromotable(Alloca));
}

/// "Eliminate" a swifterror argument by reducing it to the alloca case
/// and then loading and storing in the prologue and epilogue.
///
/// The argument keeps the swifterror flag.
static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
                                        coro::Shape &Shape,
                                        SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
  IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());

  auto ArgTy = cast<PointerType>(Arg.getType());
  // swifterror arguments are required to have pointer-to-pointer type,
  // so create a pointer-typed alloca with opaque pointers.
  auto ValueTy = ArgTy->isOpaque() ? PointerType::getUnqual(F.getContext())
                                   : ArgTy->getNonOpaquePointerElementType();

  // Reduce to the alloca case:

  // Create an alloca and replace all uses of the arg with it.
  auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
  Arg.replaceAllUsesWith(Alloca);

  // Set an initial value in the alloca. swifterror is always null on entry.
  auto InitialValue = Constant::getNullValue(ValueTy);
  Builder.CreateStore(InitialValue, Alloca);

  // Find all the suspends in the function and save and restore around them.
  for (auto Suspend : Shape.CoroSuspends) {
    (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
  }

  // Find all the coro.ends in the function and restore the error value.
  for (auto End : Shape.CoroEnds) {
    Builder.SetInsertPoint(End);
    auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
    (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
  }

  // Now we can use the alloca logic.
  AllocasToPromote.push_back(Alloca);
  eliminateSwiftErrorAlloca(F, Alloca, Shape);
}

/// Eliminate all problematic uses of swifterror arguments and allocas
/// from the function. We'll fix them up later when splitting the function.
static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
  SmallVector<AllocaInst*, 4> AllocasToPromote;

  // Look for a swifterror argument.
  for (auto &Arg : F.args()) {
    if (!Arg.hasSwiftErrorAttr()) continue;

    eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
    break;
  }

  // Look for swifterror allocas.
  for (auto &Inst : F.getEntryBlock()) {
    auto Alloca = dyn_cast<AllocaInst>(&Inst);
    if (!Alloca || !Alloca->isSwiftError()) continue;

    // Clear the swifterror flag.
    Alloca->setSwiftError(false);

    AllocasToPromote.push_back(Alloca);
    eliminateSwiftErrorAlloca(F, Alloca, Shape);
  }

  // If we have any allocas to promote, compute a dominator tree and
  // promote them en masse.
  if (!AllocasToPromote.empty()) {
    DominatorTree DT(F);
    PromoteMemToReg(AllocasToPromote, DT);
  }
}

/// retcon and retcon.once conventions assume that all spill uses can be sunk
/// after the coro.begin intrinsic.
static void sinkSpillUsesAfterCoroBegin(Function &F,
                                        const FrameDataInfo &FrameData,
                                        CoroBeginInst *CoroBegin) {
  DominatorTree Dom(F);

  SmallSetVector<Instruction *, 32> ToMove;
  SmallVector<Instruction *, 32> Worklist;

  // Collect all users that precede coro.begin.
  for (auto *Def : FrameData.getAllDefs()) {
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Inst->getParent() != CoroBegin->getParent() ||
          Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }
  // Recursively collect users before coro.begin.
  while (!Worklist.empty()) {
    auto *Def = Worklist.pop_back_val();
    for (User *U : Def->users()) {
      auto Inst = cast<Instruction>(U);
      if (Dom.dominates(CoroBegin, Inst))
        continue;
      if (ToMove.insert(Inst))
        Worklist.push_back(Inst);
    }
  }

  // Sort by dominance.
  SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
  llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
    // If A dominates B, it should precede (<) B.
    return Dom.dominates(A, B);
  });

  Instruction *InsertPt = CoroBegin->getNextNode();
  for (Instruction *Inst : InsertionList)
    Inst->moveBefore(InsertPt);
}
/// For each local variable whose users all live inside one region between
/// suspend points, sink its lifetime.start marker to right after the suspend
/// block that dominates them. Doing so minimizes the lifetime of each
/// variable, hence minimizing the amount of data we end up putting on the
/// frame.
static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
                                     SuspendCrossingInfo &Checker) {
  DominatorTree DT(F);

  // Collect all possible basic blocks which may dominate all uses of allocas.
  SmallPtrSet<BasicBlock *, 4> DomSet;
  DomSet.insert(&F.getEntryBlock());
  for (auto *CSI : Shape.CoroSuspends) {
    BasicBlock *SuspendBlock = CSI->getParent();
    assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
           "should have split coro.suspend into its own block");
    DomSet.insert(SuspendBlock->getSingleSuccessor());
  }

  for (Instruction &I : instructions(F)) {
    AllocaInst* AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;

    for (BasicBlock *DomBB : DomSet) {
      bool Valid = true;
      SmallVector<Instruction *, 1> Lifetimes;

      auto isLifetimeStart = [](Instruction* I) {
        if (auto* II = dyn_cast<IntrinsicInst>(I))
          return II->getIntrinsicID() == Intrinsic::lifetime_start;
        return false;
      };

      auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
        if (isLifetimeStart(U)) {
          Lifetimes.push_back(U);
          return true;
        }
        if (!U->hasOneUse() || U->stripPointerCasts() != AI)
          return false;
        if (isLifetimeStart(U->user_back())) {
          Lifetimes.push_back(U->user_back());
          return true;
        }
        return false;
      };

      for (User *U : AI->users()) {
        Instruction *UI = cast<Instruction>(U);
        // For all users except lifetime.start markers, if they are all
        // dominated by one of the basic blocks and do not cross
        // suspend points as well, then there is no need to spill the
        // instruction.
        if (!DT.dominates(DomBB, UI->getParent()) ||
            Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
          // Skip lifetime.start, and the GEPs and bitcasts used by
          // lifetime.start markers.
          if (collectLifetimeStart(UI, AI))
            continue;
          Valid = false;
          break;
        }
      }
      // Sink lifetime.start markers to the dominating block when they are
      // only used outside the region.
      if (Valid && Lifetimes.size() != 0) {
        // May be AI itself, when the type of AI is i8*.
        auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
          if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
            return AI;
          auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
          return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
                                  DomBB->getTerminator());
        }(AI);

        auto *NewLifetime = Lifetimes[0]->clone();
        NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
        NewLifetime->insertBefore(DomBB->getTerminator());

        // All the lifetime.start markers outside the region are no longer
        // necessary.
        for (Instruction *S : Lifetimes)
          S->eraseFromParent();

        break;
      }
    }
  }
}
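// For illustration (hypothetical IR): when every use of %a sits after a
// suspend, the marker in the entry block
//
//   entry:
//     call void @llvm.lifetime.start.p0i8(i64 4, i8* %a.i8)
//
// is recreated in the suspend block's successor and the original is erased:
//
//   await.ready:
//     call void @llvm.lifetime.start.p0i8(i64 4, i8* %a.i8)
//
// so the alloca's live range no longer crosses the suspend point.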
static void collectFrameAllocas(Function &F, coro::Shape &Shape,
                                const SuspendCrossingInfo &Checker,
                                SmallVectorImpl<AllocaInfo> &Allocas) {
  for (Instruction &I : instructions(F)) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    if (!AI)
      continue;
    // The PromiseAlloca will be specially handled since it needs to be in a
    // fixed position in the frame.
    if (AI == Shape.SwitchLowering.PromiseAlloca) {
      continue;
    }
    DominatorTree DT(F);
    // The code that uses the lifetime.start intrinsic does not work for
    // functions with loops that have no exit. Disable it on ABIs we know to
    // generate such code.
    bool ShouldUseLifetimeStartInfo =
        (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
         Shape.ABI != coro::ABI::RetconOnce);
    AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
                             *Shape.CoroBegin, Checker,
                             ShouldUseLifetimeStartInfo};
    Visitor.visitPtr(*AI);
    if (!Visitor.getShouldLiveOnFrame())
      continue;
    Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
                         Visitor.getMayWriteBeforeCoroBegin());
  }
}

void coro::salvageDebugInfo(
    SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
    DbgVariableIntrinsic *DVI, bool OptimizeFrame) {
  Function *F = DVI->getFunction();
  IRBuilder<> Builder(F->getContext());
  auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
  while (isa<IntrinsicInst>(InsertPt))
    ++InsertPt;
  Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
  DIExpression *Expr = DVI->getExpression();
  // Follow the pointer arithmetic all the way to the incoming
  // function argument and convert into a DIExpression.
  bool SkipOutermostLoad = !isa<DbgValueInst>(DVI);
  Value *Storage = DVI->getVariableLocationOp(0);
  Value *OriginalStorage = Storage;

  while (auto *Inst = dyn_cast_or_null<Instruction>(Storage)) {
    if (auto *LdInst = dyn_cast<LoadInst>(Inst)) {
      Storage = LdInst->getOperand(0);
      // FIXME: This is a heuristic that works around the fact that
      // LLVM IR debug intrinsics cannot yet distinguish between
      // memory and value locations: Because a dbg.declare(alloca) is
      // implicitly a memory location, no DW_OP_deref operation for the
      // last direct load from an alloca is necessary. This condition
      // effectively drops the *last* DW_OP_deref in the expression.
      if (!SkipOutermostLoad)
        Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    } else if (auto *StInst = dyn_cast<StoreInst>(Inst)) {
      Storage = StInst->getOperand(0);
    } else {
      SmallVector<uint64_t, 16> Ops;
      SmallVector<Value *, 0> AdditionalValues;
      Value *Op = llvm::salvageDebugInfoImpl(
          *Inst, Expr ? Expr->getNumLocationOperands() : 0, Ops,
          AdditionalValues);
      if (!Op || !AdditionalValues.empty()) {
        // If salvaging failed or salvaging produced more than one location
        // operand, give up.
        break;
      }
      Storage = Op;
      Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, /*StackValue*/ false);
    }
    SkipOutermostLoad = false;
  }
  if (!Storage)
    return;

  // Store a pointer to the coroutine frame object in an alloca so it
  // is available throughout the function when producing unoptimized
  // code. Extending the lifetime this way is correct because the
  // variable has been declared by a dbg.declare intrinsic.
  //
  // Avoid doing this when optimizing, since the alloca would be eliminated
  // by optimization passes and the corresponding dbg.declares would become
  // invalid.
  if (!OptimizeFrame)
    if (auto *Arg = dyn_cast<llvm::Argument>(Storage)) {
      auto &Cached = DbgPtrAllocaCache[Storage];
      if (!Cached) {
        Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
                                      Arg->getName() + ".debug");
        Builder.CreateStore(Storage, Cached);
      }
      Storage = Cached;
      // FIXME: LLVM lacks nuanced semantics to differentiate between
      // memory and direct locations at the IR level. The backend will
      // turn a dbg.declare(alloca, ..., DIExpression()) into a memory
      // location. Thus, if there are deref and offset operations in the
      // expression, we need to add a DW_OP_deref at the *start* of the
      // expression to first load the contents of the alloca before
      // adjusting it with the expression.
      Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
    }

  DVI->replaceVariableLocationOp(OriginalStorage, Storage);
  DVI->setExpression(Expr);
  // We only hoist dbg.declare today, since it doesn't make sense to hoist
  // dbg.value or dbg.addr: they do not have the same function-wide
  // guarantees that dbg.declare does.
  if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
    if (auto *II = dyn_cast<InvokeInst>(Storage))
      DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
    else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
      DVI->moveBefore(CBI->getDefaultDest()->getFirstNonPHI());
    else if (auto *InsertPt = dyn_cast<Instruction>(Storage)) {
      assert(!InsertPt->isTerminator() &&
             "Unexpected terminator producing a storage value.");
      DVI->moveAfter(InsertPt);
    } else if (isa<Argument>(Storage))
      DVI->moveAfter(F->getEntryBlock().getFirstNonPHI());
  }
}
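// For illustration, a hypothetical (simplified) result when the salvaged
// storage is a function argument %arg and the frame is not optimized: the
// pointer is parked in an entry-block alloca and the debug intrinsic is
// retargeted at it, with a DW_OP_deref prepended to the expression:
//
//   %arg.debug = alloca i8*
//   store i8* %arg, i8** %arg.debug
//   call void @llvm.dbg.declare(metadata i8** %arg.debug, metadata !var,
//                               metadata !DIExpression(DW_OP_deref, ...))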
void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
  // Don't eliminate swifterror in async functions that won't be split.
  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    eliminateSwiftError(F, Shape);

  if (Shape.ABI == coro::ABI::Switch &&
      Shape.SwitchLowering.PromiseAlloca) {
    Shape.getSwitchCoroId()->clearPromise();
  }

  // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
  // intrinsics are in their own blocks to simplify the logic of building up
  // SuspendCrossing data.
  for (auto *CSI : Shape.CoroSuspends) {
    if (auto *Save = CSI->getCoroSave())
      splitAround(Save, "CoroSave");
    splitAround(CSI, "CoroSuspend");
  }

  // Put CoroEnds into their own blocks.
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    splitAround(CE, "CoroEnd");

    // Emit the musttail call function in a new block before the CoroEnd.
    // We do this here so that the right suspend crossing info is computed for
    // the uses of the musttail call function call. (Arguments to the coro.end
    // instructions would be ignored.)
    if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
      auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
      if (!MustTailCallFn)
        continue;
      IRBuilder<> Builder(AsyncEnd);
      SmallVector<Value *, 8> Args(AsyncEnd->args());
      auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
      auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
                                      Arguments, Builder);
      splitAround(Call, "MustTailCall.Before.CoroEnd");
    }
  }

  // Later code makes structural assumptions about single-predecessor PHIs,
  // e.g. that they are not live across a suspend point.
  cleanupSinglePredPHIs(F);

  // Transforms multi-edge PHI Nodes, so that any value feeding into a PHI
  // will never have its definition separated from the PHI by a suspend point.
  rewritePHIs(F);

  // Build suspend crossing info.
  SuspendCrossingInfo Checker(F, Shape);

  IRBuilder<> Builder(F.getContext());
  FrameDataInfo FrameData;
  SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
  SmallVector<Instruction*, 4> DeadInstructions;

  {
    SpillInfo Spills;
    for (int Repeat = 0; Repeat < 4; ++Repeat) {
      // See if there are materializable instructions across suspend points.
      for (Instruction &I : instructions(F))
        if (materializable(I)) {
          for (User *U : I.users())
            if (Checker.isDefinitionAcrossSuspend(I, U))
              Spills[&I].push_back(cast<Instruction>(U));
        }

      if (Spills.empty())
        break;

      // Rewrite materializable instructions to be materialized at the use
      // point.
      LLVM_DEBUG(dumpSpills("Materializations", Spills));
      rewriteMaterializableInstructions(Builder, Spills);
      Spills.clear();
    }
  }

  if (Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
      Shape.ABI != coro::ABI::RetconOnce)
    sinkLifetimeStartMarkers(F, Shape, Checker);

  if (Shape.ABI != coro::ABI::Async || !Shape.CoroSuspends.empty())
    collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
  LLVM_DEBUG(dumpAllocas(FrameData.Allocas));

  // Collect the spills for arguments and other not-materializable values.
  for (Argument &A : F.args())
    for (User *U : A.users())
      if (Checker.isDefinitionAcrossSuspend(A, U))
        FrameData.Spills[&A].push_back(cast<Instruction>(U));

  for (Instruction &I : instructions(F)) {
    // Values returned from coroutine structure intrinsics should not be part
    // of the Coroutine Frame.
    if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
      continue;

    // The coroutine promise is always included in the coroutine frame; there
    // is no need to check for suspend crossing.
    if (Shape.ABI == coro::ABI::Switch &&
        Shape.SwitchLowering.PromiseAlloca == &I)
      continue;

    // Handle alloca.alloc specially here.
    if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
      // Check whether the alloca's lifetime is bounded by suspend points.
      if (isLocalAlloca(AI)) {
        LocalAllocas.push_back(AI);
        continue;
      }

      // If not, do a quick rewrite of the alloca and then add spills of
      // the rewritten value. The rewrite doesn't invalidate anything in
      // Spills because the other alloca intrinsics have no other operands
      // besides AI, and it doesn't invalidate the iteration because we delay
      // erasing AI.
      auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);

      for (User *U : Alloc->users()) {
        if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
          FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
      }
      continue;
    }

    // Ignore alloca.get; we process this as part of coro.alloca.alloc.
    if (isa<CoroAllocaGetInst>(I))
      continue;

    if (isa<AllocaInst>(I))
      continue;

    for (User *U : I.users())
      if (Checker.isDefinitionAcrossSuspend(I, U)) {
        // We cannot spill a token.
        if (I.getType()->isTokenTy())
          report_fatal_error(
              "token definition is separated from the use by a suspend point");
        FrameData.Spills[&I].push_back(cast<Instruction>(U));
      }
  }

  // We don't want the layout of the coroutine frame to be affected by debug
  // information, so we only salvage DbgValueInsts whose values are already in
  // the frame. The dbg.values for allocas are handled specially.
  for (auto &Iter : FrameData.Spills) {
    auto *V = Iter.first;
    SmallVector<DbgValueInst *, 16> DVIs;
    findDbgValues(DVIs, V);
    for (DbgValueInst *DVI : DVIs)
      if (Checker.isDefinitionAcrossSuspend(*V, DVI))
        FrameData.Spills[V].push_back(DVI);
  }

  LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
  if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
      Shape.ABI == coro::ABI::Async)
    sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
  Shape.FrameTy = buildFrameType(F, Shape, FrameData);
  createFramePtr(Shape);
  // For now, this works for C++ programs only.
  buildFrameDebugInfo(F, Shape, FrameData);
  insertSpills(FrameData, Shape);
  lowerLocalAllocas(LocalAllocas, DeadInstructions);

  for (auto I : DeadInstructions)
    I->eraseFromParent();
}