//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroInstr.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "coro-split"

namespace {

/// A little helper class for building the resume, destroy, and continuation
/// clones of a coroutine.
class CoroCloner {
public:
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  Function &OrigF;
  Function *NewF;
  const Twine &Suffix;
  coro::Shape &Shape;
  Kind FKind;
  ValueToValueMapTy VMap;
  IRBuilder<> Builder;
  Value *NewFramePtr = nullptr;
  Value *SwiftErrorSlot = nullptr;

  /// The active suspend instruction; meaningful only for continuation and
  /// async ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

public:
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind)
      : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
        FKind(FKind), Builder(OrigF.getContext()) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
      : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
        FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
        Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  void create();

private:
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void createDeclaration();
  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void handleFinalSuspend();
  void maybeFreeContinuationStorage();
};

} // end anonymous namespace

static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(CoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InResume, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async:
    Builder.CreateRetVoid();
    break;

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    Builder.CreateRetVoid();
    break;

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
        cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(CoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch:
    if (!InResume)
      return;
    break;
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add a cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

static void replaceCoroEnd(CoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}

// Create an entry block for a resume function with a switch that will jump to
// suspend points.
static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Switch);
  LLVMContext &C = F.getContext();

  // resume.entry:
  //   %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
  //                 i32 0, i32 2
  //   %index = load i32, i32* %index.addr
  //   switch i32 %index, label %unreachable [
  //     i32 0, label %resume.0
  //     i32 1, label %resume.1
  //     ...
  //   ]

  auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
  auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);

  IRBuilder<> Builder(NewEntry);
  auto *FramePtr = Shape.FramePtr;
  auto *FrameTy = Shape.FrameTy;
  auto *GepIndex = Builder.CreateStructGEP(
      FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
  auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
  auto *Switch =
      Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
  Shape.SwitchLowering.ResumeSwitch = Switch;

  size_t SuspendIndex = 0;
  for (auto *AnyS : Shape.CoroSuspends) {
    auto *S = cast<CoroSuspendInst>(AnyS);
    ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);

    // Replace CoroSave with a store to Index:
    //   %index.addr = getelementptr %f.frame... (index field number)
    //   store i32 0, i32* %index.addr1
    auto *Save = S->getCoroSave();
    Builder.SetInsertPoint(Save);
    if (S->isFinal()) {
      // The final suspend point is represented by storing zero in
      // ResumeFnAddr.
      auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
                                 coro::Shape::SwitchFieldIndex::Resume,
                                 "ResumeFn.addr");
      auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
          cast<PointerType>(GepIndex->getType())->getElementType()));
      Builder.CreateStore(NullPtr, GepIndex);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
      Builder.CreateStore(IndexVal, GepIndex);
    }
    Save->replaceAllUsesWith(ConstantTokenNone::get(C));
    Save->eraseFromParent();

    // Split the block before and after coro.suspend and add a jump from the
    // entry switch:
    //
    //  whateverBB:
    //    whatever
    //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
    //    switch i8 %0, label %suspend [i8 0, label %resume
    //                                  i8 1, label %cleanup]
    // becomes:
    //
    //  whateverBB:
    //    whatever
    //    br label %resume.0.landing
    //
    //  resume.0: ; <--- jump from the switch in the resume.entry
    //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
    //    br label %resume.0.landing
    //
    //  resume.0.landing:
    //    %1 = phi i8 [-1, %whateverBB], [%0, %resume.0]
    //    switch i8 %1, label %suspend [i8 0, label %resume
    //                                  i8 1, label %cleanup]

    auto *SuspendBB = S->getParent();
    auto *ResumeBB =
        SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
    auto *LandingBB = ResumeBB->splitBasicBlock(
        S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
    Switch->addCase(IndexVal, ResumeBB);

    cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
    auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
    S->replaceAllUsesWith(PN);
    PN->addIncoming(Builder.getInt8(-1), SuspendBB);
    PN->addIncoming(S, ResumeBB);

    ++SuspendIndex;
  }

  Builder.SetInsertPoint(UnreachBB);
  Builder.CreateUnreachable();

  Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
}

// Rewrite final suspend point handling. We do not use the suspend index to
// represent the final suspend point. Instead we zero-out ResumeFnAddr in the
// coroutine frame, since it is undefined behavior to resume a coroutine
// suspended at the final suspend point.
// Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point (if present) is always the last element of the CoroSuspends array).
// In the destroy function, we add a code sequence to check if ResumeFnAddress
// is null, and if so, jump to the appropriate label to handle cleanup from the
// final suspend point.
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);
  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());
    auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
                               coro::Shape::SwitchFieldIndex::Resume,
                               "ResumeFn.addr");
    auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
                                    GepIndex);
    auto *Cond = Builder.CreateIsNull(Load);
    Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore) {
  Module *M = OrigF.getParent();
  auto *FnTy = Shape.getResumeFunctionType();

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);
  NewF->addParamAttr(0, Attribute::NonNull);

  // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
  if (Shape.ABI != coro::ABI::Async)
    NewF->addParamAttr(0, Attribute::NoAlias);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty()) return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value *, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
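  // For illustration (hypothetical IR), a single-index extract such as
  //   %second = extractvalue { i8*, i32 } %suspend, 1
  // is forwarded directly to the continuation argument with the same index,
  // so no aggregate ever needs to be materialized for it.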
  for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE;) {
    auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty()) return;

  // Otherwise, we need to create an aggregate.
  Value *Agg = UndefValue::get(NewS->getType());
  for (size_t I = 0, E = Args.size(); I != E; ++I)
    Agg = Builder.CreateInsertValue(Agg, Args[I], I);

  NewS->replaceAllUsesWith(Agg);
}

void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point; replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend) continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

void CoroCloner::replaceCoroEnds() {
  for (CoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto NewCE = cast<CoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}

static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot) {
      assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
             "multiple swifterror slots in function with different types");
      return CachedSlot;
    }

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        assert(Arg.getType()->getPointerElementType() == ValueTy &&
               "swifterror argument does not have expected type");
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
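    // Roughly: a zero-argument op ('get') becomes a load from the slot, and a
    // one-argument op ('set') becomes a store to it, e.g.
    //   %v = load i8*, i8** %swifterror.slot
    //   store i8* %newv, i8** %swifterror.slot
    // where %swifterror.slot stands for the argument or alloca found above.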
    Value *MappedResult;
    if (Op->getNumArgOperands() == 0) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->getNumArgOperands() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Move any allocas into Entry that weren't moved into the frame.
  for (auto IT = OldEntry->begin(), End = OldEntry->end(); IT != End;) {
    Instruction &I = *IT++;
    if (!isa<AllocaInst>(&I) || I.use_empty())
      continue;

    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }
}

/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.
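  // What the clone's single parameter denotes differs per ABI; e.g. in switch
  // lowering it is already the frame pointer, so (roughly) the clone looks
  // like:
  //   define internal void @f.resume(%f.Frame* noalias nonnull %frame.ptr)
  // and no derivation is needed, while the other ABIs must chase a pointer.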

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  case coro::ABI::Async: {
    auto *CalleeContext = &*NewF->arg_begin();
    auto *FramePtrTy = Shape.FrameTy->getPointerTo();
    // The caller context is assumed to be stored at the beginning of the
    // callee context.
    // struct async_context {
    //   struct async_context *caller;
    //   ...
    auto &Context = Builder.getContext();
    auto *Int8PtrPtrTy = Type::getInt8PtrTy(Context)->getPointerTo();
    auto *CallerContextAddr =
        Builder.CreateBitOrPointerCast(CalleeContext, Int8PtrPtrTy);
    auto *CallerContext = Builder.CreateLoad(CallerContextAddr);
    // The frame is located after the async_context header.
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = Shape.FrameTy->getPointerTo();

    // If the storage is inline, just bitcast the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return Builder.CreateBitCast(NewStorage, FramePtrTy);

    // Otherwise, load the real frame from the opaque storage.
    auto FramePtrPtr =
        Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
    return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
  }
  }
  llvm_unreachable("bad ABI");
}

static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex,
                                 uint64_t Size, Align Alignment) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoAlias);
  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end());
  }

  // Replace all args with undefs. The buildCoroutineFrame algorithm has
  // already rewritten accesses to the args that occur after suspend points
  // with loads and stores to/from the coroutine frame.
  for (Argument &A : OrigF.args())
    VMap[&A] = UndefValue::get(A.getType());

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns);

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);

  auto &Context = NewF->getContext();

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
                                      OrigAttrs.getFnAttributes());

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.FrameSize, Shape.FrameAlign);
    break;
  case coro::ABI::Async:
    break;
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment());
    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return, /*UseLLVMTrap=*/false);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail calls at all suspend points, each
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the
  // verifier. These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  OldVFrame->replaceAllUsesWith(NewVFrame);

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling, as it is not done via the switch
    // (this allows us to remove the final case from the switch, since it is
    // undefined behavior to resume a coroutine suspended at the final
    // suspend point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// a new entry block, and replacing coro.suspend with an appropriate value to
// force the resume or cleanup path for every suspend point.
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

/// Remove calls to llvm.coro.end in the original function.
static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
  for (auto End : Shape.CoroEnds) {
    replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
  }
}

static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigContextSize = FuncPtrStruct->getOperand(0);
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), NewContextSize, OrigRelativeFunOffset);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}

static void replaceFrameSize(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  auto Size = DL.getTypeAllocSize(Shape.FrameTy);
  auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}

// Create a global constant array containing pointers to the functions provided
// and set the Info parameter of CoroBegin to point at this constant. Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//                   [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                    i8* bitcast([2 x void(%f.frame*)*]* @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update the coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}

// Store the addresses of the Resume/Destroy/Cleanup functions in the
// coroutine frame.
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  IRBuilder<> Builder(Shape.FramePtr->getNextNode());
  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elide the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");

  legacy::FunctionPassManager FPM(F.getParent());

  FPM.add(createSCCPPass());
  FPM.add(createCFGSimplificationPass());
  FPM.add(createEarlyCSEPass());
  FPM.add(createCFGSimplificationPass());

  FPM.doInitialization();
  FPM.run(F);
  FPM.doFinalization();
}

// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. A suspend point is represented by a switch; track the PHI
// values and select the correct case successor when possible.
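// For illustration (hypothetical IR): starting from the terminator
//   br label %exit
// where %exit contains only `ret void`, the branch is replaced with a clone
// of that `ret void`. addMustTailToCoroResumes below relies on this to place
// a ret directly after a resume call that it wants to mark musttail.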
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;

  Instruction *I = InitialInst;
  while (I->isTerminator() ||
         (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from the basic block of InitialInst.
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = BB;
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      auto *BR = dyn_cast<BranchInst>(I->getNextNode());
      if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the case count of a suspended switch instruction is reduced to
        // one, it is simplified to a CmpInst by llvm::ConstantFoldTerminator,
        // and the comparison looks like: %cond = icmp eq i8 %V, constant.
        ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
        if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
          Value *V = CondCmp->getOperand(0);
          auto it = ResolvedValues.find(V);
          if (it != ResolvedValues.end())
            V = it->second;

          if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
            BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
                                 ? BR->getSuccessor(0)
                                 : BR->getSuccessor(1);
            scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
            I = BB->getFirstNonPHIOrDbgOrLifetime();
            continue;
          }
        }
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    return false;
  }
  return false;
}

// Check whether CI obeys the rules of the musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match the prototype and calling convention of the resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI should not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet,    Attribute::ByVal,  Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg,  Attribute::Returned,
      Attribute::SwiftSelf,    Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttribute(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instructions that are immediately followed by a
// suspend (i.e. a ret). We do this even at -O0 to support guaranteed tail
// calls for symmetrical coroutine control transfer (a C++ Coroutines TS
// extension). This transformation is done only in the resume part of the
// coroutine, which has the same signature and calling convention as the
// coro.resume call.
static void addMustTailToCoroResumes(Function &F) {
  bool changed = false;

  // Collect potential resume instructions.
  SmallVector<CallInst *, 4> Resumes;
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
    removeUnreachableBlocks(F);
}

// The coroutine has no suspend points. Remove the heap allocation for the
// coroutine frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// SimplifySuspendPoint needs to check that there are no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine, and if that is the case we cannot eliminate the suspend
// point.
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB.
  // Because CoroSaveIntr returns a token consumed by the suspend instruction,
  // all blocks in between will have to eventually hit SaveBB when going
  // backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (Set.count(Pred) == 0)
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // If it does not refer to the current coroutine, we cannot do anything
  // with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);
}

static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  addMustTailToCoroResumes(*ResumeClone);

  // Store the addresses of the resume/destroy/cleanup functions in the
  // coroutine frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to the resume/destroy/cleanup functions,
  // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
  // pass can determine the correct function to call.
  setCoroInfo(F, Shape, Clones);
}

static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(0, UndefValue::get(Int8PtrTy));
}

static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
                                SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
  IRBuilder<> Builder(Id);

  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);

    // Create the clone declaration.
    auto *Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(Idx), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail-call function.
    auto *Fun = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->operand_values());
    auto *TailCall = Builder.CreateCall(
        cast<FunctionType>(Fun->getType()->getPointerElementType()), Fun,
        ArrayRef<Value *>(Args).drop_front(3).drop_back(1));
    TailCall->setTailCallKind(CallInst::TCK_MustTail);
    TailCall->setCallingConv(Fun->getCallingConv());
    Builder.CreateRetVoid();

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}

static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Allocate the frame.
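  // When the frame doesn't fit in the storage described by the coro.id, the
  // sequence built below is roughly (the allocator name is illustrative; the
  // real callee comes from the coro.id's allocator):
  //   %frame = call i8* @my_allocator(i64 <frame size>)
  //   store i8* %frame, i8** %storage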
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment.
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ?
               RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}

namespace {
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;
public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
} // end anonymous namespace

static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones,
                                  bool ReuseFrameSlot) {
  PrettyStackTraceFunction prettyStackTrace(F);

  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
  removeUnreachableBlocks(F);

  coro::Shape Shape(F, ReuseFrameSlot);
  if (!Shape.CoroBegin)
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSize(Shape);

  // If there are no suspend points, no split is required; just remove
  // the allocation and deallocation blocks, since they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Async:
      splitAsyncCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  return Shape;
}

static void
updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
                                   const SmallVectorImpl<Function *> &Clones,
                                   CallGraph &CG, CallGraphSCC &SCC) {
  if (!Shape.CoroBegin)
    return;

  removeCoroEnds(Shape, &CG);
  postSplitCleanup(F);

  // Update the call graph and add the functions we created to the SCC.
  coro::updateCallGraph(F, Clones, CG, SCC);
}

static void updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  if (!Shape.CoroBegin)
    return;

  for (llvm::CoroEndInst *End : Shape.CoroEnds) {
    auto &Context = End->getContext();
    End->replaceAllUsesWith(ConstantInt::getFalse(Context));
    End->eraseFromParent();
  }

  postSplitCleanup(N.getFunction());

  // We've inserted instructions into coroutine 'f' that reference the three new
  // coroutine funclets. We must now update the call graph so that reference
  // edges between 'f' and its funclets are added to it. LazyCallGraph only
  // allows CGSCC passes to insert "trivial" reference edges. We've ensured
  // above, by inserting the funclets into the same SCC as the coroutine, that
  // the edges are trivial.
  //
  // N.B.: If we didn't update the call graph here, a CGSCCToFunctionPassAdaptor
  // later in this CGSCC pass pipeline may be run, triggering a call graph
  // update of its own. Function passes run by the adaptor are not permitted to
  // add new edges of any kind to the graph, and the new edges inserted by this
  // pass would be misattributed to that unrelated function pass.
  updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
}

// When we see the coroutine for the first time, we insert an indirect call
// to a devirt trigger function and mark the coroutine as ready for splitting.
static void prepareForSplit(Function &F, CallGraph &CG) {
  Module &M = *F.getParent();
  LLVMContext &Context = F.getContext();
#ifndef NDEBUG
  Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
  assert(DevirtFn && "coro.devirt.trigger function not found");
#endif

  F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);

  // Insert an indirect call sequence that will be devirtualized by the
  // CoroElide pass:
  //   %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
  //   %1 = bitcast i8* %0 to void(i8*)*
  //   call void %1(i8* null)
  coro::LowererBase Lowerer(M);
  Instruction *InsertPt = F.getEntryBlock().getTerminator();
  auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
  auto *DevirtFnAddr =
      Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
  FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
                                         {Type::getInt8PtrTy(Context)}, false);
  auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);

  // Update the call graph with the indirect call we just added.
  CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
}

// Make sure that there is a devirtualization trigger function that the
// coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
// trigger function is not found, we will create one and add it to the current
// SCC.
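//
// The trigger function is deliberately trivial; what we create below is
// roughly (illustrative IR):
//
//   define private void @coro.devirt.trigger(i8* %0) alwaysinline {
//   entry:
//     ret void
//   }
//
// Its only purpose is to be the devirtualization target: CoroElide rewrites
// the indirect call inserted by prepareForSplit() into a direct call to this
// function, and that devirtualization is what makes the legacy CGSCC pipeline
// iterate over the SCC again.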
static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
  Module &M = CG.getModule();
  if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
    return;

  LLVMContext &C = M.getContext();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
                                 /*isVarArg=*/false);
  Function *DevirtFn =
      Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
                       CORO_DEVIRT_TRIGGER_FN, &M);
  DevirtFn->addFnAttr(Attribute::AlwaysInline);
  auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
  ReturnInst::Create(C, Entry);

  auto *Node = CG.getOrInsertFunction(DevirtFn);

  SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
  Nodes.push_back(Node);
  SCC.initialize(Nodes);
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //   %0 = bitcast [[TYPE]] @some_function to i8*
  //   %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //   %2 = bitcast %1 to [[TYPE]]
  // ==>
  //   %2 = @some_function
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Find call graph nodes for the preparation.
  CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
  if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
    PrepareUserNode = CG[Prepare->getFunction()];
    FnNode = CG[ConcreteFn];
  }

  // Attempt to peephole this pattern:
  //   %0 = bitcast [[TYPE]] @some_function to i8*
  //   %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //   %2 = bitcast %1 to [[TYPE]]
  // ==>
  //   %2 = @some_function
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
       UI != UE;) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType()) continue;

    // Check whether the replacement will introduce new direct calls.
    // If so, we'll need to update the call graph.
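    // For example, if %2 above feeds a `call [[TYPE]] %2(...)`, replacing
    // the cast below turns that opaque indirect call into a direct call of
    // @some_function, so the call graph edge for that call site must be
    // retargeted at @some_function's node.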
    if (PrepareUserNode) {
      for (auto &Use : Cast->uses()) {
        if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
          if (!CB->isCallee(&Use))
            continue;
          PrepareUserNode->removeCallEdgeFor(*CB);
          PrepareUserNode->addCalledFunction(CB, FnNode);
        }
      }
    }

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty()) break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}

/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
/// IPO from operating on calls to a retcon coroutine before it's been
/// split. This is only safe to do after we've split all retcon
/// coroutines in the module. We can do this in this pass because
/// this pass does promise to split all retcon coroutines (as opposed to
/// switch coroutines, which are lowered in multiple stages).
static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
  bool Changed = false;
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
       PI != PE;) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG);
    Changed = true;
  }

  return Changed;
}

static bool declaresCoroSplitIntrinsics(const Module &M) {
  return coro::declaresIntrinsics(
      M, {"llvm.coro.begin", "llvm.coro.prepare.retcon"});
}

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  // non-zero number of nodes, so we assume that here and grab the first
  // node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  if (!declaresCoroSplitIntrinsics(M))
    return PreservedAnalyses::all();

  // Check for uses of llvm.coro.prepare.retcon.
  auto *PrepareFn = M.getFunction("llvm.coro.prepare.retcon");
  if (PrepareFn && PrepareFn->use_empty())
    PrepareFn = nullptr;

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *, 4> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
      Coroutines.push_back(&N);

  if (Coroutines.empty() && !PrepareFn)
    return PreservedAnalyses::all();

  if (Coroutines.empty())
    replaceAllPrepares(PrepareFn, CG, C);

  // Split all the coroutines.
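  //
  // CORO_PRESPLIT_ATTR acts as a small per-coroutine state machine: the first
  // time we reach a coroutine here we only re-enqueue the SCC and advance the
  // attribute to PREPARED_FOR_SPLIT; on the revisit the attribute is removed
  // and the coroutine is actually split.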
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
    StringRef Value = Attr.getValueAsString();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "' state: " << Value << "\n");
    if (Value == UNPREPARED_FOR_SPLIT) {
      // Enqueue a second iteration of the CGSCC pipeline.
      // N.B.:
      // The CoroSplitLegacy pass "triggers" a restart of the CGSCC pass
      // pipeline by inserting an indirect function call that the
      // CoroElideLegacy pass then replaces with a direct function call. The
      // legacy CGSCC pipeline's implicit behavior was as if it were wrapped
      // in the new pass manager's DevirtSCCRepeatedPass abstraction.
      //
      // This pass does not need to "trigger" another run of the pipeline.
      // Instead, it simply enqueues the same SCC onto the pipeline's
      // worklist.
      UR.CWorklist.insert(&C);
      F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
      continue;
    }
    F.removeFnAttr(CORO_PRESPLIT_ATTR);

    SmallVector<Function *, 4> Clones;
    const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
    updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
  }

  if (PrepareFn)
    replaceAllPrepares(PrepareFn, CG, C);

  return PreservedAnalyses::none();
}

namespace {

// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to initial, resume and destroy invocations of the coroutine,
// add them to the current SCC and restart the IPO pipeline to optimize the
// coroutine subfunctions we extracted before proceeding to the caller of the
// coroutine.
struct CoroSplitLegacy : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid

  CoroSplitLegacy(bool ReuseFrameSlot = false)
      : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
    initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool Run = false;
  bool ReuseFrameSlot;

  // A coroutine is identified by the presence of the coro.begin intrinsic;
  // if we don't have any, this pass has nothing to do.
  bool doInitialization(CallGraph &CG) override {
    Run = declaresCoroSplitIntrinsics(CG.getModule());
    return CallGraphSCCPass::doInitialization(CG);
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (!Run)
      return false;

    // Check for uses of llvm.coro.prepare.retcon.
    auto PrepareFn =
        SCC.getCallGraph().getModule().getFunction("llvm.coro.prepare.retcon");
    if (PrepareFn && PrepareFn->use_empty())
      PrepareFn = nullptr;

    // Find coroutines for processing.
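    // This mirrors the new pass manager path in CoroSplitPass::run above,
    // except that a rerun of the pipeline is requested through the devirt
    // trigger machinery (createDevirtTriggerFunc/prepareForSplit) rather than
    // by re-enqueuing the SCC on a worklist.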
    SmallVector<Function *, 4> Coroutines;
    for (CallGraphNode *CGN : SCC)
      if (auto *F = CGN->getFunction())
        if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
          Coroutines.push_back(F);

    if (Coroutines.empty() && !PrepareFn)
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

    if (Coroutines.empty())
      return replaceAllPrepares(PrepareFn, CG);

    createDevirtTriggerFunc(CG, SCC);

    // Split all the coroutines.
    for (Function *F : Coroutines) {
      Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
      StringRef Value = Attr.getValueAsString();
      LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
                        << "' state: " << Value << "\n");
      if (Value == UNPREPARED_FOR_SPLIT) {
        prepareForSplit(*F, CG);
        continue;
      }
      F->removeFnAttr(CORO_PRESPLIT_ATTR);

      SmallVector<Function *, 4> Clones;
      const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
      updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
    }

    if (PrepareFn)
      replaceAllPrepares(PrepareFn, CG);

    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override { return "Coroutine Splitting"; }
};

} // end anonymous namespace

char CoroSplitLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)

Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
  return new CoroSplitLegacy(ReuseFrameSlot);
}