//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
19 //===----------------------------------------------------------------------===// 20 21 #include "llvm/Transforms/Coroutines/CoroSplit.h" 22 #include "CoroInstr.h" 23 #include "CoroInternal.h" 24 #include "llvm/ADT/DenseMap.h" 25 #include "llvm/ADT/SmallPtrSet.h" 26 #include "llvm/ADT/SmallVector.h" 27 #include "llvm/ADT/StringRef.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/Analysis/CallGraph.h" 30 #include "llvm/Analysis/CallGraphSCCPass.h" 31 #include "llvm/IR/Argument.h" 32 #include "llvm/IR/Attributes.h" 33 #include "llvm/IR/BasicBlock.h" 34 #include "llvm/IR/CFG.h" 35 #include "llvm/IR/CallingConv.h" 36 #include "llvm/IR/Constants.h" 37 #include "llvm/IR/DataLayout.h" 38 #include "llvm/IR/DerivedTypes.h" 39 #include "llvm/IR/Function.h" 40 #include "llvm/IR/GlobalValue.h" 41 #include "llvm/IR/GlobalVariable.h" 42 #include "llvm/IR/IRBuilder.h" 43 #include "llvm/IR/InstIterator.h" 44 #include "llvm/IR/InstrTypes.h" 45 #include "llvm/IR/Instruction.h" 46 #include "llvm/IR/Instructions.h" 47 #include "llvm/IR/IntrinsicInst.h" 48 #include "llvm/IR/LLVMContext.h" 49 #include "llvm/IR/LegacyPassManager.h" 50 #include "llvm/IR/Module.h" 51 #include "llvm/IR/Type.h" 52 #include "llvm/IR/Value.h" 53 #include "llvm/IR/Verifier.h" 54 #include "llvm/InitializePasses.h" 55 #include "llvm/Pass.h" 56 #include "llvm/Support/Casting.h" 57 #include "llvm/Support/Debug.h" 58 #include "llvm/Support/PrettyStackTrace.h" 59 #include "llvm/Support/raw_ostream.h" 60 #include "llvm/Transforms/Scalar.h" 61 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 62 #include "llvm/Transforms/Utils/CallGraphUpdater.h" 63 #include "llvm/Transforms/Utils/Cloning.h" 64 #include "llvm/Transforms/Utils/Local.h" 65 #include "llvm/Transforms/Utils/ValueMapper.h" 66 #include <cassert> 67 #include <cstddef> 68 #include <cstdint> 69 #include <initializer_list> 70 #include <iterator> 71 72 using namespace llvm; 73 74 #define DEBUG_TYPE "coro-split" 75 76 namespace { 77 78 /// A little helper 
/// class for building the resume/destroy/continuation clones that this pass
/// splits out of the original coroutine.
class CoroCloner {
public:
  /// Which flavor of clone is being produced.
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  Function &OrigF;        // The coroutine being split.
  Function *NewF;         // The clone; null until created for switch lowering.
  const Twine &Suffix;    // Appended to OrigF's name to name the clone.
  coro::Shape &Shape;     // Lowering information gathered for this coroutine.
  Kind FKind;             // Which flavor of clone we are building.
  ValueToValueMapTy VMap; // Maps original values to their counterparts in NewF.
  IRBuilder<> Builder;
  Value *NewFramePtr = nullptr;    // Frame pointer as seen inside the clone.
  Value *SwiftErrorSlot = nullptr; // Cached swifterror slot, if one is needed.

  /// The active suspend instruction; meaningful only for continuation and async
  /// ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

public:
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind)
    : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
      FKind(FKind), Builder(OrigF.getContext()) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
    : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
      FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
      Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

  /// Return the clone. Only valid once the function has been created (by the
  /// continuation constructor or by create()).
  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  /// Clone the coroutine body into NewF and rewrite it for this clone kind.
  void create();

private:
  /// True for the switch-lowering clones that run the destruction path
  /// (destroy/cleanup); those see coro.suspend lowered to 1 rather than 0.
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void createDeclaration();
  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void handleFinalSuspend();
  void maybeFreeContinuationStorage();
};

} // end anonymous namespace

/// Free the retcon coroutine's frame storage, unless the frame was placed
/// inline in the caller-provided buffer (in which case there is nothing to
/// deallocate).
static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(CoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InResume, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async:
    Builder.CreateRetVoid();
    break;

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    Builder.CreateRetVoid();
    break;

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    // The resume function returns either a continuation pointer, or a struct
    // whose first element is the continuation pointer.
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
      cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(CoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume, CallGraph *CG){
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch:
    if (!InResume)
      return;
    break;
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

/// Replace a call to llvm.coro.end, dispatching to the unwind or fallthrough
/// form as appropriate, then rewrite its boolean result (true in resume
/// clones, false in the original function) and erase the intrinsic call.
static void replaceCoroEnd(CoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}

// Create an entry block for a resume function with a switch that will jump to
// suspend points.
static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Switch);
  LLVMContext &C = F.getContext();

  // resume.entry:
  //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
  //  i32 2
  //  % index = load i32, i32* %index.addr
  //  switch i32 %index, label %unreachable [
  //    i32 0, label %resume.0
  //    i32 1, label %resume.1
  //    ...
  //  ]

  auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
  auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);

  IRBuilder<> Builder(NewEntry);
  auto *FramePtr = Shape.FramePtr;
  auto *FrameTy = Shape.FrameTy;
  auto *GepIndex = Builder.CreateStructGEP(
      FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
  auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
  auto *Switch =
      Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
  Shape.SwitchLowering.ResumeSwitch = Switch;

  size_t SuspendIndex = 0;
  for (auto *AnyS : Shape.CoroSuspends) {
    auto *S = cast<CoroSuspendInst>(AnyS);
    ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);

    // Replace CoroSave with a store to Index:
    //    %index.addr = getelementptr %f.frame... (index field number)
    //    store i32 0, i32* %index.addr1
    auto *Save = S->getCoroSave();
    Builder.SetInsertPoint(Save);
    if (S->isFinal()) {
      // Final suspend point is represented by storing zero in ResumeFnAddr.
      auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
                                 coro::Shape::SwitchFieldIndex::Resume,
                                  "ResumeFn.addr");
      auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
          cast<PointerType>(GepIndex->getType())->getElementType()));
      Builder.CreateStore(NullPtr, GepIndex);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
      Builder.CreateStore(IndexVal, GepIndex);
    }
    Save->replaceAllUsesWith(ConstantTokenNone::get(C));
    Save->eraseFromParent();

    // Split block before and after coro.suspend and add a jump from an entry
    // switch:
    //
    //  whateverBB:
    //    whatever
    //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
    //    switch i8 %0, label %suspend[i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // becomes:
    //
    //  whateverBB:
    //    whatever
    //    br label %resume.0.landing
    //
    //  resume.0: ; <--- jump from the switch in the resume.entry
    //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
    //    br label %resume.0.landing
    //
    //  resume.0.landing:
    //    %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
    //    switch i8 % 1, label %suspend [i8 0, label %resume
    //                                   i8 1, label %cleanup]

    auto *SuspendBB = S->getParent();
    auto *ResumeBB =
        SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
    auto *LandingBB = ResumeBB->splitBasicBlock(
        S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
    Switch->addCase(IndexVal, ResumeBB);

    cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
    // The phi yields -1 on the fall-through (first-suspend) path and the
    // coro.suspend result on the resumed path.
    auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
    S->replaceAllUsesWith(PN);
    PN->addIncoming(Builder.getInt8(-1), SuspendBB);
    PN->addIncoming(S, ResumeBB);

    ++SuspendIndex;
  }

  Builder.SetInsertPoint(UnreachBB);
  Builder.CreateUnreachable();

  Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
}


// Rewrite final suspend point handling. We do not use suspend index to
// represent the final suspend point. Instead we zero-out ResumeFnAddr in the
// coroutine frame, since it is undefined behavior to resume a coroutine
// suspended at the final suspend point. Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point (if present) is always the last element of CoroSuspends array).
// In the destroy function, we add a code sequence to check if ResumeFnAddress
// is Null, and if so, jump to the appropriate label to handle cleanup from the
// final suspend point.
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);
  // The final suspend, if present, is always the last case of the resume
  // switch, so it can be dropped unconditionally.
  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    // In destroy/cleanup clones, detect the final suspend by testing whether
    // ResumeFnAddr in the frame was zeroed, and branch to its cleanup block.
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());
    auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
                               coro::Shape::SwitchFieldIndex::Resume,
                               "ResumeFn.addr");
    auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
                                    GepIndex);
    auto *Cond = Builder.CreateIsNull(Load);
    Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

/// Create (but do not fill in) the declaration for a clone of OrigF, inserted
/// into the module before InsertBefore. The declaration gets internal linkage,
/// the resume function type from Shape, and a NonNull (and, except for async,
/// NoAlias) attribute on its frame/context parameter.
static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore) {
  Module *M = OrigF.getParent();
  auto *FnTy = Shape.getResumeFunctionType();

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);
  NewF->addParamAttr(0, Attribute::NonNull);

  // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
  if (Shape.ABI != coro::ABI::Async)
    NewF->addParamAttr(0, Attribute::NoAlias);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty()) return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value*, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  // (Post-increment inside the loop because erasing EVI invalidates its use.)
  for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
    auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty()) return;

  // Otherwise, we need to create an aggregate.
  Value *Agg = UndefValue::get(NewS->getType());
  for (size_t I = 0, E = Args.size(); I != E; ++I)
    Agg = Builder.CreateInsertValue(Agg, Args[I], I);

  NewS->replaceAllUsesWith(Agg);
}

/// Replace the remaining (non-active) coro.suspend calls in the clone with a
/// constant appropriate to the clone kind, and erase them.
void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point, replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend) continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

/// Lower the cloned copies of all coro.end intrinsics in the clone.
void CoroCloner::replaceCoroEnds() {
  for (CoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet.  We'll just be rebuilding that later.
    auto NewCE = cast<CoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}

/// Lower the swifterror intrinsic calls in F (or in its clone when VMap is
/// non-null): a zero-argument call becomes a load from the swifterror slot,
/// a one-argument call becomes a store to it. The slot is the function's
/// swifterror argument if it has one, otherwise a new swifterror alloca.
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot) {
      assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
             "multiple swifterror slots in function with different types");
      return CachedSlot;
    }

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        assert(Arg.getType()->getPointerElementType() == ValueTy &&
               "swifterror argument does not have expected type");
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->getNumArgOperands() == 0) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->getNumArgOperands() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

/// Lower the swifterror intrinsics in this clone via the shared helper.
void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

/// Turn the clone of AllocaSpillBlock into the entry block of the clone and
/// make it branch to the right place for this clone kind.
void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine.  Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block.  There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function.  Make the entry block branch to this.
    auto *SwitchBB =
      cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point.  Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any alloca that's still being used but not reachable from the new entry
  // needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
    Instruction &I = *IT++;
    if (!isa<AllocaInst>(&I) || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}

/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is located as a tail to the async
  // context header.
  case coro::ABI::Async: {
    auto *CalleeContext = NewF->getArg(Shape.AsyncLowering.ContextArgNo);
    auto *FramePtrTy = Shape.FrameTy->getPointerTo();
    auto *ProjectionFunc = cast<CoroSuspendAsyncInst>(ActiveSuspend)
                               ->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(
        cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
        ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = Shape.FrameTy->getPointerTo();

    // If the storage is inline, just bitcast to the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return Builder.CreateBitCast(NewStorage, FramePtrTy);

    // Otherwise, load the real frame from the opaque storage.
    auto FramePtrPtr =
      Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
    return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
  }
  }
  llvm_unreachable("bad ABI");
}

/// Mark parameter ParamIndex in Attrs as a NonNull, NoAlias frame pointer
/// that is dereferenceable up to Size bytes with the given alignment.
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex,
                                 uint64_t Size, Align Alignment) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoAlias);
  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end());
  }

  // Replace all args with undefs. The buildCoroutineFrame algorithm has
  // already rewritten accesses to the args that occur after suspend points
  // with loads and stores to/from the coroutine frame.
  for (Argument &A : OrigF.args())
    VMap[&A] = UndefValue::get(A.getType());

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns);

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);

  auto &Context = NewF->getContext();

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function.  This should include optimization settings and so on.
    NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
                                      OrigAttrs.getFnAttributes());

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.FrameSize, Shape.FrameAlign);
    break;
  case coro::ABI::Async:
    break;
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment());
    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless.  Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return, /*UseLLVMTrap=*/false);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the verifier.
  // These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  OldVFrame->replaceAllUsesWith(NewVFrame);

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via switch (allows to
    // remove final case from the switch, since it is undefined behavior to
    // resume the coroutine suspended at the final suspend point.
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// new entry block and replacing coro.suspend an appropriate value to force
// resume or cleanup pass for every suspend point.
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

/// Remove calls to llvm.coro.end in the original function.
static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
  for (auto End : Shape.CoroEnds) {
    replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
  }
}

// Rewrite the constant initializer of the async function pointer so that its
// second field carries the context size computed for this coroutine's frame.
static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  // Rebuild the struct keeping field 0 (the relative function offset) and
  // substituting the new size for field 1.
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}

// Replace all llvm.coro.size intrinsics with the frame's alloc size now that
// the frame type is known. For the async ABI, also publish the context size
// through the async function pointer.
static void replaceFrameSize(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  auto Size = DL.getTypeAllocSize(Shape.FrameTy);
  auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}

// Create a global constant array containing pointers to functions provided and
// set Info parameter of CoroBegin to point at this constant. Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//                 [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}

// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  // Insert right after the frame allocation so the slots are initialized
  // before any possible suspend.
  IRBuilder<> Builder(Shape.FramePtr->getNextNode());
  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elided the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

// Verify a freshly split function and run a small scalar cleanup pipeline
// (SCCP, CFG simplification, EarlyCSE) over it.
static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");

  legacy::FunctionPassManager FPM(F.getParent());

  FPM.add(createSCCPPass());
  FPM.add(createCFGSimplificationPass());
  FPM.add(createEarlyCSEPass());
  FPM.add(createCFGSimplificationPass());

  FPM.doInitialization();
  FPM.run(F);
  FPM.doFinalization();
}

// Assuming we arrived at the block NewBlock from Prev instruction, store
// PHI's incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret, with a clone of a ret
// instruction. Suspend instruction represented by a switch, track the PHI
// values and select the correct case successor when possible.
// Walk forward from InitialInst through unconditional branches and
// constant-foldable conditional branches/switches. If the walk reaches a
// ReturnInst, replace InitialInst with a clone of that return and report
// success; otherwise leave the IR untouched and return false.
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;

  Instruction *I = InitialInst;
  while (I->isTerminator() ||
         (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from basic block of InitialInst
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = BB;
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      auto *BR = dyn_cast<BranchInst>(I->getNextNode());
      if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the case number of suspended switch instruction is reduced to
        // 1, then it is simplified to CmpInst in llvm::ConstantFoldTerminator.
        // And the comparison looks like : %cond = icmp eq i8 %V, constant.
        ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
        if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
          Value *V = CondCmp->getOperand(0);
          auto it = ResolvedValues.find(V);
          if (it != ResolvedValues.end())
            V = it->second;

          if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
            // Fold the two-way branch using the resolved constant.
            BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
                                 ? BR->getSuccessor(0)
                                 : BR->getSuccessor(1);
            scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
            I = BB->getFirstNonPHIOrDbgOrLifetime();
            continue;
          }
        }
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    return false;
  }
  return false;
}

// Check whether CI obeys the rules of musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match prototypes and calling conventions of resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI should not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
      Attribute::SwiftSelf,    Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttribute(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instructions that are immediately followed by a
// suspend (i.e. ret). We do this even in -O0 to support guaranteed tail call
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine that has
// identical signature and calling convention as the coro.resume call.
static void addMustTailToCoroResumes(Function &F) {
  bool changed = false;

  // Collect potential resume instructions.
  SmallVector<CallInst *, 4> Resumes;
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
    removeUnreachableBlocks(F);
}

// Coroutine has no suspend points. Remove heap allocation for the coroutine
// frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      // Replace the heap allocation with a stack slot: coro.alloc becomes
      // 'false' (no heap allocation) and coro.begin becomes the alloca.
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// SimplifySuspendPoint needs to check that there are no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine and if that is the case we cannot eliminate the suspend point.
// A null 'To' means "scan to the end of From's block".
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

// Check for calls in any block strictly between SaveBB and ResDesBB.
static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (Set.count(Pred) == 0)
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

// Check for any call (other than intrinsics) on any path from Save to
// ResumeOrDestroy.
static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no more users remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  // Compact the list in place: a simplified suspend is swapped with the last
  // live entry, and the vector is truncated to N at the end.
  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);
}

// Split a switch-ABI coroutine into the ramp function plus resume, destroy
// and cleanup clones, and record the clones for coro elision.
static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  addMustTailToCoroResumes(*ResumeClone);

  // Store addresses resume/destroy/cleanup functions in the coroutine frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to resume/destroy/clone functions pointed
  // by the last argument of @llvm.coro.info, so that CoroElide pass can
  // determine the correct function to call.
  setCoroInfo(F, Shape, Clones);
}

// Replace the llvm.coro.async.resume intrinsic feeding Suspend with the
// given continuation function (cast to i8*), and neutralize the suspend's
// resume operand.
static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(0, UndefValue::get(Int8PtrTy));
}

/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
                            ArrayRef<Value *> FnArgs,
                            SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}

// Split an async-ABI coroutine: one continuation clone per suspend point,
// each suspend rewritten into a musttail-style call (inlined) followed by a
// return.
static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
                                SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
  IRBuilder<> Builder(Id);

  // The frame lives at a fixed offset inside the async context storage.
  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);

    // Create the clone declaration.
    auto *Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(Idx), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    auto DbgLoc = Suspend->getDebugLoc();
    SmallVector<Value *, 8> Args(Suspend->operand_values());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(3).drop_back(1);
    auto FnTy = cast<FunctionType>(Fn->getType()->getPointerElementType());
    // Coerce the arguments, llvm optimizations seem to ignore the types in
    // vaarg functions and throws away casts in optimized mode.
    SmallVector<Value *, 8> CallArgs;
    coerceArguments(Builder, FnTy, FnArgs, CallArgs);
    auto *TailCall = Builder.CreateCall(FnTy, Fn, CallArgs);
    TailCall->setDebugLoc(DbgLoc);
    TailCall->setTailCall();
    TailCall->setCallingConv(Fn->getCallingConv());
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    auto InlineRes = InlineFunction(*TailCall, FnInfo);
    assert(InlineRes.isSuccess() && "Expected inlining to succeed");
    (void)InlineRes;

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}

// Split a retcon/retcon.once coroutine: allocate the frame (unless it fits
// in the caller-provided storage), build a unified return block whose PHIs
// merge each suspend's continuation and yielded values, and clone a
// continuation function per suspend point.
static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Allocate the frame.
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}

namespace {
// Pretty-stack-trace entry naming the coroutine being split, printed if the
// compiler crashes inside this pass.
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;
public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
}

// Top-level driver: analyze F into a coro::Shape, build the frame, and
// dispatch to the ABI-specific splitting routine (or elide the split when
// there are no suspend points). Returns the Shape for the callers to update
// the call graph with.
static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones,
                                  bool ReuseFrameSlot) {
  PrettyStackTraceFunction prettyStackTrace(F);

  // The suspend-crossing algorithm in buildCoroutineFrame get tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
  removeUnreachableBlocks(F);

  coro::Shape Shape(F, ReuseFrameSlot);
  if (!Shape.CoroBegin)
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSize(Shape);

  // If there are no suspend points, no split required, just remove
  // the allocation and deallocation blocks, they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Async:
      splitAsyncCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  return Shape;
}

// Legacy-PM variant: clean up the ramp function and register the clones in
// the CallGraph/SCC.
static void
updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
                                   const SmallVectorImpl<Function *> &Clones,
                                   CallGraph &CG, CallGraphSCC &SCC) {
  if (!Shape.CoroBegin)
    return;

  removeCoroEnds(Shape, &CG);
  postSplitCleanup(F);

  // Update call graph and add the functions we created to the SCC.
  coro::updateCallGraph(F, Clones, CG, SCC);
}

// New-PM variant: drop the coro.end intrinsics, clean up the ramp function,
// and refresh the LazyCallGraph.
static void updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  if (!Shape.CoroBegin)
    return;

  for (llvm::CoroEndInst *End : Shape.CoroEnds) {
    auto &Context = End->getContext();
    End->replaceAllUsesWith(ConstantInt::getFalse(Context));
    End->eraseFromParent();
  }

  postSplitCleanup(N.getFunction());

  // We've inserted instructions into coroutine 'f' that reference the three new
  // coroutine funclets. We must now update the call graph so that reference
  // edges between 'f' and its funclets are added to it. LazyCallGraph only
  // allows CGSCC passes to insert "trivial" reference edges. We've ensured
  // above, by inserting the funclets into the same SCC as the coroutine, that
  // the edges are trivial.
  //
  // N.B.: If we didn't update the call graph here, a CGSCCToFunctionPassAdaptor
  // later in this CGSCC pass pipeline may be run, triggering a call graph
  // update of its own. Function passes run by the adaptor are not permitted to
  // add new edges of any kind to the graph, and the new edges inserted by this
  // pass would be misattributed to that unrelated function pass.
  updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
}

// When we see the coroutine the first time, we insert an indirect call to a
// devirt trigger function and mark the coroutine that it is now ready for
// split.
// Async lowering uses this after it has split the function to restart the
// pipeline.
1717 static void prepareForSplit(Function &F, CallGraph &CG, 1718 bool MarkForAsyncRestart = false) { 1719 Module &M = *F.getParent(); 1720 LLVMContext &Context = F.getContext(); 1721 #ifndef NDEBUG 1722 Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN); 1723 assert(DevirtFn && "coro.devirt.trigger function not found"); 1724 #endif 1725 1726 F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart 1727 ? ASYNC_RESTART_AFTER_SPLIT 1728 : PREPARED_FOR_SPLIT); 1729 1730 // Insert an indirect call sequence that will be devirtualized by CoroElide 1731 // pass: 1732 // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1) 1733 // %1 = bitcast i8* %0 to void(i8*)* 1734 // call void %1(i8* null) 1735 coro::LowererBase Lowerer(M); 1736 Instruction *InsertPt = 1737 MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime() 1738 : F.getEntryBlock().getTerminator(); 1739 auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context)); 1740 auto *DevirtFnAddr = 1741 Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt); 1742 FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context), 1743 {Type::getInt8PtrTy(Context)}, false); 1744 auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt); 1745 1746 // Update CG graph with an indirect call we just added. 1747 CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode()); 1748 } 1749 1750 // Make sure that there is a devirtualization trigger function that the 1751 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt 1752 // trigger function is not found, we will create one and add it to the current 1753 // SCC. 
static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
  Module &M = CG.getModule();
  // Another SCC processed earlier may already have created the trigger.
  if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
    return;

  // Create a private, always-inline "void (i8*)" function with an empty body.
  LLVMContext &C = M.getContext();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
                                 /*isVarArg=*/false);
  Function *DevirtFn =
      Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
                       CORO_DEVIRT_TRIGGER_FN, &M);
  DevirtFn->addFnAttr(Attribute::AlwaysInline);
  auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
  ReturnInst::Create(C, Entry);

  auto *Node = CG.getOrInsertFunction(DevirtFn);

  // Re-initialize the SCC with the trigger's node appended so the CGSCC pass
  // manager processes it as part of the current SCC.
  SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
  Nodes.push_back(Node);
  SCC.initialize(Nodes);
}

/// Replace a call to llvm.coro.prepare.retcon (new pass manager version).
/// Note: the CG and C parameters are not referenced in this overload; the
/// peephole below never creates edges that LazyCallGraph needs to learn about.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
    // Look for bitcasts back to the original function type. Advance the
    // iterator before touching the user: erasing the cast below would
    // invalidate the current use.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts: walk up the chain of casts feeding the prepare,
  // deleting each one that became unused.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}
/// Replace a call to llvm.coro.prepare.retcon (legacy pass manager version).
/// Unlike the overload above, this one must also keep the legacy CallGraph in
/// sync when the peephole turns indirect calls into direct ones.
static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Find call graph nodes for the preparation. Only possible when the
  // stripped operand is a concrete Function.
  CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
  if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
    PrepareUserNode = CG[Prepare->getFunction()];
    FnNode = CG[ConcreteFn];
  }

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
       UI != UE; ) {
    // Look for bitcasts back to the original function type. Advance the
    // iterator before touching the user, since the cast is erased below.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType()) continue;

    // Check whether the replacement will introduce new direct calls.
    // If so, we'll need to update the call graph.
    if (PrepareUserNode) {
      for (auto &Use : Cast->uses()) {
        if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
          if (!CB->isCallee(&Use))
            continue;
          PrepareUserNode->removeCallEdgeFor(*CB);
          PrepareUserNode->addCalledFunction(CB, FnNode);
        }
      }
    }

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty()) break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace every call to \p PrepareFn (new pass manager version). Returns
/// true if any call was replaced.
static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}

/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
/// IPO from operating on calls to a retcon coroutine before it's been
/// split. This is only safe to do after we've split all retcon
/// coroutines in the module. We can do that this in this pass because
/// this pass does promise to split all retcon coroutines (as opposed to
/// switch coroutines, which are lowered in multiple stages).
static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
  bool Changed = false;
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
       PI != PE; ) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG);
    Changed = true;
  }

  return Changed;
}

/// Returns true if \p M declares any of the intrinsics that give this pass
/// work to do: coroutine bodies (coro.begin) or retcon/async prepares.
static bool declaresCoroSplitIntrinsics(const Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.begin",
                                      "llvm.coro.prepare.retcon",
                                      "llvm.coro.prepare.async"});
}

/// Append the function named \p Name to \p Fns if the module declares it and
/// it has at least one use.
static void addPrepareFunction(const Module &M,
                               SmallVectorImpl<Function *> &Fns,
                               StringRef Name) {
  auto *PrepareFn = M.getFunction(Name);
  if (PrepareFn && !PrepareFn->use_empty())
    Fns.push_back(PrepareFn);
}

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  // non-zero number of nodes, so we assume that here and grab the first
  // node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  if (!declaresCoroSplitIntrinsics(M))
    return PreservedAnalyses::all();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *, 4> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
      Coroutines.push_back(&N);

  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  // No coroutines in this SCC: only the prepares need lowering. (The second
  // replaceAllPrepares loop below then sees no remaining uses and is a no-op.)
  if (Coroutines.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
    StringRef Value = Attr.getValueAsString();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "' state: " << Value << "\n");
    if (Value == UNPREPARED_FOR_SPLIT) {
      // Enqueue a second iteration of the CGSCC pipeline.
      // N.B.:
      // The CoroSplitLegacy pass "triggers" a restart of the CGSCC pass
      // pipeline by inserting an indirect function call that the
      // CoroElideLegacy pass then replaces with a direct function call. The
      // legacy CGSCC pipeline's implicit behavior was as if wrapped in the new
      // pass manager abstraction DevirtSCCRepeatedPass.
      //
      // This pass does not need to "trigger" another run of the pipeline.
      // Instead, it simply enqueues the same RefSCC onto the pipeline's
      // worklist.
      UR.CWorklist.insert(&C);
      F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
      continue;
    }
    F.removeFnAttr(CORO_PRESPLIT_ATTR);

    SmallVector<Function *, 4> Clones;
    const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
    updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);

    if (Shape.ABI == coro::ABI::Async && !Shape.CoroSuspends.empty()) {
      // We want the inliner to be run on the newly inserted functions.
      UR.CWorklist.insert(&C);
    }
  }

  if (!PrepareFns.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  // Splitting rewrites the function and creates new ones; conservatively
  // report that nothing is preserved.
  return PreservedAnalyses::none();
}

namespace {

// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible.
// Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to initial, resume and destroy invocations of the coroutine,
// add them to the current SCC and restart the IPO pipeline to optimize the
// coroutine subfunctions we extracted before proceeding to the caller of the
// coroutine.
struct CoroSplitLegacy : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid

  CoroSplitLegacy(bool ReuseFrameSlot = false)
      : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
    initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
  }

  // Set in doInitialization: true iff the module declares any coro-split
  // intrinsics, i.e. this pass has any work to do.
  bool Run = false;
  // Forwarded to splitCoroutine; controls frame-slot reuse during lowering.
  bool ReuseFrameSlot;

  // A coroutine is identified by the presence of coro.begin intrinsic, if
  // we don't have any, this pass has nothing to do.
  bool doInitialization(CallGraph &CG) override {
    Run = declaresCoroSplitIntrinsics(CG.getModule());
    return CallGraphSCCPass::doInitialization(CG);
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (!Run)
      return false;

    // Check for uses of llvm.coro.prepare.retcon.
    SmallVector<Function *, 2> PrepareFns;
    auto &M = SCC.getCallGraph().getModule();
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

    // Find coroutines for processing.
    SmallVector<Function *, 4> Coroutines;
    for (CallGraphNode *CGN : SCC)
      if (auto *F = CGN->getFunction())
        if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
          Coroutines.push_back(F);

    if (Coroutines.empty() && PrepareFns.empty())
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

    // Only prepares to lower: replace them and report whether IR changed.
    if (Coroutines.empty()) {
      bool Changed = false;
      for (auto *PrepareFn : PrepareFns)
        Changed |= replaceAllPrepares(PrepareFn, CG);
      return Changed;
    }

    createDevirtTriggerFunc(CG, SCC);

    // Split all the coroutines.
    for (Function *F : Coroutines) {
      Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
      StringRef Value = Attr.getValueAsString();
      LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
                        << "' state: " << Value << "\n");
      // Async lowering marks coroutines to trigger a restart of the pipeline
      // after it has split them.
      if (Value == ASYNC_RESTART_AFTER_SPLIT) {
        F->removeFnAttr(CORO_PRESPLIT_ATTR);
        continue;
      }
      if (Value == UNPREPARED_FOR_SPLIT) {
        prepareForSplit(*F, CG);
        continue;
      }
      F->removeFnAttr(CORO_PRESPLIT_ATTR);

      SmallVector<Function *, 4> Clones;
      const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
      updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
      if (Shape.ABI == coro::ABI::Async) {
        // Restart SCC passes.
        // Mark function for CoroElide pass. It will devirtualize causing a
        // restart of the SCC pipeline.
        prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
      }
    }

    for (auto *PrepareFn : PrepareFns)
      replaceAllPrepares(PrepareFn, CG);

    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override { return "Coroutine Splitting"; }
};

} // end anonymous namespace

char CoroSplitLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)

// Factory used by the legacy pass-manager pipeline setup.
Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
  return new CoroSplitLegacy(ReuseFrameSlot);
}