//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroSplit.h"
#include "CoroInstr.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/LazyCallGraph.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/CallGraphUpdater.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "coro-split"

namespace {

/// A little helper class for building the clones of a coroutine: the shared
/// resume/unwind/cleanup functions of the switch lowering, and the
/// per-suspend continuation and async resume functions.
class CoroCloner {
public:
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
92 SwitchCleanup, 93 94 /// An individual continuation function. 95 Continuation, 96 97 /// An async resume function. 98 Async, 99 }; 100 101 private: 102 Function &OrigF; 103 Function *NewF; 104 const Twine &Suffix; 105 coro::Shape &Shape; 106 Kind FKind; 107 ValueToValueMapTy VMap; 108 IRBuilder<> Builder; 109 Value *NewFramePtr = nullptr; 110 111 /// The active suspend instruction; meaningful only for continuation and async 112 /// ABIs. 113 AnyCoroSuspendInst *ActiveSuspend = nullptr; 114 115 public: 116 /// Create a cloner for a switch lowering. 117 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, 118 Kind FKind) 119 : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape), 120 FKind(FKind), Builder(OrigF.getContext()) { 121 assert(Shape.ABI == coro::ABI::Switch); 122 } 123 124 /// Create a cloner for a continuation lowering. 125 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, 126 Function *NewF, AnyCoroSuspendInst *ActiveSuspend) 127 : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape), 128 FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation), 129 Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) { 130 assert(Shape.ABI == coro::ABI::Retcon || 131 Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async); 132 assert(NewF && "need existing function for continuation"); 133 assert(ActiveSuspend && "need active suspend point for continuation"); 134 } 135 136 Function *getFunction() const { 137 assert(NewF != nullptr && "declaration not yet set"); 138 return NewF; 139 } 140 141 void create(); 142 143 private: 144 bool isSwitchDestroyFunction() { 145 switch (FKind) { 146 case Kind::Async: 147 case Kind::Continuation: 148 case Kind::SwitchResume: 149 return false; 150 case Kind::SwitchUnwind: 151 case Kind::SwitchCleanup: 152 return true; 153 } 154 llvm_unreachable("Unknown CoroCloner::Kind enum"); 155 } 156 157 void replaceEntryBlock(); 158 Value *deriveNewFramePointer(); 159 void replaceRetconOrAsyncSuspendUses(); 160 void replaceCoroSuspends(); 161 void replaceCoroEnds(); 162 void replaceSwiftErrorOps(); 163 void salvageDebugInfo(); 164 void handleFinalSuspend(); 165 }; 166 167 } // end anonymous namespace 168 169 static void maybeFreeRetconStorage(IRBuilder<> &Builder, 170 const coro::Shape &Shape, Value *FramePtr, 171 CallGraph *CG) { 172 assert(Shape.ABI == coro::ABI::Retcon || 173 Shape.ABI == coro::ABI::RetconOnce); 174 if (Shape.RetconLowering.IsFrameInlineInStorage) 175 return; 176 177 Shape.emitDealloc(Builder, FramePtr, CG); 178 } 179 180 /// Replace an llvm.coro.end.async. 181 /// Will inline the must tail call function call if there is one. 182 /// \returns true if cleanup of the coro.end block is needed, false otherwise. 183 static bool replaceCoroEndAsync(AnyCoroEndInst *End) { 184 IRBuilder<> Builder(End); 185 186 auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End); 187 if (!EndAsync) { 188 Builder.CreateRetVoid(); 189 return true /*needs cleanup of coro.end block*/; 190 } 191 192 auto *MustTailCallFunc = EndAsync->getMustTailCallFunction(); 193 if (!MustTailCallFunc) { 194 Builder.CreateRetVoid(); 195 return true /*needs cleanup of coro.end block*/; 196 } 197 198 // Move the must tail call from the predecessor block into the end block. 
199 auto *CoroEndBlock = End->getParent(); 200 auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor(); 201 assert(MustTailCallFuncBlock && "Must have a single predecessor block"); 202 auto It = MustTailCallFuncBlock->getTerminator()->getIterator(); 203 auto *MustTailCall = cast<CallInst>(&*std::prev(It)); 204 CoroEndBlock->getInstList().splice( 205 End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall); 206 207 // Insert the return instruction. 208 Builder.SetInsertPoint(End); 209 Builder.CreateRetVoid(); 210 InlineFunctionInfo FnInfo; 211 212 // Remove the rest of the block, by splitting it into an unreachable block. 213 auto *BB = End->getParent(); 214 BB->splitBasicBlock(End); 215 BB->getTerminator()->eraseFromParent(); 216 217 auto InlineRes = InlineFunction(*MustTailCall, FnInfo); 218 assert(InlineRes.isSuccess() && "Expected inlining to succeed"); 219 (void)InlineRes; 220 221 // We have cleaned up the coro.end block above. 222 return false; 223 } 224 225 /// Replace a non-unwind call to llvm.coro.end. 226 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End, 227 const coro::Shape &Shape, Value *FramePtr, 228 bool InResume, CallGraph *CG) { 229 // Start inserting right before the coro.end. 230 IRBuilder<> Builder(End); 231 232 // Create the return instruction. 233 switch (Shape.ABI) { 234 // The cloned functions in switch-lowering always return void. 235 case coro::ABI::Switch: 236 // coro.end doesn't immediately end the coroutine in the main function 237 // in this lowering, because we need to deallocate the coroutine. 238 if (!InResume) 239 return; 240 Builder.CreateRetVoid(); 241 break; 242 243 // In async lowering this returns. 244 case coro::ABI::Async: { 245 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End); 246 if (!CoroEndBlockNeedsCleanup) 247 return; 248 break; 249 } 250 251 // In unique continuation lowering, the continuations always return void. 252 // But we may have implicitly allocated storage. 253 case coro::ABI::RetconOnce: 254 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG); 255 Builder.CreateRetVoid(); 256 break; 257 258 // In non-unique continuation lowering, we signal completion by returning 259 // a null continuation. 260 case coro::ABI::Retcon: { 261 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG); 262 auto RetTy = Shape.getResumeFunctionType()->getReturnType(); 263 auto RetStructTy = dyn_cast<StructType>(RetTy); 264 PointerType *ContinuationTy = 265 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy); 266 267 Value *ReturnValue = ConstantPointerNull::get(ContinuationTy); 268 if (RetStructTy) { 269 ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy), 270 ReturnValue, 0); 271 } 272 Builder.CreateRet(ReturnValue); 273 break; 274 } 275 } 276 277 // Remove the rest of the block, by splitting it into an unreachable block. 278 auto *BB = End->getParent(); 279 BB->splitBasicBlock(End); 280 BB->getTerminator()->eraseFromParent(); 281 } 282 283 /// Replace an unwind call to llvm.coro.end. 284 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, 285 Value *FramePtr, bool InResume, 286 CallGraph *CG) { 287 IRBuilder<> Builder(End); 288 289 switch (Shape.ABI) { 290 // In switch-lowering, this does nothing in the main function. 291 case coro::ABI::Switch: 292 if (!InResume) 293 return; 294 break; 295 // In async lowering this does nothing. 296 case coro::ABI::Async: 297 break; 298 // In continuation-lowering, this frees the continuation storage. 
299 case coro::ABI::Retcon: 300 case coro::ABI::RetconOnce: 301 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG); 302 break; 303 } 304 305 // If coro.end has an associated bundle, add cleanupret instruction. 306 if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) { 307 auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]); 308 auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr); 309 End->getParent()->splitBasicBlock(End); 310 CleanupRet->getParent()->getTerminator()->eraseFromParent(); 311 } 312 } 313 314 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, 315 Value *FramePtr, bool InResume, CallGraph *CG) { 316 if (End->isUnwind()) 317 replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG); 318 else 319 replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG); 320 321 auto &Context = End->getContext(); 322 End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context) 323 : ConstantInt::getFalse(Context)); 324 End->eraseFromParent(); 325 } 326 327 // Create an entry block for a resume function with a switch that will jump to 328 // suspend points. 329 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) { 330 assert(Shape.ABI == coro::ABI::Switch); 331 LLVMContext &C = F.getContext(); 332 333 // resume.entry: 334 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, 335 // i32 2 336 // % index = load i32, i32* %index.addr 337 // switch i32 %index, label %unreachable [ 338 // i32 0, label %resume.0 339 // i32 1, label %resume.1 340 // ... 341 // ] 342 343 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F); 344 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F); 345 346 IRBuilder<> Builder(NewEntry); 347 auto *FramePtr = Shape.FramePtr; 348 auto *FrameTy = Shape.FrameTy; 349 auto *GepIndex = Builder.CreateStructGEP( 350 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr"); 351 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index"); 352 auto *Switch = 353 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size()); 354 Shape.SwitchLowering.ResumeSwitch = Switch; 355 356 size_t SuspendIndex = 0; 357 for (auto *AnyS : Shape.CoroSuspends) { 358 auto *S = cast<CoroSuspendInst>(AnyS); 359 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex); 360 361 // Replace CoroSave with a store to Index: 362 // %index.addr = getelementptr %f.frame... (index field number) 363 // store i32 0, i32* %index.addr1 364 auto *Save = S->getCoroSave(); 365 Builder.SetInsertPoint(Save); 366 if (S->isFinal()) { 367 // Final suspend point is represented by storing zero in ResumeFnAddr. 
368 auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr, 369 coro::Shape::SwitchFieldIndex::Resume, 370 "ResumeFn.addr"); 371 auto *NullPtr = ConstantPointerNull::get(cast<PointerType>( 372 FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume))); 373 Builder.CreateStore(NullPtr, GepIndex); 374 } else { 375 auto *GepIndex = Builder.CreateStructGEP( 376 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr"); 377 Builder.CreateStore(IndexVal, GepIndex); 378 } 379 Save->replaceAllUsesWith(ConstantTokenNone::get(C)); 380 Save->eraseFromParent(); 381 382 // Split block before and after coro.suspend and add a jump from an entry 383 // switch: 384 // 385 // whateverBB: 386 // whatever 387 // %0 = call i8 @llvm.coro.suspend(token none, i1 false) 388 // switch i8 %0, label %suspend[i8 0, label %resume 389 // i8 1, label %cleanup] 390 // becomes: 391 // 392 // whateverBB: 393 // whatever 394 // br label %resume.0.landing 395 // 396 // resume.0: ; <--- jump from the switch in the resume.entry 397 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false) 398 // br label %resume.0.landing 399 // 400 // resume.0.landing: 401 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0] 402 // switch i8 % 1, label %suspend [i8 0, label %resume 403 // i8 1, label %cleanup] 404 405 auto *SuspendBB = S->getParent(); 406 auto *ResumeBB = 407 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex)); 408 auto *LandingBB = ResumeBB->splitBasicBlock( 409 S->getNextNode(), ResumeBB->getName() + Twine(".landing")); 410 Switch->addCase(IndexVal, ResumeBB); 411 412 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB); 413 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front()); 414 S->replaceAllUsesWith(PN); 415 PN->addIncoming(Builder.getInt8(-1), SuspendBB); 416 PN->addIncoming(S, ResumeBB); 417 418 ++SuspendIndex; 419 } 420 421 Builder.SetInsertPoint(UnreachBB); 422 Builder.CreateUnreachable(); 423 424 Shape.SwitchLowering.ResumeEntryBlock = NewEntry; 425 } 426 427 428 // Rewrite final suspend point handling. We do not use suspend index to 429 // represent the final suspend point. Instead we zero-out ResumeFnAddr in the 430 // coroutine frame, since it is undefined behavior to resume a coroutine 431 // suspended at the final suspend point. Thus, in the resume function, we can 432 // simply remove the last case (when coro::Shape is built, the final suspend 433 // point (if present) is always the last element of CoroSuspends array). 434 // In the destroy function, we add a code sequence to check if ResumeFnAddress 435 // is Null, and if so, jump to the appropriate label to handle cleanup from the 436 // final suspend point. 
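//
// As an illustrative sketch only (the frame layout, field index and label
// names here are hypothetical, following the examples above), the check
// inserted into the destroy function looks roughly like:
//
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
//                                           i32 0, i32 0
//   %ResumeFn = load void (%f.Frame*)*, void (%f.Frame*)** %ResumeFn.addr
//   %is.final = icmp eq void (%f.Frame*)* %ResumeFn, null
//   br i1 %is.final, label %final.cleanup, label %Switch
//
// so a coroutine suspended at the final suspend point is cleaned up without
// consulting the suspend index.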
437 void CoroCloner::handleFinalSuspend() { 438 assert(Shape.ABI == coro::ABI::Switch && 439 Shape.SwitchLowering.HasFinalSuspend); 440 auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]); 441 auto FinalCaseIt = std::prev(Switch->case_end()); 442 BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor(); 443 Switch->removeCase(FinalCaseIt); 444 if (isSwitchDestroyFunction()) { 445 BasicBlock *OldSwitchBB = Switch->getParent(); 446 auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch"); 447 Builder.SetInsertPoint(OldSwitchBB->getTerminator()); 448 auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr, 449 coro::Shape::SwitchFieldIndex::Resume, 450 "ResumeFn.addr"); 451 auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(), 452 GepIndex); 453 auto *Cond = Builder.CreateIsNull(Load); 454 Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB); 455 OldSwitchBB->getTerminator()->eraseFromParent(); 456 } 457 } 458 459 static FunctionType * 460 getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) { 461 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend); 462 auto *StructTy = cast<StructType>(AsyncSuspend->getType()); 463 auto &Context = Suspend->getParent()->getParent()->getContext(); 464 auto *VoidTy = Type::getVoidTy(Context); 465 return FunctionType::get(VoidTy, StructTy->elements(), false); 466 } 467 468 static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape, 469 const Twine &Suffix, 470 Module::iterator InsertBefore, 471 AnyCoroSuspendInst *ActiveSuspend) { 472 Module *M = OrigF.getParent(); 473 auto *FnTy = (Shape.ABI != coro::ABI::Async) 474 ? Shape.getResumeFunctionType() 475 : getFunctionTypeFromAsyncSuspend(ActiveSuspend); 476 477 Function *NewF = 478 Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage, 479 OrigF.getName() + Suffix); 480 if (Shape.ABI != coro::ABI::Async) 481 NewF->addParamAttr(0, Attribute::NonNull); 482 483 // For the async lowering ABI we can't guarantee that the context argument is 484 // not access via a different pointer not based on the argument. 485 if (Shape.ABI != coro::ABI::Async) 486 NewF->addParamAttr(0, Attribute::NoAlias); 487 488 M->getFunctionList().insert(InsertBefore, NewF); 489 490 return NewF; 491 } 492 493 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the 494 /// arguments to the continuation function. 495 /// 496 /// This assumes that the builder has a meaningful insertion point. 497 void CoroCloner::replaceRetconOrAsyncSuspendUses() { 498 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce || 499 Shape.ABI == coro::ABI::Async); 500 501 auto NewS = VMap[ActiveSuspend]; 502 if (NewS->use_empty()) return; 503 504 // Copy out all the continuation arguments after the buffer pointer into 505 // an easily-indexed data structure for convenience. 506 SmallVector<Value*, 8> Args; 507 // The async ABI includes all arguments -- including the first argument. 508 bool IsAsyncABI = Shape.ABI == coro::ABI::Async; 509 for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()), 510 E = NewF->arg_end(); 511 I != E; ++I) 512 Args.push_back(&*I); 513 514 // If the suspend returns a single scalar value, we can just do a simple 515 // replacement. 516 if (!isa<StructType>(NewS->getType())) { 517 assert(Args.size() == 1); 518 NewS->replaceAllUsesWith(Args.front()); 519 return; 520 } 521 522 // Try to peephole extracts of an aggregate return. 
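  // For example (illustrative only; the aggregate type is made up), an
  // extract such as
  //   %second = extractvalue { i8*, i64 } %suspend.result, 1
  // is rewritten to use the corresponding continuation argument directly, so
  // no aggregate needs to be materialized for that use.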
523 for (Use &U : llvm::make_early_inc_range(NewS->uses())) { 524 auto *EVI = dyn_cast<ExtractValueInst>(U.getUser()); 525 if (!EVI || EVI->getNumIndices() != 1) 526 continue; 527 528 EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]); 529 EVI->eraseFromParent(); 530 } 531 532 // If we have no remaining uses, we're done. 533 if (NewS->use_empty()) return; 534 535 // Otherwise, we need to create an aggregate. 536 Value *Agg = UndefValue::get(NewS->getType()); 537 for (size_t I = 0, E = Args.size(); I != E; ++I) 538 Agg = Builder.CreateInsertValue(Agg, Args[I], I); 539 540 NewS->replaceAllUsesWith(Agg); 541 } 542 543 void CoroCloner::replaceCoroSuspends() { 544 Value *SuspendResult; 545 546 switch (Shape.ABI) { 547 // In switch lowering, replace coro.suspend with the appropriate value 548 // for the type of function we're extracting. 549 // Replacing coro.suspend with (0) will result in control flow proceeding to 550 // a resume label associated with a suspend point, replacing it with (1) will 551 // result in control flow proceeding to a cleanup label associated with this 552 // suspend point. 553 case coro::ABI::Switch: 554 SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0); 555 break; 556 557 // In async lowering there are no uses of the result. 558 case coro::ABI::Async: 559 return; 560 561 // In returned-continuation lowering, the arguments from earlier 562 // continuations are theoretically arbitrary, and they should have been 563 // spilled. 564 case coro::ABI::RetconOnce: 565 case coro::ABI::Retcon: 566 return; 567 } 568 569 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) { 570 // The active suspend was handled earlier. 571 if (CS == ActiveSuspend) continue; 572 573 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]); 574 MappedCS->replaceAllUsesWith(SuspendResult); 575 MappedCS->eraseFromParent(); 576 } 577 } 578 579 void CoroCloner::replaceCoroEnds() { 580 for (AnyCoroEndInst *CE : Shape.CoroEnds) { 581 // We use a null call graph because there's no call graph node for 582 // the cloned function yet. We'll just be rebuilding that later. 583 auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]); 584 replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr); 585 } 586 } 587 588 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape, 589 ValueToValueMapTy *VMap) { 590 if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty()) 591 return; 592 Value *CachedSlot = nullptr; 593 auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * { 594 if (CachedSlot) { 595 assert(CachedSlot->getType()->getPointerElementType() == ValueTy && 596 "multiple swifterror slots in function with different types"); 597 return CachedSlot; 598 } 599 600 // Check if the function has a swifterror argument. 601 for (auto &Arg : F.args()) { 602 if (Arg.isSwiftError()) { 603 CachedSlot = &Arg; 604 assert(Arg.getType()->getPointerElementType() == ValueTy && 605 "swifterror argument does not have expected type"); 606 return &Arg; 607 } 608 } 609 610 // Create a swifterror alloca. 611 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg()); 612 auto Alloca = Builder.CreateAlloca(ValueTy); 613 Alloca->setSwiftError(true); 614 615 CachedSlot = Alloca; 616 return Alloca; 617 }; 618 619 for (CallInst *Op : Shape.SwiftErrorOps) { 620 auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op; 621 IRBuilder<> Builder(MappedOp); 622 623 // If there are no arguments, this is a 'get' operation. 
624 Value *MappedResult; 625 if (Op->arg_empty()) { 626 auto ValueTy = Op->getType(); 627 auto Slot = getSwiftErrorSlot(ValueTy); 628 MappedResult = Builder.CreateLoad(ValueTy, Slot); 629 } else { 630 assert(Op->arg_size() == 1); 631 auto Value = MappedOp->getArgOperand(0); 632 auto ValueTy = Value->getType(); 633 auto Slot = getSwiftErrorSlot(ValueTy); 634 Builder.CreateStore(Value, Slot); 635 MappedResult = Slot; 636 } 637 638 MappedOp->replaceAllUsesWith(MappedResult); 639 MappedOp->eraseFromParent(); 640 } 641 642 // If we're updating the original function, we've invalidated SwiftErrorOps. 643 if (VMap == nullptr) { 644 Shape.SwiftErrorOps.clear(); 645 } 646 } 647 648 void CoroCloner::replaceSwiftErrorOps() { 649 ::replaceSwiftErrorOps(*NewF, Shape, &VMap); 650 } 651 652 void CoroCloner::salvageDebugInfo() { 653 SmallVector<DbgVariableIntrinsic *, 8> Worklist; 654 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache; 655 for (auto &BB : *NewF) 656 for (auto &I : BB) 657 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I)) 658 Worklist.push_back(DVI); 659 for (DbgVariableIntrinsic *DVI : Worklist) 660 coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.ReuseFrameSlot); 661 662 // Remove all salvaged dbg.declare intrinsics that became 663 // either unreachable or stale due to the CoroSplit transformation. 664 DominatorTree DomTree(*NewF); 665 auto IsUnreachableBlock = [&](BasicBlock *BB) { 666 return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr, 667 &DomTree); 668 }; 669 for (DbgVariableIntrinsic *DVI : Worklist) { 670 if (IsUnreachableBlock(DVI->getParent())) 671 DVI->eraseFromParent(); 672 else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) { 673 // Count all non-debuginfo uses in reachable blocks. 674 unsigned Uses = 0; 675 for (auto *User : DVI->getVariableLocationOp(0)->users()) 676 if (auto *I = dyn_cast<Instruction>(User)) 677 if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent())) 678 ++Uses; 679 if (!Uses) 680 DVI->eraseFromParent(); 681 } 682 } 683 } 684 685 void CoroCloner::replaceEntryBlock() { 686 // In the original function, the AllocaSpillBlock is a block immediately 687 // following the allocation of the frame object which defines GEPs for 688 // all the allocas that have been moved into the frame, and it ends by 689 // branching to the original beginning of the coroutine. Make this 690 // the entry block of the cloned function. 691 auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]); 692 auto *OldEntry = &NewF->getEntryBlock(); 693 Entry->setName("entry" + Suffix); 694 Entry->moveBefore(OldEntry); 695 Entry->getTerminator()->eraseFromParent(); 696 697 // Clear all predecessors of the new entry block. There should be 698 // exactly one predecessor, which we created when splitting out 699 // AllocaSpillBlock to begin with. 700 assert(Entry->hasOneUse()); 701 auto BranchToEntry = cast<BranchInst>(Entry->user_back()); 702 assert(BranchToEntry->isUnconditional()); 703 Builder.SetInsertPoint(BranchToEntry); 704 Builder.CreateUnreachable(); 705 BranchToEntry->eraseFromParent(); 706 707 // Branch from the entry to the appropriate place. 708 Builder.SetInsertPoint(Entry); 709 switch (Shape.ABI) { 710 case coro::ABI::Switch: { 711 // In switch-lowering, we built a resume-entry block in the original 712 // function. Make the entry block branch to this. 
713 auto *SwitchBB = 714 cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]); 715 Builder.CreateBr(SwitchBB); 716 break; 717 } 718 case coro::ABI::Async: 719 case coro::ABI::Retcon: 720 case coro::ABI::RetconOnce: { 721 // In continuation ABIs, we want to branch to immediately after the 722 // active suspend point. Earlier phases will have put the suspend in its 723 // own basic block, so just thread our jump directly to its successor. 724 assert((Shape.ABI == coro::ABI::Async && 725 isa<CoroSuspendAsyncInst>(ActiveSuspend)) || 726 ((Shape.ABI == coro::ABI::Retcon || 727 Shape.ABI == coro::ABI::RetconOnce) && 728 isa<CoroSuspendRetconInst>(ActiveSuspend))); 729 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]); 730 auto Branch = cast<BranchInst>(MappedCS->getNextNode()); 731 assert(Branch->isUnconditional()); 732 Builder.CreateBr(Branch->getSuccessor(0)); 733 break; 734 } 735 } 736 737 // Any static alloca that's still being used but not reachable from the new 738 // entry needs to be moved to the new entry. 739 Function *F = OldEntry->getParent(); 740 DominatorTree DT{*F}; 741 for (Instruction &I : llvm::make_early_inc_range(instructions(F))) { 742 auto *Alloca = dyn_cast<AllocaInst>(&I); 743 if (!Alloca || I.use_empty()) 744 continue; 745 if (DT.isReachableFromEntry(I.getParent()) || 746 !isa<ConstantInt>(Alloca->getArraySize())) 747 continue; 748 I.moveBefore(*Entry, Entry->getFirstInsertionPt()); 749 } 750 } 751 752 /// Derive the value of the new frame pointer. 753 Value *CoroCloner::deriveNewFramePointer() { 754 // Builder should be inserting to the front of the new entry block. 755 756 switch (Shape.ABI) { 757 // In switch-lowering, the argument is the frame pointer. 758 case coro::ABI::Switch: 759 return &*NewF->arg_begin(); 760 // In async-lowering, one of the arguments is an async context as determined 761 // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of 762 // the resume function from the async context projection function associated 763 // with the active suspend. The frame is located as a tail to the async 764 // context header. 765 case coro::ABI::Async: { 766 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend); 767 auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff; 768 auto *CalleeContext = NewF->getArg(ContextIdx); 769 auto *FramePtrTy = Shape.FrameTy->getPointerTo(); 770 auto *ProjectionFunc = 771 ActiveAsyncSuspend->getAsyncContextProjectionFunction(); 772 auto DbgLoc = 773 cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc(); 774 // Calling i8* (i8*) 775 auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(), 776 ProjectionFunc, CalleeContext); 777 CallerContext->setCallingConv(ProjectionFunc->getCallingConv()); 778 CallerContext->setDebugLoc(DbgLoc); 779 // The frame is located after the async_context header. 780 auto &Context = Builder.getContext(); 781 auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32( 782 Type::getInt8Ty(Context), CallerContext, 783 Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr"); 784 // Inline the projection function. 785 InlineFunctionInfo InlineInfo; 786 auto InlineRes = InlineFunction(*CallerContext, InlineInfo); 787 assert(InlineRes.isSuccess()); 788 (void)InlineRes; 789 return Builder.CreateBitCast(FramePtrAddr, FramePtrTy); 790 } 791 // In continuation-lowering, the argument is the opaque storage. 
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = Shape.FrameTy->getPointerTo();

    // If the storage is inline, just bitcast the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return Builder.CreateBitCast(NewStorage, FramePtrTy);

    // Otherwise, load the real frame from the opaque storage.
    auto FramePtrPtr =
        Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
    return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
  }
  }
  llvm_unreachable("bad ABI");
}

static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex,
                                 uint64_t Size, Align Alignment) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoAlias);
  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::SwiftAsync);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
                              unsigned ParamIndex) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::SwiftSelf);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end(), ActiveSuspend);
  }

  // Replace all args with undefs. The buildCoroutineFrame algorithm has
  // already rewritten accesses to the args that occur after suspend points
  // with loads and stores to/from the coroutine frame.
  for (Argument &A : OrigF.args())
    VMap[&A] = UndefValue::get(A.getType());

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap,
                    CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  // For async functions / continuations, adjust the scope line of the
  // clone to the line number of the suspend point. However, only
  // adjust the scope line when the files are the same. This ensures
  // line number and file name belong together. The scope line is
  // associated with all pre-prologue instructions. This avoids a jump
  // in the linetable from the function declaration to the suspend point.
875 if (DISubprogram *SP = NewF->getSubprogram()) { 876 assert(SP != OrigF.getSubprogram() && SP->isDistinct()); 877 if (ActiveSuspend) 878 if (auto DL = ActiveSuspend->getDebugLoc()) 879 if (SP->getFile() == DL->getFile()) 880 SP->setScopeLine(DL->getLine()); 881 // Update the linkage name to reflect the modified symbol name. It 882 // is necessary to update the linkage name in Swift, since the 883 // mangling changes for resume functions. It might also be the 884 // right thing to do in C++, but due to a limitation in LLVM's 885 // AsmPrinter we can only do this if the function doesn't have an 886 // abstract specification, since the DWARF backend expects the 887 // abstract specification to contain the linkage name and asserts 888 // that they are identical. 889 if (!SP->getDeclaration() && SP->getUnit() && 890 SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift) 891 SP->replaceLinkageName(MDString::get(Context, NewF->getName())); 892 } 893 894 NewF->setLinkage(savedLinkage); 895 NewF->setVisibility(savedVisibility); 896 NewF->setUnnamedAddr(savedUnnamedAddr); 897 NewF->setDLLStorageClass(savedDLLStorageClass); 898 899 // Replace the attributes of the new function: 900 auto OrigAttrs = NewF->getAttributes(); 901 auto NewAttrs = AttributeList(); 902 903 switch (Shape.ABI) { 904 case coro::ABI::Switch: 905 // Bootstrap attributes by copying function attributes from the 906 // original function. This should include optimization settings and so on. 907 NewAttrs = NewAttrs.addFnAttributes(Context, OrigAttrs.getFnAttrs()); 908 909 addFramePointerAttrs(NewAttrs, Context, 0, 910 Shape.FrameSize, Shape.FrameAlign); 911 break; 912 case coro::ABI::Async: { 913 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend); 914 if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo, 915 Attribute::SwiftAsync)) { 916 uint32_t ArgAttributeIndices = 917 ActiveAsyncSuspend->getStorageArgumentIndex(); 918 auto ContextArgIndex = ArgAttributeIndices & 0xff; 919 addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex); 920 921 // `swiftasync` must preceed `swiftself` so 0 is not a valid index for 922 // `swiftself`. 923 auto SwiftSelfIndex = ArgAttributeIndices >> 8; 924 if (SwiftSelfIndex) 925 addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex); 926 } 927 928 // Transfer the original function's attributes. 929 auto FnAttrs = OrigF.getAttributes().getFnAttrs(); 930 NewAttrs = NewAttrs.addFnAttributes(Context, FnAttrs); 931 break; 932 } 933 case coro::ABI::Retcon: 934 case coro::ABI::RetconOnce: 935 // If we have a continuation prototype, just use its attributes, 936 // full-stop. 937 NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes(); 938 939 addFramePointerAttrs(NewAttrs, Context, 0, 940 Shape.getRetconCoroId()->getStorageSize(), 941 Shape.getRetconCoroId()->getStorageAlignment()); 942 break; 943 } 944 945 switch (Shape.ABI) { 946 // In these ABIs, the cloned functions always return 'void', and the 947 // existing return sites are meaningless. Note that for unique 948 // continuations, this includes the returns associated with suspends; 949 // this is fine because we can't suspend twice. 950 case coro::ABI::Switch: 951 case coro::ABI::RetconOnce: 952 // Remove old returns. 953 for (ReturnInst *Return : Returns) 954 changeToUnreachable(Return); 955 break; 956 957 // With multi-suspend continuations, we'll already have eliminated the 958 // original returns and inserted returns before all the suspend points, 959 // so we want to leave any returns in place. 
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the verifier.
  // These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  OldVFrame->replaceAllUsesWith(NewVFrame);

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling, as it is not done via the switch (this
    // allows us to remove the final case from the switch, since it is
    // undefined behavior to resume a coroutine suspended at the final suspend
    // point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// a new entry block and replacing coro.suspend with an appropriate value to
// force the resume or cleanup path for every suspend point.
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

/// Remove calls to llvm.coro.end in the original function.
1039 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) { 1040 for (auto End : Shape.CoroEnds) { 1041 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG); 1042 } 1043 } 1044 1045 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) { 1046 assert(Shape.ABI == coro::ABI::Async); 1047 1048 auto *FuncPtrStruct = cast<ConstantStruct>( 1049 Shape.AsyncLowering.AsyncFuncPointer->getInitializer()); 1050 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0); 1051 auto *OrigContextSize = FuncPtrStruct->getOperand(1); 1052 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(), 1053 Shape.AsyncLowering.ContextSize); 1054 auto *NewFuncPtrStruct = ConstantStruct::get( 1055 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize); 1056 1057 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct); 1058 } 1059 1060 static void replaceFrameSize(coro::Shape &Shape) { 1061 if (Shape.ABI == coro::ABI::Async) 1062 updateAsyncFuncPointerContextSize(Shape); 1063 1064 if (Shape.CoroSizes.empty()) 1065 return; 1066 1067 // In the same function all coro.sizes should have the same result type. 1068 auto *SizeIntrin = Shape.CoroSizes.back(); 1069 Module *M = SizeIntrin->getModule(); 1070 const DataLayout &DL = M->getDataLayout(); 1071 auto Size = DL.getTypeAllocSize(Shape.FrameTy); 1072 auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size); 1073 1074 for (CoroSizeInst *CS : Shape.CoroSizes) { 1075 CS->replaceAllUsesWith(SizeConstant); 1076 CS->eraseFromParent(); 1077 } 1078 } 1079 1080 // Create a global constant array containing pointers to functions provided and 1081 // set Info parameter of CoroBegin to point at this constant. Example: 1082 // 1083 // @f.resumers = internal constant [2 x void(%f.frame*)*] 1084 // [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy] 1085 // define void @f() { 1086 // ... 1087 // call i8* @llvm.coro.begin(i8* null, i32 0, i8* null, 1088 // i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*)) 1089 // 1090 // Assumes that all the functions have the same signature. 1091 static void setCoroInfo(Function &F, coro::Shape &Shape, 1092 ArrayRef<Function *> Fns) { 1093 // This only works under the switch-lowering ABI because coro elision 1094 // only works on the switch-lowering ABI. 1095 assert(Shape.ABI == coro::ABI::Switch); 1096 1097 SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end()); 1098 assert(!Args.empty()); 1099 Function *Part = *Fns.begin(); 1100 Module *M = Part->getParent(); 1101 auto *ArrTy = ArrayType::get(Part->getType(), Args.size()); 1102 1103 auto *ConstVal = ConstantArray::get(ArrTy, Args); 1104 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true, 1105 GlobalVariable::PrivateLinkage, ConstVal, 1106 F.getName() + Twine(".resumers")); 1107 1108 // Update coro.begin instruction to refer to this constant. 1109 LLVMContext &C = F.getContext(); 1110 auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C)); 1111 Shape.getSwitchCoroId()->setInfo(BC); 1112 } 1113 1114 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame. 
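//
// As an illustrative sketch only (the frame type and field indices are
// hypothetical, following the switch-lowering examples above), the stores
// emitted here look roughly like:
//
//   %resume.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
//                                         i32 0, i32 0
//   store void (%f.Frame*)* @f.resume, void (%f.Frame*)** %resume.addr
//   %destroy.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
//                                          i32 0, i32 1
//   store void (%f.Frame*)* @f.destroy, void (%f.Frame*)** %destroy.addr
//
// When a coro.alloc is present, a select between @f.destroy and @f.cleanup is
// stored into the destroy slot instead, so an elided allocation is torn down
// by the cleanup clone rather than deallocated.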
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  IRBuilder<> Builder(Shape.FramePtr->getNextNode());
  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elided the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

#ifndef NDEBUG
  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");
#endif
}

// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret with a clone of that ret
// instruction. Since the suspend instruction is represented by a switch, track
// the PHI values and select the correct case successor when possible.
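//
// As a rough example (illustrative IR only), a pattern in the resume clone
// such as
//
//   call fastcc void %other.resume.fn(i8* %other.frame)
//   br label %exit
// exit:
//   ret void
//
// is collapsed so that the ret immediately follows the call, which is what
// allows addMustTailToCoroResumes to mark the call as musttail.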
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;

  Instruction *I = InitialInst;
  while (I->isTerminator() ||
         (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch, remove PHI values that
        // come from the basic block of InitialInst.
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = BB;
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      auto *BR = dyn_cast<BranchInst>(I->getNextNode());
      if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the number of cases in the suspended switch instruction is
        // reduced to one, it is simplified to a CmpInst by
        // llvm::ConstantFoldTerminator, and the comparison looks like:
        //   %cond = icmp eq i8 %V, constant.
        ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
        if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
          Value *V = CondCmp->getOperand(0);
          auto it = ResolvedValues.find(V);
          if (it != ResolvedValues.end())
            V = it->second;

          if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
            BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
                                 ? BR->getSuccessor(0)
                                 : BR->getSuccessor(1);
            scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
            I = BB->getFirstNonPHIOrDbgOrLifetime();
            continue;
          }
        }
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    return false;
  }
  return false;
}

// Check whether CI obeys the rules of the musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match the prototype and calling convention of the resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI should not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
      Attribute::SwiftSelf,    Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttr(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instructions that are immediately followed by a
// suspend (i.e. ret). We do this even in -O0 to support guaranteed tail calls
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine, and
// only for calls whose prototype and calling convention are identical to
// those of the resume function.
static void addMustTailToCoroResumes(Function &F) {
  bool changed = false;

  // Collect potential resume instructions.
  SmallVector<CallInst *, 4> Resumes;
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
    removeUnreachableBlocks(F);
}

// The coroutine has no suspend points. Remove the heap allocation for the
// coroutine frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// simplifySuspendPoint needs to check that there are no calls between
// coro_save and coro_suspend, since any of those calls may potentially resume
// the coroutine, and if that is the case we cannot eliminate the suspend point.
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by the suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (!Set.contains(Pred))
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // If it does not refer to the current coroutine, we cannot do anything with
  // it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);
}

static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  addMustTailToCoroResumes(*ResumeClone);

  // Store the addresses of the resume/destroy/cleanup functions in the
  // coroutine frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to the resume/destroy/cleanup functions,
  // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
  // pass can determine the correct function to call.
  setCoroInfo(F, Shape, Clones);
}

static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
                      UndefValue::get(Int8PtrTy));
}

/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
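///
/// A minimal illustrative example (the types are hypothetical): if the
/// musttail callee expects an i8* where the collected argument is a
/// %swift.context*, the argument is rewritten as
///
///   %coerced = bitcast %swift.context* %ctx to i8*
///
/// before being appended to \p CallArgs; arguments whose types already match
/// are passed through unchanged.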
1530 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy, 1531 ArrayRef<Value *> FnArgs, 1532 SmallVectorImpl<Value *> &CallArgs) { 1533 size_t ArgIdx = 0; 1534 for (auto paramTy : FnTy->params()) { 1535 assert(ArgIdx < FnArgs.size()); 1536 if (paramTy != FnArgs[ArgIdx]->getType()) 1537 CallArgs.push_back( 1538 Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy)); 1539 else 1540 CallArgs.push_back(FnArgs[ArgIdx]); 1541 ++ArgIdx; 1542 } 1543 } 1544 1545 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, 1546 ArrayRef<Value *> Arguments, 1547 IRBuilder<> &Builder) { 1548 auto *FnTy = MustTailCallFn->getFunctionType(); 1549 // Coerce the arguments, llvm optimizations seem to ignore the types in 1550 // vaarg functions and throws away casts in optimized mode. 1551 SmallVector<Value *, 8> CallArgs; 1552 coerceArguments(Builder, FnTy, Arguments, CallArgs); 1553 1554 auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs); 1555 TailCall->setTailCallKind(CallInst::TCK_MustTail); 1556 TailCall->setDebugLoc(Loc); 1557 TailCall->setCallingConv(MustTailCallFn->getCallingConv()); 1558 return TailCall; 1559 } 1560 1561 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape, 1562 SmallVectorImpl<Function *> &Clones) { 1563 assert(Shape.ABI == coro::ABI::Async); 1564 assert(Clones.empty()); 1565 // Reset various things that the optimizer might have decided it 1566 // "knows" about the coroutine function due to not seeing a return. 1567 F.removeFnAttr(Attribute::NoReturn); 1568 F.removeRetAttr(Attribute::NoAlias); 1569 F.removeRetAttr(Attribute::NonNull); 1570 1571 auto &Context = F.getContext(); 1572 auto *Int8PtrTy = Type::getInt8PtrTy(Context); 1573 1574 auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId()); 1575 IRBuilder<> Builder(Id); 1576 1577 auto *FramePtr = Id->getStorage(); 1578 FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy); 1579 FramePtr = Builder.CreateConstInBoundsGEP1_32( 1580 Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset, 1581 "async.ctx.frameptr"); 1582 1583 // Map all uses of llvm.coro.begin to the allocated frame pointer. 1584 { 1585 // Make sure we don't invalidate Shape.FramePtr. 1586 TrackingVH<Instruction> Handle(Shape.FramePtr); 1587 Shape.CoroBegin->replaceAllUsesWith(FramePtr); 1588 Shape.FramePtr = Handle.getValPtr(); 1589 } 1590 1591 // Create all the functions in order after the main function. 1592 auto NextF = std::next(F.getIterator()); 1593 1594 // Create a continuation function for each of the suspend points. 1595 Clones.reserve(Shape.CoroSuspends.size()); 1596 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) { 1597 auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]); 1598 1599 // Create the clone declaration. 1600 auto ResumeNameSuffix = ".resume."; 1601 auto ProjectionFunctionName = 1602 Suspend->getAsyncContextProjectionFunction()->getName(); 1603 bool UseSwiftMangling = false; 1604 if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) { 1605 ResumeNameSuffix = "TQ"; 1606 UseSwiftMangling = true; 1607 } else if (ProjectionFunctionName.equals( 1608 "__swift_async_resume_get_context")) { 1609 ResumeNameSuffix = "TY"; 1610 UseSwiftMangling = true; 1611 } 1612 auto *Continuation = createCloneDeclaration( 1613 F, Shape, 1614 UseSwiftMangling ? 
ResumeNameSuffix + Twine(Idx) + "_"
1615 : ResumeNameSuffix + Twine(Idx),
1616 NextF, Suspend);
1617 Clones.push_back(Continuation);
1618
1619 // Insert a branch to a new return block immediately before the suspend
1620 // point.
1621 auto *SuspendBB = Suspend->getParent();
1622 auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
1623 auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
1624
1625 // Place it before the first suspend.
1626 auto *ReturnBB =
1627 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
1628 Branch->setSuccessor(0, ReturnBB);
1629
1630 IRBuilder<> Builder(ReturnBB);
1631
1632 // Insert the call to the tail call function and inline it.
1633 auto *Fn = Suspend->getMustTailCallFunction();
1634 SmallVector<Value *, 8> Args(Suspend->args());
1635 auto FnArgs = ArrayRef<Value *>(Args).drop_front(
1636 CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
1637 auto *TailCall =
1638 coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
1639 Builder.CreateRetVoid();
1640 InlineFunctionInfo FnInfo;
1641 auto InlineRes = InlineFunction(*TailCall, FnInfo);
1642 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
1643 (void)InlineRes;
1644
1645 // Replace the llvm.coro.async.resume intrinsic call.
1646 replaceAsyncResumeFunction(Suspend, Continuation);
1647 }
1648
1649 assert(Clones.size() == Shape.CoroSuspends.size());
1650 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
1651 auto *Suspend = Shape.CoroSuspends[Idx];
1652 auto *Clone = Clones[Idx];
1653
1654 CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
1655 }
1656 }
1657
1658 static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
1659 SmallVectorImpl<Function *> &Clones) {
1660 assert(Shape.ABI == coro::ABI::Retcon ||
1661 Shape.ABI == coro::ABI::RetconOnce);
1662 assert(Clones.empty());
1663
1664 // Reset various things that the optimizer might have decided it
1665 // "knows" about the coroutine function due to not seeing a return.
1666 F.removeFnAttr(Attribute::NoReturn);
1667 F.removeRetAttr(Attribute::NoAlias);
1668 F.removeRetAttr(Attribute::NonNull);
1669
1670 // Allocate the frame.
1671 auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
1672 Value *RawFramePtr;
1673 if (Shape.RetconLowering.IsFrameInlineInStorage) {
1674 RawFramePtr = Id->getStorage();
1675 } else {
1676 IRBuilder<> Builder(Id);
1677
1678 // Determine the size of the frame.
1679 const DataLayout &DL = F.getParent()->getDataLayout();
1680 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1681
1682 // Allocate. We don't need to update the call graph node because we're
1683 // going to recompute it from scratch after splitting.
1684 // FIXME: pass the required alignment
1685 RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
1686 RawFramePtr =
1687 Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
1688
1689 // Stash the allocated frame pointer in the continuation storage.
1690 auto Dest = Builder.CreateBitCast(Id->getStorage(),
1691 RawFramePtr->getType()->getPointerTo());
1692 Builder.CreateStore(RawFramePtr, Dest);
1693 }
1694
1695 // Map all uses of llvm.coro.begin to the allocated frame pointer.
1696 {
1697 // Make sure we don't invalidate Shape.FramePtr.
1698 TrackingVH<Instruction> Handle(Shape.FramePtr);
1699 Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
1700 Shape.FramePtr = Handle.getValPtr();
1701 }
1702
1703 // Create a unique return block.
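// Every suspend point branches to this block; it returns the continuation
// function pointer together with any values yielded directly at the suspend,
// gathered through the PHIs created below.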
1704 BasicBlock *ReturnBB = nullptr; 1705 SmallVector<PHINode *, 4> ReturnPHIs; 1706 1707 // Create all the functions in order after the main function. 1708 auto NextF = std::next(F.getIterator()); 1709 1710 // Create a continuation function for each of the suspend points. 1711 Clones.reserve(Shape.CoroSuspends.size()); 1712 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) { 1713 auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]); 1714 1715 // Create the clone declaration. 1716 auto Continuation = 1717 createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr); 1718 Clones.push_back(Continuation); 1719 1720 // Insert a branch to the unified return block immediately before 1721 // the suspend point. 1722 auto SuspendBB = Suspend->getParent(); 1723 auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend); 1724 auto Branch = cast<BranchInst>(SuspendBB->getTerminator()); 1725 1726 // Create the unified return block. 1727 if (!ReturnBB) { 1728 // Place it before the first suspend. 1729 ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F, 1730 NewSuspendBB); 1731 Shape.RetconLowering.ReturnBlock = ReturnBB; 1732 1733 IRBuilder<> Builder(ReturnBB); 1734 1735 // Create PHIs for all the return values. 1736 assert(ReturnPHIs.empty()); 1737 1738 // First, the continuation. 1739 ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(), 1740 Shape.CoroSuspends.size())); 1741 1742 // Next, all the directly-yielded values. 1743 for (auto ResultTy : Shape.getRetconResultTypes()) 1744 ReturnPHIs.push_back(Builder.CreatePHI(ResultTy, 1745 Shape.CoroSuspends.size())); 1746 1747 // Build the return value. 1748 auto RetTy = F.getReturnType(); 1749 1750 // Cast the continuation value if necessary. 1751 // We can't rely on the types matching up because that type would 1752 // have to be infinite. 1753 auto CastedContinuationTy = 1754 (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0)); 1755 auto *CastedContinuation = 1756 Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy); 1757 1758 Value *RetV; 1759 if (ReturnPHIs.size() == 1) { 1760 RetV = CastedContinuation; 1761 } else { 1762 RetV = UndefValue::get(RetTy); 1763 RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0); 1764 for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I) 1765 RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I); 1766 } 1767 1768 Builder.CreateRet(RetV); 1769 } 1770 1771 // Branch to the return block. 1772 Branch->setSuccessor(0, ReturnBB); 1773 ReturnPHIs[0]->addIncoming(Continuation, SuspendBB); 1774 size_t NextPHIIndex = 1; 1775 for (auto &VUse : Suspend->value_operands()) 1776 ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB); 1777 assert(NextPHIIndex == ReturnPHIs.size()); 1778 } 1779 1780 assert(Clones.size() == Shape.CoroSuspends.size()); 1781 for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) { 1782 auto Suspend = Shape.CoroSuspends[i]; 1783 auto Clone = Clones[i]; 1784 1785 CoroCloner(F, "resume." 
+ Twine(i), Shape, Clone, Suspend).create();
1786 }
1787 }
1788
1789 namespace {
1790 class PrettyStackTraceFunction : public PrettyStackTraceEntry {
1791 Function &F;
1792 public:
1793 PrettyStackTraceFunction(Function &F) : F(F) {}
1794 void print(raw_ostream &OS) const override {
1795 OS << "While splitting coroutine ";
1796 F.printAsOperand(OS, /*print type*/ false, F.getParent());
1797 OS << "\n";
1798 }
1799 };
1800 }
1801
1802 static coro::Shape splitCoroutine(Function &F,
1803 SmallVectorImpl<Function *> &Clones,
1804 bool ReuseFrameSlot) {
1805 PrettyStackTraceFunction prettyStackTrace(F);
1806
1807 // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
1808 // up by uses in unreachable blocks, so remove them as a first pass.
1809 removeUnreachableBlocks(F);
1810
1811 coro::Shape Shape(F, ReuseFrameSlot);
1812 if (!Shape.CoroBegin)
1813 return Shape;
1814
1815 simplifySuspendPoints(Shape);
1816 buildCoroutineFrame(F, Shape);
1817 replaceFrameSize(Shape);
1818
1819 // If there are no suspend points, no split is required; just remove
1820 // the allocation and deallocation blocks, which are not needed.
1821 if (Shape.CoroSuspends.empty()) {
1822 handleNoSuspendCoroutine(Shape);
1823 } else {
1824 switch (Shape.ABI) {
1825 case coro::ABI::Switch:
1826 splitSwitchCoroutine(F, Shape, Clones);
1827 break;
1828 case coro::ABI::Async:
1829 splitAsyncCoroutine(F, Shape, Clones);
1830 break;
1831 case coro::ABI::Retcon:
1832 case coro::ABI::RetconOnce:
1833 splitRetconCoroutine(F, Shape, Clones);
1834 break;
1835 }
1836 }
1837
1838 // Replace all the swifterror operations in the original function.
1839 // This invalidates SwiftErrorOps in the Shape.
1840 replaceSwiftErrorOps(F, Shape, nullptr);
1841
1842 return Shape;
1843 }
1844
1845 static void
1846 updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
1847 const SmallVectorImpl<Function *> &Clones,
1848 CallGraph &CG, CallGraphSCC &SCC) {
1849 if (!Shape.CoroBegin)
1850 return;
1851
1852 removeCoroEnds(Shape, &CG);
1853 postSplitCleanup(F);
1854
1855 // Update call graph and add the functions we created to the SCC.
1856 coro::updateCallGraph(F, Clones, CG, SCC);
1857 }
1858
1859 static void updateCallGraphAfterCoroutineSplit(
1860 LazyCallGraph::Node &N, const coro::Shape &Shape,
1861 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
1862 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
1863 FunctionAnalysisManager &FAM) {
1864 if (!Shape.CoroBegin)
1865 return;
1866
1867 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
1868 auto &Context = End->getContext();
1869 End->replaceAllUsesWith(ConstantInt::getFalse(Context));
1870 End->eraseFromParent();
1871 }
1872
1873 if (!Clones.empty()) {
1874 switch (Shape.ABI) {
1875 case coro::ABI::Switch:
1876 // Each clone in the Switch lowering is independent of the other clones.
1877 // Let the LazyCallGraph know about each one separately.
1878 for (Function *Clone : Clones)
1879 CG.addSplitFunction(N.getFunction(), *Clone);
1880 break;
1881 case coro::ABI::Async:
1882 case coro::ABI::Retcon:
1883 case coro::ABI::RetconOnce:
1884 // Each clone in the Async/Retcon lowering references the other clones.
1885 // Let the LazyCallGraph know about all of them at once.
1886 if (!Clones.empty())
1887 CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
1888 break;
1889 }
1890
1891 // Let the CGSCC infra handle the changes to the original function.
1892 updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM); 1893 } 1894 1895 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges 1896 // to the split functions. 1897 postSplitCleanup(N.getFunction()); 1898 updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM); 1899 } 1900 1901 // When we see the coroutine the first time, we insert an indirect call to a 1902 // devirt trigger function and mark the coroutine that it is now ready for 1903 // split. 1904 // Async lowering uses this after it has split the function to restart the 1905 // pipeline. 1906 static void prepareForSplit(Function &F, CallGraph &CG, 1907 bool MarkForAsyncRestart = false) { 1908 Module &M = *F.getParent(); 1909 LLVMContext &Context = F.getContext(); 1910 #ifndef NDEBUG 1911 Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN); 1912 assert(DevirtFn && "coro.devirt.trigger function not found"); 1913 #endif 1914 1915 F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart 1916 ? ASYNC_RESTART_AFTER_SPLIT 1917 : PREPARED_FOR_SPLIT); 1918 1919 // Insert an indirect call sequence that will be devirtualized by CoroElide 1920 // pass: 1921 // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1) 1922 // %1 = bitcast i8* %0 to void(i8*)* 1923 // call void %1(i8* null) 1924 coro::LowererBase Lowerer(M); 1925 Instruction *InsertPt = 1926 MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime() 1927 : F.getEntryBlock().getTerminator(); 1928 auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context)); 1929 auto *DevirtFnAddr = 1930 Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt); 1931 FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context), 1932 {Type::getInt8PtrTy(Context)}, false); 1933 auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt); 1934 1935 // Update CG graph with an indirect call we just added. 1936 CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode()); 1937 } 1938 1939 // Make sure that there is a devirtualization trigger function that the 1940 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt 1941 // trigger function is not found, we will create one and add it to the current 1942 // SCC. 1943 static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) { 1944 Module &M = CG.getModule(); 1945 if (M.getFunction(CORO_DEVIRT_TRIGGER_FN)) 1946 return; 1947 1948 LLVMContext &C = M.getContext(); 1949 auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C), 1950 /*isVarArg=*/false); 1951 Function *DevirtFn = 1952 Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage, 1953 CORO_DEVIRT_TRIGGER_FN, &M); 1954 DevirtFn->addFnAttr(Attribute::AlwaysInline); 1955 auto *Entry = BasicBlock::Create(C, "entry", DevirtFn); 1956 ReturnInst::Create(C, Entry); 1957 1958 auto *Node = CG.getOrInsertFunction(DevirtFn); 1959 1960 SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end()); 1961 Nodes.push_back(Node); 1962 SCC.initialize(Nodes); 1963 } 1964 1965 /// Replace a call to llvm.coro.prepare.retcon. 
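/// The prepare intrinsic merely returns its function argument as an i8*; once
/// splitting is done we forward its uses to the underlying function and erase
/// any casts that become dead in the process.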
1966 static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG, 1967 LazyCallGraph::SCC &C) { 1968 auto CastFn = Prepare->getArgOperand(0); // as an i8* 1969 auto Fn = CastFn->stripPointerCasts(); // as its original type 1970 1971 // Attempt to peephole this pattern: 1972 // %0 = bitcast [[TYPE]] @some_function to i8* 1973 // %1 = call @llvm.coro.prepare.retcon(i8* %0) 1974 // %2 = bitcast %1 to [[TYPE]] 1975 // ==> 1976 // %2 = @some_function 1977 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) { 1978 // Look for bitcasts back to the original function type. 1979 auto *Cast = dyn_cast<BitCastInst>(U.getUser()); 1980 if (!Cast || Cast->getType() != Fn->getType()) 1981 continue; 1982 1983 // Replace and remove the cast. 1984 Cast->replaceAllUsesWith(Fn); 1985 Cast->eraseFromParent(); 1986 } 1987 1988 // Replace any remaining uses with the function as an i8*. 1989 // This can never directly be a callee, so we don't need to update CG. 1990 Prepare->replaceAllUsesWith(CastFn); 1991 Prepare->eraseFromParent(); 1992 1993 // Kill dead bitcasts. 1994 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) { 1995 if (!Cast->use_empty()) 1996 break; 1997 CastFn = Cast->getOperand(0); 1998 Cast->eraseFromParent(); 1999 } 2000 } 2001 /// Replace a call to llvm.coro.prepare.retcon. 2002 static void replacePrepare(CallInst *Prepare, CallGraph &CG) { 2003 auto CastFn = Prepare->getArgOperand(0); // as an i8* 2004 auto Fn = CastFn->stripPointerCasts(); // as its original type 2005 2006 // Find call graph nodes for the preparation. 2007 CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr; 2008 if (auto ConcreteFn = dyn_cast<Function>(Fn)) { 2009 PrepareUserNode = CG[Prepare->getFunction()]; 2010 FnNode = CG[ConcreteFn]; 2011 } 2012 2013 // Attempt to peephole this pattern: 2014 // %0 = bitcast [[TYPE]] @some_function to i8* 2015 // %1 = call @llvm.coro.prepare.retcon(i8* %0) 2016 // %2 = bitcast %1 to [[TYPE]] 2017 // ==> 2018 // %2 = @some_function 2019 for (Use &U : llvm::make_early_inc_range(Prepare->uses())) { 2020 // Look for bitcasts back to the original function type. 2021 auto *Cast = dyn_cast<BitCastInst>(U.getUser()); 2022 if (!Cast || Cast->getType() != Fn->getType()) continue; 2023 2024 // Check whether the replacement will introduce new direct calls. 2025 // If so, we'll need to update the call graph. 2026 if (PrepareUserNode) { 2027 for (auto &Use : Cast->uses()) { 2028 if (auto *CB = dyn_cast<CallBase>(Use.getUser())) { 2029 if (!CB->isCallee(&Use)) 2030 continue; 2031 PrepareUserNode->removeCallEdgeFor(*CB); 2032 PrepareUserNode->addCalledFunction(CB, FnNode); 2033 } 2034 } 2035 } 2036 2037 // Replace and remove the cast. 2038 Cast->replaceAllUsesWith(Fn); 2039 Cast->eraseFromParent(); 2040 } 2041 2042 // Replace any remaining uses with the function as an i8*. 2043 // This can never directly be a callee, so we don't need to update CG. 2044 Prepare->replaceAllUsesWith(CastFn); 2045 Prepare->eraseFromParent(); 2046 2047 // Kill dead bitcasts. 2048 while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) { 2049 if (!Cast->use_empty()) break; 2050 CastFn = Cast->getOperand(0); 2051 Cast->eraseFromParent(); 2052 } 2053 } 2054 2055 static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG, 2056 LazyCallGraph::SCC &C) { 2057 bool Changed = false; 2058 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) { 2059 // Intrinsics can only be used in calls. 
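// That is what makes the unconditional cast below safe.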
2060 auto *Prepare = cast<CallInst>(P.getUser());
2061 replacePrepare(Prepare, CG, C);
2062 Changed = true;
2063 }
2064
2065 return Changed;
2066 }
2067
2068 /// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
2069 /// IPO from operating on calls to a retcon coroutine before it's been
2070 /// split. This is only safe to do after we've split all retcon
2071 /// coroutines in the module. We can do this in this pass because
2072 /// this pass promises to split all retcon coroutines (as opposed to
2073 /// switch coroutines, which are lowered in multiple stages).
2074 static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
2075 bool Changed = false;
2076 for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
2077 // Intrinsics can only be used in calls.
2078 auto *Prepare = cast<CallInst>(P.getUser());
2079 replacePrepare(Prepare, CG);
2080 Changed = true;
2081 }
2082
2083 return Changed;
2084 }
2085
2086 static bool declaresCoroSplitIntrinsics(const Module &M) {
2087 return coro::declaresIntrinsics(M, {"llvm.coro.begin",
2088 "llvm.coro.prepare.retcon",
2089 "llvm.coro.prepare.async"});
2090 }
2091
2092 static void addPrepareFunction(const Module &M,
2093 SmallVectorImpl<Function *> &Fns,
2094 StringRef Name) {
2095 auto *PrepareFn = M.getFunction(Name);
2096 if (PrepareFn && !PrepareFn->use_empty())
2097 Fns.push_back(PrepareFn);
2098 }
2099
2100 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
2101 CGSCCAnalysisManager &AM,
2102 LazyCallGraph &CG, CGSCCUpdateResult &UR) {
2103 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
2104 // non-zero number of nodes, so we assume that here and grab the first
2105 // node's function's module.
2106 Module &M = *C.begin()->getFunction().getParent();
2107 auto &FAM =
2108 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
2109
2110 if (!declaresCoroSplitIntrinsics(M))
2111 return PreservedAnalyses::all();
2112
2113 // Check for uses of llvm.coro.prepare.retcon/async.
2114 SmallVector<Function *, 2> PrepareFns;
2115 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
2116 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
2117
2118 // Find coroutines for processing.
2119 SmallVector<LazyCallGraph::Node *, 4> Coroutines;
2120 for (LazyCallGraph::Node &N : C)
2121 if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
2122 Coroutines.push_back(&N);
2123
2124 if (Coroutines.empty() && PrepareFns.empty())
2125 return PreservedAnalyses::all();
2126
2127 if (Coroutines.empty()) {
2128 for (auto *PrepareFn : PrepareFns) {
2129 replaceAllPrepares(PrepareFn, CG, C);
2130 }
2131 }
2132
2133 // Split all the coroutines.
2134 for (LazyCallGraph::Node *N : Coroutines) {
2135 Function &F = N->getFunction();
2136 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
2137 << "' state: "
2138 << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
2139 << "\n");
2140 F.removeFnAttr(CORO_PRESPLIT_ATTR);
2141
2142 SmallVector<Function *, 4> Clones;
2143 const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
2144 updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
2145
2146 if (!Shape.CoroSuspends.empty()) {
2147 // Run the CGSCC pipeline on the original and newly split functions.
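// Re-queueing the current SCC and the SCCs of the new clones revisits them
// before the pipeline moves on to the coroutine's callers.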
2148 UR.CWorklist.insert(&C); 2149 for (Function *Clone : Clones) 2150 UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone))); 2151 } 2152 } 2153 2154 if (!PrepareFns.empty()) { 2155 for (auto *PrepareFn : PrepareFns) { 2156 replaceAllPrepares(PrepareFn, CG, C); 2157 } 2158 } 2159 2160 return PreservedAnalyses::none(); 2161 } 2162 2163 namespace { 2164 2165 // We present a coroutine to LLVM as an ordinary function with suspension 2166 // points marked up with intrinsics. We let the optimizer party on the coroutine 2167 // as a single function for as long as possible. Shortly before the coroutine is 2168 // eligible to be inlined into its callers, we split up the coroutine into parts 2169 // corresponding to initial, resume and destroy invocations of the coroutine, 2170 // add them to the current SCC and restart the IPO pipeline to optimize the 2171 // coroutine subfunctions we extracted before proceeding to the caller of the 2172 // coroutine. 2173 struct CoroSplitLegacy : public CallGraphSCCPass { 2174 static char ID; // Pass identification, replacement for typeid 2175 2176 CoroSplitLegacy(bool ReuseFrameSlot = false) 2177 : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) { 2178 initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry()); 2179 } 2180 2181 bool Run = false; 2182 bool ReuseFrameSlot; 2183 2184 // A coroutine is identified by the presence of coro.begin intrinsic, if 2185 // we don't have any, this pass has nothing to do. 2186 bool doInitialization(CallGraph &CG) override { 2187 Run = declaresCoroSplitIntrinsics(CG.getModule()); 2188 return CallGraphSCCPass::doInitialization(CG); 2189 } 2190 2191 bool runOnSCC(CallGraphSCC &SCC) override { 2192 if (!Run) 2193 return false; 2194 2195 // Check for uses of llvm.coro.prepare.retcon. 2196 SmallVector<Function *, 2> PrepareFns; 2197 auto &M = SCC.getCallGraph().getModule(); 2198 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon"); 2199 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async"); 2200 2201 // Find coroutines for processing. 2202 SmallVector<Function *, 4> Coroutines; 2203 for (CallGraphNode *CGN : SCC) 2204 if (auto *F = CGN->getFunction()) 2205 if (F->hasFnAttribute(CORO_PRESPLIT_ATTR)) 2206 Coroutines.push_back(F); 2207 2208 if (Coroutines.empty() && PrepareFns.empty()) 2209 return false; 2210 2211 CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph(); 2212 2213 if (Coroutines.empty()) { 2214 bool Changed = false; 2215 for (auto *PrepareFn : PrepareFns) 2216 Changed |= replaceAllPrepares(PrepareFn, CG); 2217 return Changed; 2218 } 2219 2220 createDevirtTriggerFunc(CG, SCC); 2221 2222 // Split all the coroutines. 2223 for (Function *F : Coroutines) { 2224 Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR); 2225 StringRef Value = Attr.getValueAsString(); 2226 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName() 2227 << "' state: " << Value << "\n"); 2228 // Async lowering marks coroutines to trigger a restart of the pipeline 2229 // after it has split them. 2230 if (Value == ASYNC_RESTART_AFTER_SPLIT) { 2231 F->removeFnAttr(CORO_PRESPLIT_ATTR); 2232 continue; 2233 } 2234 if (Value == UNPREPARED_FOR_SPLIT) { 2235 prepareForSplit(*F, CG); 2236 continue; 2237 } 2238 F->removeFnAttr(CORO_PRESPLIT_ATTR); 2239 2240 SmallVector<Function *, 4> Clones; 2241 const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot); 2242 updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC); 2243 if (Shape.ABI == coro::ABI::Async) { 2244 // Restart SCC passes. 
2245 // Mark function for CoroElide pass. It will devirtualize causing a 2246 // restart of the SCC pipeline. 2247 prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/); 2248 } 2249 } 2250 2251 for (auto *PrepareFn : PrepareFns) 2252 replaceAllPrepares(PrepareFn, CG); 2253 2254 return true; 2255 } 2256 2257 void getAnalysisUsage(AnalysisUsage &AU) const override { 2258 CallGraphSCCPass::getAnalysisUsage(AU); 2259 } 2260 2261 StringRef getPassName() const override { return "Coroutine Splitting"; } 2262 }; 2263 2264 } // end anonymous namespace 2265 2266 char CoroSplitLegacy::ID = 0; 2267 2268 INITIALIZE_PASS_BEGIN( 2269 CoroSplitLegacy, "coro-split", 2270 "Split coroutine into a set of functions driving its state machine", false, 2271 false) 2272 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass) 2273 INITIALIZE_PASS_END( 2274 CoroSplitLegacy, "coro-split", 2275 "Split coroutine into a set of functions driving its state machine", false, 2276 false) 2277 2278 Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) { 2279 return new CoroSplitLegacy(ReuseFrameSlot); 2280 } 2281