//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
19 //===----------------------------------------------------------------------===// 20 21 #include "llvm/Transforms/Coroutines/CoroSplit.h" 22 #include "CoroInstr.h" 23 #include "CoroInternal.h" 24 #include "llvm/ADT/DenseMap.h" 25 #include "llvm/ADT/SmallPtrSet.h" 26 #include "llvm/ADT/SmallVector.h" 27 #include "llvm/ADT/StringRef.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/Analysis/CFG.h" 30 #include "llvm/Analysis/CallGraph.h" 31 #include "llvm/Analysis/CallGraphSCCPass.h" 32 #include "llvm/Analysis/LazyCallGraph.h" 33 #include "llvm/IR/Argument.h" 34 #include "llvm/IR/Attributes.h" 35 #include "llvm/IR/BasicBlock.h" 36 #include "llvm/IR/CFG.h" 37 #include "llvm/IR/CallingConv.h" 38 #include "llvm/IR/Constants.h" 39 #include "llvm/IR/DataLayout.h" 40 #include "llvm/IR/DerivedTypes.h" 41 #include "llvm/IR/Dominators.h" 42 #include "llvm/IR/Function.h" 43 #include "llvm/IR/GlobalValue.h" 44 #include "llvm/IR/GlobalVariable.h" 45 #include "llvm/IR/IRBuilder.h" 46 #include "llvm/IR/InstIterator.h" 47 #include "llvm/IR/InstrTypes.h" 48 #include "llvm/IR/Instruction.h" 49 #include "llvm/IR/Instructions.h" 50 #include "llvm/IR/IntrinsicInst.h" 51 #include "llvm/IR/LLVMContext.h" 52 #include "llvm/IR/LegacyPassManager.h" 53 #include "llvm/IR/Module.h" 54 #include "llvm/IR/Type.h" 55 #include "llvm/IR/Value.h" 56 #include "llvm/IR/Verifier.h" 57 #include "llvm/InitializePasses.h" 58 #include "llvm/Pass.h" 59 #include "llvm/Support/Casting.h" 60 #include "llvm/Support/Debug.h" 61 #include "llvm/Support/PrettyStackTrace.h" 62 #include "llvm/Support/raw_ostream.h" 63 #include "llvm/Transforms/Scalar.h" 64 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 65 #include "llvm/Transforms/Utils/CallGraphUpdater.h" 66 #include "llvm/Transforms/Utils/Cloning.h" 67 #include "llvm/Transforms/Utils/Local.h" 68 #include "llvm/Transforms/Utils/ValueMapper.h" 69 #include <cassert> 70 #include <cstddef> 71 #include <cstdint> 72 #include <initializer_list> 73 #include <iterator> 
74 75 using namespace llvm; 76 77 #define DEBUG_TYPE "coro-split" 78 79 namespace { 80 81 /// A little helper class for building 82 class CoroCloner { 83 public: 84 enum class Kind { 85 /// The shared resume function for a switch lowering. 86 SwitchResume, 87 88 /// The shared unwind function for a switch lowering. 89 SwitchUnwind, 90 91 /// The shared cleanup function for a switch lowering. 92 SwitchCleanup, 93 94 /// An individual continuation function. 95 Continuation, 96 97 /// An async resume function. 98 Async, 99 }; 100 101 private: 102 Function &OrigF; 103 Function *NewF; 104 const Twine &Suffix; 105 coro::Shape &Shape; 106 Kind FKind; 107 ValueToValueMapTy VMap; 108 IRBuilder<> Builder; 109 Value *NewFramePtr = nullptr; 110 111 /// The active suspend instruction; meaningful only for continuation and async 112 /// ABIs. 113 AnyCoroSuspendInst *ActiveSuspend = nullptr; 114 115 public: 116 /// Create a cloner for a switch lowering. 117 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, 118 Kind FKind) 119 : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape), 120 FKind(FKind), Builder(OrigF.getContext()) { 121 assert(Shape.ABI == coro::ABI::Switch); 122 } 123 124 /// Create a cloner for a continuation lowering. 125 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape, 126 Function *NewF, AnyCoroSuspendInst *ActiveSuspend) 127 : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape), 128 FKind(Shape.ABI == coro::ABI::Async ? 
Kind::Async : Kind::Continuation), 129 Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) { 130 assert(Shape.ABI == coro::ABI::Retcon || 131 Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async); 132 assert(NewF && "need existing function for continuation"); 133 assert(ActiveSuspend && "need active suspend point for continuation"); 134 } 135 136 Function *getFunction() const { 137 assert(NewF != nullptr && "declaration not yet set"); 138 return NewF; 139 } 140 141 void create(); 142 143 private: 144 bool isSwitchDestroyFunction() { 145 switch (FKind) { 146 case Kind::Async: 147 case Kind::Continuation: 148 case Kind::SwitchResume: 149 return false; 150 case Kind::SwitchUnwind: 151 case Kind::SwitchCleanup: 152 return true; 153 } 154 llvm_unreachable("Unknown CoroCloner::Kind enum"); 155 } 156 157 void replaceEntryBlock(); 158 Value *deriveNewFramePointer(); 159 void replaceRetconOrAsyncSuspendUses(); 160 void replaceCoroSuspends(); 161 void replaceCoroEnds(); 162 void replaceSwiftErrorOps(); 163 void salvageDebugInfo(); 164 void handleFinalSuspend(); 165 }; 166 167 } // end anonymous namespace 168 169 static void maybeFreeRetconStorage(IRBuilder<> &Builder, 170 const coro::Shape &Shape, Value *FramePtr, 171 CallGraph *CG) { 172 assert(Shape.ABI == coro::ABI::Retcon || 173 Shape.ABI == coro::ABI::RetconOnce); 174 if (Shape.RetconLowering.IsFrameInlineInStorage) 175 return; 176 177 Shape.emitDealloc(Builder, FramePtr, CG); 178 } 179 180 /// Replace an llvm.coro.end.async. 181 /// Will inline the must tail call function call if there is one. 182 /// \returns true if cleanup of the coro.end block is needed, false otherwise. 
183 static bool replaceCoroEndAsync(AnyCoroEndInst *End) { 184 IRBuilder<> Builder(End); 185 186 auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End); 187 if (!EndAsync) { 188 Builder.CreateRetVoid(); 189 return true /*needs cleanup of coro.end block*/; 190 } 191 192 auto *MustTailCallFunc = EndAsync->getMustTailCallFunction(); 193 if (!MustTailCallFunc) { 194 Builder.CreateRetVoid(); 195 return true /*needs cleanup of coro.end block*/; 196 } 197 198 // Move the must tail call from the predecessor block into the end block. 199 auto *CoroEndBlock = End->getParent(); 200 auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor(); 201 assert(MustTailCallFuncBlock && "Must have a single predecessor block"); 202 auto It = MustTailCallFuncBlock->getTerminator()->getIterator(); 203 auto *MustTailCall = cast<CallInst>(&*std::prev(It)); 204 CoroEndBlock->getInstList().splice( 205 End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall); 206 207 // Insert the return instruction. 208 Builder.SetInsertPoint(End); 209 Builder.CreateRetVoid(); 210 InlineFunctionInfo FnInfo; 211 212 // Remove the rest of the block, by splitting it into an unreachable block. 213 auto *BB = End->getParent(); 214 BB->splitBasicBlock(End); 215 BB->getTerminator()->eraseFromParent(); 216 217 auto InlineRes = InlineFunction(*MustTailCall, FnInfo); 218 assert(InlineRes.isSuccess() && "Expected inlining to succeed"); 219 (void)InlineRes; 220 221 // We have cleaned up the coro.end block above. 222 return false; 223 } 224 225 /// Replace a non-unwind call to llvm.coro.end. 226 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End, 227 const coro::Shape &Shape, Value *FramePtr, 228 bool InResume, CallGraph *CG) { 229 // Start inserting right before the coro.end. 230 IRBuilder<> Builder(End); 231 232 // Create the return instruction. 233 switch (Shape.ABI) { 234 // The cloned functions in switch-lowering always return void. 
235 case coro::ABI::Switch: 236 // coro.end doesn't immediately end the coroutine in the main function 237 // in this lowering, because we need to deallocate the coroutine. 238 if (!InResume) 239 return; 240 Builder.CreateRetVoid(); 241 break; 242 243 // In async lowering this returns. 244 case coro::ABI::Async: { 245 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End); 246 if (!CoroEndBlockNeedsCleanup) 247 return; 248 break; 249 } 250 251 // In unique continuation lowering, the continuations always return void. 252 // But we may have implicitly allocated storage. 253 case coro::ABI::RetconOnce: 254 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG); 255 Builder.CreateRetVoid(); 256 break; 257 258 // In non-unique continuation lowering, we signal completion by returning 259 // a null continuation. 260 case coro::ABI::Retcon: { 261 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG); 262 auto RetTy = Shape.getResumeFunctionType()->getReturnType(); 263 auto RetStructTy = dyn_cast<StructType>(RetTy); 264 PointerType *ContinuationTy = 265 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy); 266 267 Value *ReturnValue = ConstantPointerNull::get(ContinuationTy); 268 if (RetStructTy) { 269 ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy), 270 ReturnValue, 0); 271 } 272 Builder.CreateRet(ReturnValue); 273 break; 274 } 275 } 276 277 // Remove the rest of the block, by splitting it into an unreachable block. 278 auto *BB = End->getParent(); 279 BB->splitBasicBlock(End); 280 BB->getTerminator()->eraseFromParent(); 281 } 282 283 // Mark a coroutine as done, which implies that the coroutine is finished and 284 // never get resumed. 285 // 286 // In resume-switched ABI, the done state is represented by storing zero in 287 // ResumeFnAddr. 288 // 289 // NOTE: We couldn't omit the argument `FramePtr`. It is necessary because the 290 // pointer to the frame in splitted function is not stored in `Shape`. 
291 static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape, 292 Value *FramePtr) { 293 assert( 294 Shape.ABI == coro::ABI::Switch && 295 "markCoroutineAsDone is only supported for Switch-Resumed ABI for now."); 296 auto *GepIndex = Builder.CreateStructGEP( 297 Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume, 298 "ResumeFn.addr"); 299 auto *NullPtr = ConstantPointerNull::get(cast<PointerType>( 300 Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume))); 301 Builder.CreateStore(NullPtr, GepIndex); 302 } 303 304 /// Replace an unwind call to llvm.coro.end. 305 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, 306 Value *FramePtr, bool InResume, 307 CallGraph *CG) { 308 IRBuilder<> Builder(End); 309 310 switch (Shape.ABI) { 311 // In switch-lowering, this does nothing in the main function. 312 case coro::ABI::Switch: { 313 // In C++'s specification, the coroutine should be marked as done 314 // if promise.unhandled_exception() throws. The frontend will 315 // call coro.end(true) along this path. 316 // 317 // FIXME: We should refactor this once there is other language 318 // which uses Switch-Resumed style other than C++. 319 markCoroutineAsDone(Builder, Shape, FramePtr); 320 if (!InResume) 321 return; 322 break; 323 } 324 // In async lowering this does nothing. 325 case coro::ABI::Async: 326 break; 327 // In continuation-lowering, this frees the continuation storage. 328 case coro::ABI::Retcon: 329 case coro::ABI::RetconOnce: 330 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG); 331 break; 332 } 333 334 // If coro.end has an associated bundle, add cleanupret instruction. 
335 if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) { 336 auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]); 337 auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr); 338 End->getParent()->splitBasicBlock(End); 339 CleanupRet->getParent()->getTerminator()->eraseFromParent(); 340 } 341 } 342 343 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape, 344 Value *FramePtr, bool InResume, CallGraph *CG) { 345 if (End->isUnwind()) 346 replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG); 347 else 348 replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG); 349 350 auto &Context = End->getContext(); 351 End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context) 352 : ConstantInt::getFalse(Context)); 353 End->eraseFromParent(); 354 } 355 356 // Create an entry block for a resume function with a switch that will jump to 357 // suspend points. 358 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) { 359 assert(Shape.ABI == coro::ABI::Switch); 360 LLVMContext &C = F.getContext(); 361 362 // resume.entry: 363 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, 364 // i32 2 365 // % index = load i32, i32* %index.addr 366 // switch i32 %index, label %unreachable [ 367 // i32 0, label %resume.0 368 // i32 1, label %resume.1 369 // ... 
370 // ] 371 372 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F); 373 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F); 374 375 IRBuilder<> Builder(NewEntry); 376 auto *FramePtr = Shape.FramePtr; 377 auto *FrameTy = Shape.FrameTy; 378 auto *GepIndex = Builder.CreateStructGEP( 379 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr"); 380 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index"); 381 auto *Switch = 382 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size()); 383 Shape.SwitchLowering.ResumeSwitch = Switch; 384 385 size_t SuspendIndex = 0; 386 for (auto *AnyS : Shape.CoroSuspends) { 387 auto *S = cast<CoroSuspendInst>(AnyS); 388 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex); 389 390 // Replace CoroSave with a store to Index: 391 // %index.addr = getelementptr %f.frame... (index field number) 392 // store i32 0, i32* %index.addr1 393 auto *Save = S->getCoroSave(); 394 Builder.SetInsertPoint(Save); 395 if (S->isFinal()) { 396 // The coroutine should be marked done if it reaches the final suspend 397 // point. 
398 markCoroutineAsDone(Builder, Shape, FramePtr); 399 } else { 400 auto *GepIndex = Builder.CreateStructGEP( 401 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr"); 402 Builder.CreateStore(IndexVal, GepIndex); 403 } 404 Save->replaceAllUsesWith(ConstantTokenNone::get(C)); 405 Save->eraseFromParent(); 406 407 // Split block before and after coro.suspend and add a jump from an entry 408 // switch: 409 // 410 // whateverBB: 411 // whatever 412 // %0 = call i8 @llvm.coro.suspend(token none, i1 false) 413 // switch i8 %0, label %suspend[i8 0, label %resume 414 // i8 1, label %cleanup] 415 // becomes: 416 // 417 // whateverBB: 418 // whatever 419 // br label %resume.0.landing 420 // 421 // resume.0: ; <--- jump from the switch in the resume.entry 422 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false) 423 // br label %resume.0.landing 424 // 425 // resume.0.landing: 426 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0] 427 // switch i8 % 1, label %suspend [i8 0, label %resume 428 // i8 1, label %cleanup] 429 430 auto *SuspendBB = S->getParent(); 431 auto *ResumeBB = 432 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex)); 433 auto *LandingBB = ResumeBB->splitBasicBlock( 434 S->getNextNode(), ResumeBB->getName() + Twine(".landing")); 435 Switch->addCase(IndexVal, ResumeBB); 436 437 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB); 438 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front()); 439 S->replaceAllUsesWith(PN); 440 PN->addIncoming(Builder.getInt8(-1), SuspendBB); 441 PN->addIncoming(S, ResumeBB); 442 443 ++SuspendIndex; 444 } 445 446 Builder.SetInsertPoint(UnreachBB); 447 Builder.CreateUnreachable(); 448 449 Shape.SwitchLowering.ResumeEntryBlock = NewEntry; 450 } 451 452 453 // Rewrite final suspend point handling. We do not use suspend index to 454 // represent the final suspend point. 
Instead we zero-out ResumeFnAddr in the 455 // coroutine frame, since it is undefined behavior to resume a coroutine 456 // suspended at the final suspend point. Thus, in the resume function, we can 457 // simply remove the last case (when coro::Shape is built, the final suspend 458 // point (if present) is always the last element of CoroSuspends array). 459 // In the destroy function, we add a code sequence to check if ResumeFnAddress 460 // is Null, and if so, jump to the appropriate label to handle cleanup from the 461 // final suspend point. 462 void CoroCloner::handleFinalSuspend() { 463 assert(Shape.ABI == coro::ABI::Switch && 464 Shape.SwitchLowering.HasFinalSuspend); 465 auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]); 466 auto FinalCaseIt = std::prev(Switch->case_end()); 467 BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor(); 468 Switch->removeCase(FinalCaseIt); 469 if (isSwitchDestroyFunction()) { 470 BasicBlock *OldSwitchBB = Switch->getParent(); 471 auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch"); 472 Builder.SetInsertPoint(OldSwitchBB->getTerminator()); 473 auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr, 474 coro::Shape::SwitchFieldIndex::Resume, 475 "ResumeFn.addr"); 476 auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(), 477 GepIndex); 478 auto *Cond = Builder.CreateIsNull(Load); 479 Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB); 480 OldSwitchBB->getTerminator()->eraseFromParent(); 481 } 482 } 483 484 static FunctionType * 485 getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) { 486 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend); 487 auto *StructTy = cast<StructType>(AsyncSuspend->getType()); 488 auto &Context = Suspend->getParent()->getParent()->getContext(); 489 auto *VoidTy = Type::getVoidTy(Context); 490 return FunctionType::get(VoidTy, StructTy->elements(), false); 491 } 492 493 static Function *createCloneDeclaration(Function &OrigF, 
coro::Shape &Shape, 494 const Twine &Suffix, 495 Module::iterator InsertBefore, 496 AnyCoroSuspendInst *ActiveSuspend) { 497 Module *M = OrigF.getParent(); 498 auto *FnTy = (Shape.ABI != coro::ABI::Async) 499 ? Shape.getResumeFunctionType() 500 : getFunctionTypeFromAsyncSuspend(ActiveSuspend); 501 502 Function *NewF = 503 Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage, 504 OrigF.getName() + Suffix); 505 if (Shape.ABI != coro::ABI::Async) 506 NewF->addParamAttr(0, Attribute::NonNull); 507 508 // For the async lowering ABI we can't guarantee that the context argument is 509 // not access via a different pointer not based on the argument. 510 if (Shape.ABI != coro::ABI::Async) 511 NewF->addParamAttr(0, Attribute::NoAlias); 512 513 M->getFunctionList().insert(InsertBefore, NewF); 514 515 return NewF; 516 } 517 518 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the 519 /// arguments to the continuation function. 520 /// 521 /// This assumes that the builder has a meaningful insertion point. 522 void CoroCloner::replaceRetconOrAsyncSuspendUses() { 523 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce || 524 Shape.ABI == coro::ABI::Async); 525 526 auto NewS = VMap[ActiveSuspend]; 527 if (NewS->use_empty()) return; 528 529 // Copy out all the continuation arguments after the buffer pointer into 530 // an easily-indexed data structure for convenience. 531 SmallVector<Value*, 8> Args; 532 // The async ABI includes all arguments -- including the first argument. 533 bool IsAsyncABI = Shape.ABI == coro::ABI::Async; 534 for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()), 535 E = NewF->arg_end(); 536 I != E; ++I) 537 Args.push_back(&*I); 538 539 // If the suspend returns a single scalar value, we can just do a simple 540 // replacement. 
541 if (!isa<StructType>(NewS->getType())) { 542 assert(Args.size() == 1); 543 NewS->replaceAllUsesWith(Args.front()); 544 return; 545 } 546 547 // Try to peephole extracts of an aggregate return. 548 for (Use &U : llvm::make_early_inc_range(NewS->uses())) { 549 auto *EVI = dyn_cast<ExtractValueInst>(U.getUser()); 550 if (!EVI || EVI->getNumIndices() != 1) 551 continue; 552 553 EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]); 554 EVI->eraseFromParent(); 555 } 556 557 // If we have no remaining uses, we're done. 558 if (NewS->use_empty()) return; 559 560 // Otherwise, we need to create an aggregate. 561 Value *Agg = UndefValue::get(NewS->getType()); 562 for (size_t I = 0, E = Args.size(); I != E; ++I) 563 Agg = Builder.CreateInsertValue(Agg, Args[I], I); 564 565 NewS->replaceAllUsesWith(Agg); 566 } 567 568 void CoroCloner::replaceCoroSuspends() { 569 Value *SuspendResult; 570 571 switch (Shape.ABI) { 572 // In switch lowering, replace coro.suspend with the appropriate value 573 // for the type of function we're extracting. 574 // Replacing coro.suspend with (0) will result in control flow proceeding to 575 // a resume label associated with a suspend point, replacing it with (1) will 576 // result in control flow proceeding to a cleanup label associated with this 577 // suspend point. 578 case coro::ABI::Switch: 579 SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0); 580 break; 581 582 // In async lowering there are no uses of the result. 583 case coro::ABI::Async: 584 return; 585 586 // In returned-continuation lowering, the arguments from earlier 587 // continuations are theoretically arbitrary, and they should have been 588 // spilled. 589 case coro::ABI::RetconOnce: 590 case coro::ABI::Retcon: 591 return; 592 } 593 594 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) { 595 // The active suspend was handled earlier. 
596 if (CS == ActiveSuspend) continue; 597 598 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]); 599 MappedCS->replaceAllUsesWith(SuspendResult); 600 MappedCS->eraseFromParent(); 601 } 602 } 603 604 void CoroCloner::replaceCoroEnds() { 605 for (AnyCoroEndInst *CE : Shape.CoroEnds) { 606 // We use a null call graph because there's no call graph node for 607 // the cloned function yet. We'll just be rebuilding that later. 608 auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]); 609 replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr); 610 } 611 } 612 613 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape, 614 ValueToValueMapTy *VMap) { 615 if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty()) 616 return; 617 Value *CachedSlot = nullptr; 618 auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * { 619 if (CachedSlot) { 620 assert(CachedSlot->getType()->getPointerElementType() == ValueTy && 621 "multiple swifterror slots in function with different types"); 622 return CachedSlot; 623 } 624 625 // Check if the function has a swifterror argument. 626 for (auto &Arg : F.args()) { 627 if (Arg.isSwiftError()) { 628 CachedSlot = &Arg; 629 assert(Arg.getType()->getPointerElementType() == ValueTy && 630 "swifterror argument does not have expected type"); 631 return &Arg; 632 } 633 } 634 635 // Create a swifterror alloca. 636 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg()); 637 auto Alloca = Builder.CreateAlloca(ValueTy); 638 Alloca->setSwiftError(true); 639 640 CachedSlot = Alloca; 641 return Alloca; 642 }; 643 644 for (CallInst *Op : Shape.SwiftErrorOps) { 645 auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op; 646 IRBuilder<> Builder(MappedOp); 647 648 // If there are no arguments, this is a 'get' operation. 
649 Value *MappedResult; 650 if (Op->arg_empty()) { 651 auto ValueTy = Op->getType(); 652 auto Slot = getSwiftErrorSlot(ValueTy); 653 MappedResult = Builder.CreateLoad(ValueTy, Slot); 654 } else { 655 assert(Op->arg_size() == 1); 656 auto Value = MappedOp->getArgOperand(0); 657 auto ValueTy = Value->getType(); 658 auto Slot = getSwiftErrorSlot(ValueTy); 659 Builder.CreateStore(Value, Slot); 660 MappedResult = Slot; 661 } 662 663 MappedOp->replaceAllUsesWith(MappedResult); 664 MappedOp->eraseFromParent(); 665 } 666 667 // If we're updating the original function, we've invalidated SwiftErrorOps. 668 if (VMap == nullptr) { 669 Shape.SwiftErrorOps.clear(); 670 } 671 } 672 673 void CoroCloner::replaceSwiftErrorOps() { 674 ::replaceSwiftErrorOps(*NewF, Shape, &VMap); 675 } 676 677 void CoroCloner::salvageDebugInfo() { 678 SmallVector<DbgVariableIntrinsic *, 8> Worklist; 679 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache; 680 for (auto &BB : *NewF) 681 for (auto &I : BB) 682 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I)) 683 Worklist.push_back(DVI); 684 for (DbgVariableIntrinsic *DVI : Worklist) 685 coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame); 686 687 // Remove all salvaged dbg.declare intrinsics that became 688 // either unreachable or stale due to the CoroSplit transformation. 689 DominatorTree DomTree(*NewF); 690 auto IsUnreachableBlock = [&](BasicBlock *BB) { 691 return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr, 692 &DomTree); 693 }; 694 for (DbgVariableIntrinsic *DVI : Worklist) { 695 if (IsUnreachableBlock(DVI->getParent())) 696 DVI->eraseFromParent(); 697 else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) { 698 // Count all non-debuginfo uses in reachable blocks. 
699 unsigned Uses = 0; 700 for (auto *User : DVI->getVariableLocationOp(0)->users()) 701 if (auto *I = dyn_cast<Instruction>(User)) 702 if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent())) 703 ++Uses; 704 if (!Uses) 705 DVI->eraseFromParent(); 706 } 707 } 708 } 709 710 void CoroCloner::replaceEntryBlock() { 711 // In the original function, the AllocaSpillBlock is a block immediately 712 // following the allocation of the frame object which defines GEPs for 713 // all the allocas that have been moved into the frame, and it ends by 714 // branching to the original beginning of the coroutine. Make this 715 // the entry block of the cloned function. 716 auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]); 717 auto *OldEntry = &NewF->getEntryBlock(); 718 Entry->setName("entry" + Suffix); 719 Entry->moveBefore(OldEntry); 720 Entry->getTerminator()->eraseFromParent(); 721 722 // Clear all predecessors of the new entry block. There should be 723 // exactly one predecessor, which we created when splitting out 724 // AllocaSpillBlock to begin with. 725 assert(Entry->hasOneUse()); 726 auto BranchToEntry = cast<BranchInst>(Entry->user_back()); 727 assert(BranchToEntry->isUnconditional()); 728 Builder.SetInsertPoint(BranchToEntry); 729 Builder.CreateUnreachable(); 730 BranchToEntry->eraseFromParent(); 731 732 // Branch from the entry to the appropriate place. 733 Builder.SetInsertPoint(Entry); 734 switch (Shape.ABI) { 735 case coro::ABI::Switch: { 736 // In switch-lowering, we built a resume-entry block in the original 737 // function. Make the entry block branch to this. 738 auto *SwitchBB = 739 cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]); 740 Builder.CreateBr(SwitchBB); 741 break; 742 } 743 case coro::ABI::Async: 744 case coro::ABI::Retcon: 745 case coro::ABI::RetconOnce: { 746 // In continuation ABIs, we want to branch to immediately after the 747 // active suspend point. 
Earlier phases will have put the suspend in its 748 // own basic block, so just thread our jump directly to its successor. 749 assert((Shape.ABI == coro::ABI::Async && 750 isa<CoroSuspendAsyncInst>(ActiveSuspend)) || 751 ((Shape.ABI == coro::ABI::Retcon || 752 Shape.ABI == coro::ABI::RetconOnce) && 753 isa<CoroSuspendRetconInst>(ActiveSuspend))); 754 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]); 755 auto Branch = cast<BranchInst>(MappedCS->getNextNode()); 756 assert(Branch->isUnconditional()); 757 Builder.CreateBr(Branch->getSuccessor(0)); 758 break; 759 } 760 } 761 762 // Any static alloca that's still being used but not reachable from the new 763 // entry needs to be moved to the new entry. 764 Function *F = OldEntry->getParent(); 765 DominatorTree DT{*F}; 766 for (Instruction &I : llvm::make_early_inc_range(instructions(F))) { 767 auto *Alloca = dyn_cast<AllocaInst>(&I); 768 if (!Alloca || I.use_empty()) 769 continue; 770 if (DT.isReachableFromEntry(I.getParent()) || 771 !isa<ConstantInt>(Alloca->getArraySize())) 772 continue; 773 I.moveBefore(*Entry, Entry->getFirstInsertionPt()); 774 } 775 } 776 777 /// Derive the value of the new frame pointer. 778 Value *CoroCloner::deriveNewFramePointer() { 779 // Builder should be inserting to the front of the new entry block. 780 781 switch (Shape.ABI) { 782 // In switch-lowering, the argument is the frame pointer. 783 case coro::ABI::Switch: 784 return &*NewF->arg_begin(); 785 // In async-lowering, one of the arguments is an async context as determined 786 // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of 787 // the resume function from the async context projection function associated 788 // with the active suspend. The frame is located as a tail to the async 789 // context header. 
790 case coro::ABI::Async: { 791 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend); 792 auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff; 793 auto *CalleeContext = NewF->getArg(ContextIdx); 794 auto *FramePtrTy = Shape.FrameTy->getPointerTo(); 795 auto *ProjectionFunc = 796 ActiveAsyncSuspend->getAsyncContextProjectionFunction(); 797 auto DbgLoc = 798 cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc(); 799 // Calling i8* (i8*) 800 auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(), 801 ProjectionFunc, CalleeContext); 802 CallerContext->setCallingConv(ProjectionFunc->getCallingConv()); 803 CallerContext->setDebugLoc(DbgLoc); 804 // The frame is located after the async_context header. 805 auto &Context = Builder.getContext(); 806 auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32( 807 Type::getInt8Ty(Context), CallerContext, 808 Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr"); 809 // Inline the projection function. 810 InlineFunctionInfo InlineInfo; 811 auto InlineRes = InlineFunction(*CallerContext, InlineInfo); 812 assert(InlineRes.isSuccess()); 813 (void)InlineRes; 814 return Builder.CreateBitCast(FramePtrAddr, FramePtrTy); 815 } 816 // In continuation-lowering, the argument is the opaque storage. 817 case coro::ABI::Retcon: 818 case coro::ABI::RetconOnce: { 819 Argument *NewStorage = &*NewF->arg_begin(); 820 auto FramePtrTy = Shape.FrameTy->getPointerTo(); 821 822 // If the storage is inline, just bitcast to the storage to the frame type. 823 if (Shape.RetconLowering.IsFrameInlineInStorage) 824 return Builder.CreateBitCast(NewStorage, FramePtrTy); 825 826 // Otherwise, load the real frame from the opaque storage. 
827 auto FramePtrPtr = 828 Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo()); 829 return Builder.CreateLoad(FramePtrTy, FramePtrPtr); 830 } 831 } 832 llvm_unreachable("bad ABI"); 833 } 834 835 static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context, 836 unsigned ParamIndex, 837 uint64_t Size, Align Alignment) { 838 AttrBuilder ParamAttrs; 839 ParamAttrs.addAttribute(Attribute::NonNull); 840 ParamAttrs.addAttribute(Attribute::NoAlias); 841 ParamAttrs.addAlignmentAttr(Alignment); 842 ParamAttrs.addDereferenceableAttr(Size); 843 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs); 844 } 845 846 static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context, 847 unsigned ParamIndex) { 848 AttrBuilder ParamAttrs; 849 ParamAttrs.addAttribute(Attribute::SwiftAsync); 850 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs); 851 } 852 853 static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context, 854 unsigned ParamIndex) { 855 AttrBuilder ParamAttrs; 856 ParamAttrs.addAttribute(Attribute::SwiftSelf); 857 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs); 858 } 859 860 /// Clone the body of the original function into a resume function of 861 /// some sort. 862 void CoroCloner::create() { 863 // Create the new function if we don't already have one. 864 if (!NewF) { 865 NewF = createCloneDeclaration(OrigF, Shape, Suffix, 866 OrigF.getParent()->end(), ActiveSuspend); 867 } 868 869 // Replace all args with undefs. The buildCoroutineFrame algorithm already 870 // rewritten access to the args that occurs after suspend points with loads 871 // and stores to/from the coroutine frame. 872 for (Argument &A : OrigF.args()) 873 VMap[&A] = UndefValue::get(A.getType()); 874 875 SmallVector<ReturnInst *, 4> Returns; 876 877 // Ignore attempts to change certain attributes of the function. 878 // TODO: maybe there should be a way to suppress this during cloning? 
879 auto savedVisibility = NewF->getVisibility(); 880 auto savedUnnamedAddr = NewF->getUnnamedAddr(); 881 auto savedDLLStorageClass = NewF->getDLLStorageClass(); 882 883 // NewF's linkage (which CloneFunctionInto does *not* change) might not 884 // be compatible with the visibility of OrigF (which it *does* change), 885 // so protect against that. 886 auto savedLinkage = NewF->getLinkage(); 887 NewF->setLinkage(llvm::GlobalValue::ExternalLinkage); 888 889 CloneFunctionInto(NewF, &OrigF, VMap, 890 CloneFunctionChangeType::LocalChangesOnly, Returns); 891 892 auto &Context = NewF->getContext(); 893 894 // For async functions / continuations, adjust the scope line of the 895 // clone to the line number of the suspend point. However, only 896 // adjust the scope line when the files are the same. This ensures 897 // line number and file name belong together. The scope line is 898 // associated with all pre-prologue instructions. This avoids a jump 899 // in the linetable from the function declaration to the suspend point. 900 if (DISubprogram *SP = NewF->getSubprogram()) { 901 assert(SP != OrigF.getSubprogram() && SP->isDistinct()); 902 if (ActiveSuspend) 903 if (auto DL = ActiveSuspend->getDebugLoc()) 904 if (SP->getFile() == DL->getFile()) 905 SP->setScopeLine(DL->getLine()); 906 // Update the linkage name to reflect the modified symbol name. It 907 // is necessary to update the linkage name in Swift, since the 908 // mangling changes for resume functions. It might also be the 909 // right thing to do in C++, but due to a limitation in LLVM's 910 // AsmPrinter we can only do this if the function doesn't have an 911 // abstract specification, since the DWARF backend expects the 912 // abstract specification to contain the linkage name and asserts 913 // that they are identical. 
914 if (!SP->getDeclaration() && SP->getUnit() && 915 SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift) 916 SP->replaceLinkageName(MDString::get(Context, NewF->getName())); 917 } 918 919 NewF->setLinkage(savedLinkage); 920 NewF->setVisibility(savedVisibility); 921 NewF->setUnnamedAddr(savedUnnamedAddr); 922 NewF->setDLLStorageClass(savedDLLStorageClass); 923 924 // Replace the attributes of the new function: 925 auto OrigAttrs = NewF->getAttributes(); 926 auto NewAttrs = AttributeList(); 927 928 switch (Shape.ABI) { 929 case coro::ABI::Switch: 930 // Bootstrap attributes by copying function attributes from the 931 // original function. This should include optimization settings and so on. 932 NewAttrs = NewAttrs.addFnAttributes(Context, OrigAttrs.getFnAttrs()); 933 934 addFramePointerAttrs(NewAttrs, Context, 0, 935 Shape.FrameSize, Shape.FrameAlign); 936 break; 937 case coro::ABI::Async: { 938 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend); 939 if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo, 940 Attribute::SwiftAsync)) { 941 uint32_t ArgAttributeIndices = 942 ActiveAsyncSuspend->getStorageArgumentIndex(); 943 auto ContextArgIndex = ArgAttributeIndices & 0xff; 944 addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex); 945 946 // `swiftasync` must preceed `swiftself` so 0 is not a valid index for 947 // `swiftself`. 948 auto SwiftSelfIndex = ArgAttributeIndices >> 8; 949 if (SwiftSelfIndex) 950 addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex); 951 } 952 953 // Transfer the original function's attributes. 954 auto FnAttrs = OrigF.getAttributes().getFnAttrs(); 955 NewAttrs = NewAttrs.addFnAttributes(Context, FnAttrs); 956 break; 957 } 958 case coro::ABI::Retcon: 959 case coro::ABI::RetconOnce: 960 // If we have a continuation prototype, just use its attributes, 961 // full-stop. 
962 NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes(); 963 964 addFramePointerAttrs(NewAttrs, Context, 0, 965 Shape.getRetconCoroId()->getStorageSize(), 966 Shape.getRetconCoroId()->getStorageAlignment()); 967 break; 968 } 969 970 switch (Shape.ABI) { 971 // In these ABIs, the cloned functions always return 'void', and the 972 // existing return sites are meaningless. Note that for unique 973 // continuations, this includes the returns associated with suspends; 974 // this is fine because we can't suspend twice. 975 case coro::ABI::Switch: 976 case coro::ABI::RetconOnce: 977 // Remove old returns. 978 for (ReturnInst *Return : Returns) 979 changeToUnreachable(Return); 980 break; 981 982 // With multi-suspend continuations, we'll already have eliminated the 983 // original returns and inserted returns before all the suspend points, 984 // so we want to leave any returns in place. 985 case coro::ABI::Retcon: 986 break; 987 // Async lowering will insert musttail call functions at all suspend points 988 // followed by a return. 989 // Don't change returns to unreachable because that will trip up the verifier. 990 // These returns should be unreachable from the clone. 991 case coro::ABI::Async: 992 break; 993 } 994 995 NewF->setAttributes(NewAttrs); 996 NewF->setCallingConv(Shape.getResumeFunctionCC()); 997 998 // Set up the new entry block. 999 replaceEntryBlock(); 1000 1001 Builder.SetInsertPoint(&NewF->getEntryBlock().front()); 1002 NewFramePtr = deriveNewFramePointer(); 1003 1004 // Remap frame pointer. 1005 Value *OldFramePtr = VMap[Shape.FramePtr]; 1006 NewFramePtr->takeName(OldFramePtr); 1007 OldFramePtr->replaceAllUsesWith(NewFramePtr); 1008 1009 // Remap vFrame pointer. 
1010 auto *NewVFrame = Builder.CreateBitCast( 1011 NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame"); 1012 Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]); 1013 OldVFrame->replaceAllUsesWith(NewVFrame); 1014 1015 switch (Shape.ABI) { 1016 case coro::ABI::Switch: 1017 // Rewrite final suspend handling as it is not done via switch (allows to 1018 // remove final case from the switch, since it is undefined behavior to 1019 // resume the coroutine suspended at the final suspend point. 1020 if (Shape.SwitchLowering.HasFinalSuspend) 1021 handleFinalSuspend(); 1022 break; 1023 case coro::ABI::Async: 1024 case coro::ABI::Retcon: 1025 case coro::ABI::RetconOnce: 1026 // Replace uses of the active suspend with the corresponding 1027 // continuation-function arguments. 1028 assert(ActiveSuspend != nullptr && 1029 "no active suspend when lowering a continuation-style coroutine"); 1030 replaceRetconOrAsyncSuspendUses(); 1031 break; 1032 } 1033 1034 // Handle suspends. 1035 replaceCoroSuspends(); 1036 1037 // Handle swifterror. 1038 replaceSwiftErrorOps(); 1039 1040 // Remove coro.end intrinsics. 1041 replaceCoroEnds(); 1042 1043 // Salvage debug info that points into the coroutine frame. 1044 salvageDebugInfo(); 1045 1046 // Eliminate coro.free from the clones, replacing it with 'null' in cleanup, 1047 // to suppress deallocation code. 1048 if (Shape.ABI == coro::ABI::Switch) 1049 coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]), 1050 /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup); 1051 } 1052 1053 // Create a resume clone by cloning the body of the original function, setting 1054 // new entry block and replacing coro.suspend an appropriate value to force 1055 // resume or cleanup pass for every suspend point. 
1056 static Function *createClone(Function &F, const Twine &Suffix, 1057 coro::Shape &Shape, CoroCloner::Kind FKind) { 1058 CoroCloner Cloner(F, Suffix, Shape, FKind); 1059 Cloner.create(); 1060 return Cloner.getFunction(); 1061 } 1062 1063 /// Remove calls to llvm.coro.end in the original function. 1064 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) { 1065 for (auto End : Shape.CoroEnds) { 1066 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG); 1067 } 1068 } 1069 1070 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) { 1071 assert(Shape.ABI == coro::ABI::Async); 1072 1073 auto *FuncPtrStruct = cast<ConstantStruct>( 1074 Shape.AsyncLowering.AsyncFuncPointer->getInitializer()); 1075 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0); 1076 auto *OrigContextSize = FuncPtrStruct->getOperand(1); 1077 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(), 1078 Shape.AsyncLowering.ContextSize); 1079 auto *NewFuncPtrStruct = ConstantStruct::get( 1080 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize); 1081 1082 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct); 1083 } 1084 1085 static void replaceFrameSize(coro::Shape &Shape) { 1086 if (Shape.ABI == coro::ABI::Async) 1087 updateAsyncFuncPointerContextSize(Shape); 1088 1089 if (Shape.CoroSizes.empty()) 1090 return; 1091 1092 // In the same function all coro.sizes should have the same result type. 
1093 auto *SizeIntrin = Shape.CoroSizes.back(); 1094 Module *M = SizeIntrin->getModule(); 1095 const DataLayout &DL = M->getDataLayout(); 1096 auto Size = DL.getTypeAllocSize(Shape.FrameTy); 1097 auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size); 1098 1099 for (CoroSizeInst *CS : Shape.CoroSizes) { 1100 CS->replaceAllUsesWith(SizeConstant); 1101 CS->eraseFromParent(); 1102 } 1103 } 1104 1105 // Create a global constant array containing pointers to functions provided and 1106 // set Info parameter of CoroBegin to point at this constant. Example: 1107 // 1108 // @f.resumers = internal constant [2 x void(%f.frame*)*] 1109 // [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy] 1110 // define void @f() { 1111 // ... 1112 // call i8* @llvm.coro.begin(i8* null, i32 0, i8* null, 1113 // i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*)) 1114 // 1115 // Assumes that all the functions have the same signature. 1116 static void setCoroInfo(Function &F, coro::Shape &Shape, 1117 ArrayRef<Function *> Fns) { 1118 // This only works under the switch-lowering ABI because coro elision 1119 // only works on the switch-lowering ABI. 1120 assert(Shape.ABI == coro::ABI::Switch); 1121 1122 SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end()); 1123 assert(!Args.empty()); 1124 Function *Part = *Fns.begin(); 1125 Module *M = Part->getParent(); 1126 auto *ArrTy = ArrayType::get(Part->getType(), Args.size()); 1127 1128 auto *ConstVal = ConstantArray::get(ArrTy, Args); 1129 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true, 1130 GlobalVariable::PrivateLinkage, ConstVal, 1131 F.getName() + Twine(".resumers")); 1132 1133 // Update coro.begin instruction to refer to this constant. 1134 LLVMContext &C = F.getContext(); 1135 auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C)); 1136 Shape.getSwitchCoroId()->setInfo(BC); 1137 } 1138 1139 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame. 
1140 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn, 1141 Function *DestroyFn, Function *CleanupFn) { 1142 assert(Shape.ABI == coro::ABI::Switch); 1143 1144 IRBuilder<> Builder(Shape.FramePtr->getNextNode()); 1145 auto *ResumeAddr = Builder.CreateStructGEP( 1146 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume, 1147 "resume.addr"); 1148 Builder.CreateStore(ResumeFn, ResumeAddr); 1149 1150 Value *DestroyOrCleanupFn = DestroyFn; 1151 1152 CoroIdInst *CoroId = Shape.getSwitchCoroId(); 1153 if (CoroAllocInst *CA = CoroId->getCoroAlloc()) { 1154 // If there is a CoroAlloc and it returns false (meaning we elide the 1155 // allocation, use CleanupFn instead of DestroyFn). 1156 DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn); 1157 } 1158 1159 auto *DestroyAddr = Builder.CreateStructGEP( 1160 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy, 1161 "destroy.addr"); 1162 Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr); 1163 } 1164 1165 static void postSplitCleanup(Function &F) { 1166 removeUnreachableBlocks(F); 1167 1168 #ifndef NDEBUG 1169 // For now, we do a mandatory verification step because we don't 1170 // entirely trust this pass. Note that we don't want to add a verifier 1171 // pass to FPM below because it will also verify all the global data. 1172 if (verifyFunction(F, &errs())) 1173 report_fatal_error("Broken function"); 1174 #endif 1175 } 1176 1177 // Assuming we arrived at the block NewBlock from Prev instruction, store 1178 // PHI's incoming values in the ResolvedValues map. 1179 static void 1180 scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock, 1181 DenseMap<Value *, Value *> &ResolvedValues) { 1182 auto *PrevBB = Prev->getParent(); 1183 for (PHINode &PN : NewBlock->phis()) { 1184 auto V = PN.getIncomingValueForBlock(PrevBB); 1185 // See if we already resolved it. 
1186 auto VI = ResolvedValues.find(V); 1187 if (VI != ResolvedValues.end()) 1188 V = VI->second; 1189 // Remember the value. 1190 ResolvedValues[&PN] = V; 1191 } 1192 } 1193 1194 // Replace a sequence of branches leading to a ret, with a clone of a ret 1195 // instruction. Suspend instruction represented by a switch, track the PHI 1196 // values and select the correct case successor when possible. 1197 static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) { 1198 DenseMap<Value *, Value *> ResolvedValues; 1199 BasicBlock *UnconditionalSucc = nullptr; 1200 1201 Instruction *I = InitialInst; 1202 while (I->isTerminator() || 1203 (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) { 1204 if (isa<ReturnInst>(I)) { 1205 if (I != InitialInst) { 1206 // If InitialInst is an unconditional branch, 1207 // remove PHI values that come from basic block of InitialInst 1208 if (UnconditionalSucc) 1209 UnconditionalSucc->removePredecessor(InitialInst->getParent(), true); 1210 ReplaceInstWithInst(InitialInst, I->clone()); 1211 } 1212 return true; 1213 } 1214 if (auto *BR = dyn_cast<BranchInst>(I)) { 1215 if (BR->isUnconditional()) { 1216 BasicBlock *BB = BR->getSuccessor(0); 1217 if (I == InitialInst) 1218 UnconditionalSucc = BB; 1219 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues); 1220 I = BB->getFirstNonPHIOrDbgOrLifetime(); 1221 continue; 1222 } 1223 } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) { 1224 auto *BR = dyn_cast<BranchInst>(I->getNextNode()); 1225 if (BR && BR->isConditional() && CondCmp == BR->getCondition()) { 1226 // If the case number of suspended switch instruction is reduced to 1227 // 1, then it is simplified to CmpInst in llvm::ConstantFoldTerminator. 1228 // And the comparsion looks like : %cond = icmp eq i8 %V, constant. 
1229 ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1)); 1230 if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) { 1231 Value *V = CondCmp->getOperand(0); 1232 auto it = ResolvedValues.find(V); 1233 if (it != ResolvedValues.end()) 1234 V = it->second; 1235 1236 if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) { 1237 BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue()) 1238 ? BR->getSuccessor(0) 1239 : BR->getSuccessor(1); 1240 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues); 1241 I = BB->getFirstNonPHIOrDbgOrLifetime(); 1242 continue; 1243 } 1244 } 1245 } 1246 } else if (auto *SI = dyn_cast<SwitchInst>(I)) { 1247 Value *V = SI->getCondition(); 1248 auto it = ResolvedValues.find(V); 1249 if (it != ResolvedValues.end()) 1250 V = it->second; 1251 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) { 1252 BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor(); 1253 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues); 1254 I = BB->getFirstNonPHIOrDbgOrLifetime(); 1255 continue; 1256 } 1257 } 1258 return false; 1259 } 1260 return false; 1261 } 1262 1263 // Check whether CI obeys the rules of musttail attribute. 1264 static bool shouldBeMustTail(const CallInst &CI, const Function &F) { 1265 if (CI.isInlineAsm()) 1266 return false; 1267 1268 // Match prototypes and calling conventions of resume function. 1269 FunctionType *CalleeTy = CI.getFunctionType(); 1270 if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1)) 1271 return false; 1272 1273 Type *CalleeParmTy = CalleeTy->getParamType(0); 1274 if (!CalleeParmTy->isPointerTy() || 1275 (CalleeParmTy->getPointerAddressSpace() != 0)) 1276 return false; 1277 1278 if (CI.getCallingConv() != F.getCallingConv()) 1279 return false; 1280 1281 // CI should not has any ABI-impacting function attributes. 
1282 static const Attribute::AttrKind ABIAttrs[] = { 1283 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca, 1284 Attribute::Preallocated, Attribute::InReg, Attribute::Returned, 1285 Attribute::SwiftSelf, Attribute::SwiftError}; 1286 AttributeList Attrs = CI.getAttributes(); 1287 for (auto AK : ABIAttrs) 1288 if (Attrs.hasParamAttr(0, AK)) 1289 return false; 1290 1291 return true; 1292 } 1293 1294 // Add musttail to any resume instructions that is immediately followed by a 1295 // suspend (i.e. ret). We do this even in -O0 to support guaranteed tail call 1296 // for symmetrical coroutine control transfer (C++ Coroutines TS extension). 1297 // This transformation is done only in the resume part of the coroutine that has 1298 // identical signature and calling convention as the coro.resume call. 1299 static void addMustTailToCoroResumes(Function &F) { 1300 bool changed = false; 1301 1302 // Collect potential resume instructions. 1303 SmallVector<CallInst *, 4> Resumes; 1304 for (auto &I : instructions(F)) 1305 if (auto *Call = dyn_cast<CallInst>(&I)) 1306 if (shouldBeMustTail(*Call, F)) 1307 Resumes.push_back(Call); 1308 1309 // Set musttail on those that are followed by a ret instruction. 1310 for (CallInst *Call : Resumes) 1311 if (simplifyTerminatorLeadingToRet(Call->getNextNode())) { 1312 Call->setTailCallKind(CallInst::TCK_MustTail); 1313 changed = true; 1314 } 1315 1316 if (changed) 1317 removeUnreachableBlocks(F); 1318 } 1319 1320 // Coroutine has no suspend points. Remove heap allocation for the coroutine 1321 // frame if possible. 
1322 static void handleNoSuspendCoroutine(coro::Shape &Shape) { 1323 auto *CoroBegin = Shape.CoroBegin; 1324 auto *CoroId = CoroBegin->getId(); 1325 auto *AllocInst = CoroId->getCoroAlloc(); 1326 switch (Shape.ABI) { 1327 case coro::ABI::Switch: { 1328 auto SwitchId = cast<CoroIdInst>(CoroId); 1329 coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr); 1330 if (AllocInst) { 1331 IRBuilder<> Builder(AllocInst); 1332 auto *Frame = Builder.CreateAlloca(Shape.FrameTy); 1333 Frame->setAlignment(Shape.FrameAlign); 1334 auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy()); 1335 AllocInst->replaceAllUsesWith(Builder.getFalse()); 1336 AllocInst->eraseFromParent(); 1337 CoroBegin->replaceAllUsesWith(VFrame); 1338 } else { 1339 CoroBegin->replaceAllUsesWith(CoroBegin->getMem()); 1340 } 1341 1342 break; 1343 } 1344 case coro::ABI::Async: 1345 case coro::ABI::Retcon: 1346 case coro::ABI::RetconOnce: 1347 CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType())); 1348 break; 1349 } 1350 1351 CoroBegin->eraseFromParent(); 1352 } 1353 1354 // SimplifySuspendPoint needs to check that there is no calls between 1355 // coro_save and coro_suspend, since any of the calls may potentially resume 1356 // the coroutine and if that is the case we cannot eliminate the suspend point. 1357 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) { 1358 for (Instruction *I = From; I != To; I = I->getNextNode()) { 1359 // Assume that no intrinsic can resume the coroutine. 1360 if (isa<IntrinsicInst>(I)) 1361 continue; 1362 1363 if (isa<CallBase>(I)) 1364 return true; 1365 } 1366 return false; 1367 } 1368 1369 static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) { 1370 SmallPtrSet<BasicBlock *, 8> Set; 1371 SmallVector<BasicBlock *, 8> Worklist; 1372 1373 Set.insert(SaveBB); 1374 Worklist.push_back(ResDesBB); 1375 1376 // Accumulate all blocks between SaveBB and ResDesBB. 
Because CoroSaveIntr 1377 // returns a token consumed by suspend instruction, all blocks in between 1378 // will have to eventually hit SaveBB when going backwards from ResDesBB. 1379 while (!Worklist.empty()) { 1380 auto *BB = Worklist.pop_back_val(); 1381 Set.insert(BB); 1382 for (auto *Pred : predecessors(BB)) 1383 if (!Set.contains(Pred)) 1384 Worklist.push_back(Pred); 1385 } 1386 1387 // SaveBB and ResDesBB are checked separately in hasCallsBetween. 1388 Set.erase(SaveBB); 1389 Set.erase(ResDesBB); 1390 1391 for (auto *BB : Set) 1392 if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr)) 1393 return true; 1394 1395 return false; 1396 } 1397 1398 static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) { 1399 auto *SaveBB = Save->getParent(); 1400 auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent(); 1401 1402 if (SaveBB == ResumeOrDestroyBB) 1403 return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy); 1404 1405 // Any calls from Save to the end of the block? 1406 if (hasCallsInBlockBetween(Save->getNextNode(), nullptr)) 1407 return true; 1408 1409 // Any calls from begging of the block up to ResumeOrDestroy? 1410 if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(), 1411 ResumeOrDestroy)) 1412 return true; 1413 1414 // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB? 1415 if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB)) 1416 return true; 1417 1418 return false; 1419 } 1420 1421 // If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the 1422 // suspend point and replace it with nornal control flow. 
1423 static bool simplifySuspendPoint(CoroSuspendInst *Suspend, 1424 CoroBeginInst *CoroBegin) { 1425 Instruction *Prev = Suspend->getPrevNode(); 1426 if (!Prev) { 1427 auto *Pred = Suspend->getParent()->getSinglePredecessor(); 1428 if (!Pred) 1429 return false; 1430 Prev = Pred->getTerminator(); 1431 } 1432 1433 CallBase *CB = dyn_cast<CallBase>(Prev); 1434 if (!CB) 1435 return false; 1436 1437 auto *Callee = CB->getCalledOperand()->stripPointerCasts(); 1438 1439 // See if the callsite is for resumption or destruction of the coroutine. 1440 auto *SubFn = dyn_cast<CoroSubFnInst>(Callee); 1441 if (!SubFn) 1442 return false; 1443 1444 // Does not refer to the current coroutine, we cannot do anything with it. 1445 if (SubFn->getFrame() != CoroBegin) 1446 return false; 1447 1448 // See if the transformation is safe. Specifically, see if there are any 1449 // calls in between Save and CallInstr. They can potenitally resume the 1450 // coroutine rendering this optimization unsafe. 1451 auto *Save = Suspend->getCoroSave(); 1452 if (hasCallsBetween(Save, CB)) 1453 return false; 1454 1455 // Replace llvm.coro.suspend with the value that results in resumption over 1456 // the resume or cleanup path. 1457 Suspend->replaceAllUsesWith(SubFn->getRawIndex()); 1458 Suspend->eraseFromParent(); 1459 Save->eraseFromParent(); 1460 1461 // No longer need a call to coro.resume or coro.destroy. 1462 if (auto *Invoke = dyn_cast<InvokeInst>(CB)) { 1463 BranchInst::Create(Invoke->getNormalDest(), Invoke); 1464 } 1465 1466 // Grab the CalledValue from CB before erasing the CallInstr. 1467 auto *CalledValue = CB->getCalledOperand(); 1468 CB->eraseFromParent(); 1469 1470 // If no more users remove it. Usually it is a bitcast of SubFn. 1471 if (CalledValue != SubFn && CalledValue->user_empty()) 1472 if (auto *I = dyn_cast<Instruction>(CalledValue)) 1473 I->eraseFromParent(); 1474 1475 // Now we are good to remove SubFn. 
1476 if (SubFn->user_empty()) 1477 SubFn->eraseFromParent(); 1478 1479 return true; 1480 } 1481 1482 // Remove suspend points that are simplified. 1483 static void simplifySuspendPoints(coro::Shape &Shape) { 1484 // Currently, the only simplification we do is switch-lowering-specific. 1485 if (Shape.ABI != coro::ABI::Switch) 1486 return; 1487 1488 auto &S = Shape.CoroSuspends; 1489 size_t I = 0, N = S.size(); 1490 if (N == 0) 1491 return; 1492 while (true) { 1493 auto SI = cast<CoroSuspendInst>(S[I]); 1494 // Leave final.suspend to handleFinalSuspend since it is undefined behavior 1495 // to resume a coroutine suspended at the final suspend point. 1496 if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) { 1497 if (--N == I) 1498 break; 1499 std::swap(S[I], S[N]); 1500 continue; 1501 } 1502 if (++I == N) 1503 break; 1504 } 1505 S.resize(N); 1506 } 1507 1508 static void splitSwitchCoroutine(Function &F, coro::Shape &Shape, 1509 SmallVectorImpl<Function *> &Clones) { 1510 assert(Shape.ABI == coro::ABI::Switch); 1511 1512 createResumeEntryBlock(F, Shape); 1513 auto ResumeClone = createClone(F, ".resume", Shape, 1514 CoroCloner::Kind::SwitchResume); 1515 auto DestroyClone = createClone(F, ".destroy", Shape, 1516 CoroCloner::Kind::SwitchUnwind); 1517 auto CleanupClone = createClone(F, ".cleanup", Shape, 1518 CoroCloner::Kind::SwitchCleanup); 1519 1520 postSplitCleanup(*ResumeClone); 1521 postSplitCleanup(*DestroyClone); 1522 postSplitCleanup(*CleanupClone); 1523 1524 addMustTailToCoroResumes(*ResumeClone); 1525 1526 // Store addresses resume/destroy/cleanup functions in the coroutine frame. 
1527 updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone); 1528 1529 assert(Clones.empty()); 1530 Clones.push_back(ResumeClone); 1531 Clones.push_back(DestroyClone); 1532 Clones.push_back(CleanupClone); 1533 1534 // Create a constant array referring to resume/destroy/clone functions pointed 1535 // by the last argument of @llvm.coro.info, so that CoroElide pass can 1536 // determined correct function to call. 1537 setCoroInfo(F, Shape, Clones); 1538 } 1539 1540 static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend, 1541 Value *Continuation) { 1542 auto *ResumeIntrinsic = Suspend->getResumeFunction(); 1543 auto &Context = Suspend->getParent()->getParent()->getContext(); 1544 auto *Int8PtrTy = Type::getInt8PtrTy(Context); 1545 1546 IRBuilder<> Builder(ResumeIntrinsic); 1547 auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy); 1548 ResumeIntrinsic->replaceAllUsesWith(Val); 1549 ResumeIntrinsic->eraseFromParent(); 1550 Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg, 1551 UndefValue::get(Int8PtrTy)); 1552 } 1553 1554 /// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs. 1555 static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy, 1556 ArrayRef<Value *> FnArgs, 1557 SmallVectorImpl<Value *> &CallArgs) { 1558 size_t ArgIdx = 0; 1559 for (auto paramTy : FnTy->params()) { 1560 assert(ArgIdx < FnArgs.size()); 1561 if (paramTy != FnArgs[ArgIdx]->getType()) 1562 CallArgs.push_back( 1563 Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy)); 1564 else 1565 CallArgs.push_back(FnArgs[ArgIdx]); 1566 ++ArgIdx; 1567 } 1568 } 1569 1570 CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn, 1571 ArrayRef<Value *> Arguments, 1572 IRBuilder<> &Builder) { 1573 auto *FnTy = MustTailCallFn->getFunctionType(); 1574 // Coerce the arguments, llvm optimizations seem to ignore the types in 1575 // vaarg functions and throws away casts in optimized mode. 
1576 SmallVector<Value *, 8> CallArgs; 1577 coerceArguments(Builder, FnTy, Arguments, CallArgs); 1578 1579 auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs); 1580 TailCall->setTailCallKind(CallInst::TCK_MustTail); 1581 TailCall->setDebugLoc(Loc); 1582 TailCall->setCallingConv(MustTailCallFn->getCallingConv()); 1583 return TailCall; 1584 } 1585 1586 static void splitAsyncCoroutine(Function &F, coro::Shape &Shape, 1587 SmallVectorImpl<Function *> &Clones) { 1588 assert(Shape.ABI == coro::ABI::Async); 1589 assert(Clones.empty()); 1590 // Reset various things that the optimizer might have decided it 1591 // "knows" about the coroutine function due to not seeing a return. 1592 F.removeFnAttr(Attribute::NoReturn); 1593 F.removeRetAttr(Attribute::NoAlias); 1594 F.removeRetAttr(Attribute::NonNull); 1595 1596 auto &Context = F.getContext(); 1597 auto *Int8PtrTy = Type::getInt8PtrTy(Context); 1598 1599 auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId()); 1600 IRBuilder<> Builder(Id); 1601 1602 auto *FramePtr = Id->getStorage(); 1603 FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy); 1604 FramePtr = Builder.CreateConstInBoundsGEP1_32( 1605 Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset, 1606 "async.ctx.frameptr"); 1607 1608 // Map all uses of llvm.coro.begin to the allocated frame pointer. 1609 { 1610 // Make sure we don't invalidate Shape.FramePtr. 1611 TrackingVH<Instruction> Handle(Shape.FramePtr); 1612 Shape.CoroBegin->replaceAllUsesWith(FramePtr); 1613 Shape.FramePtr = Handle.getValPtr(); 1614 } 1615 1616 // Create all the functions in order after the main function. 1617 auto NextF = std::next(F.getIterator()); 1618 1619 // Create a continuation function for each of the suspend points. 
1620 Clones.reserve(Shape.CoroSuspends.size()); 1621 for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) { 1622 auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]); 1623 1624 // Create the clone declaration. 1625 auto ResumeNameSuffix = ".resume."; 1626 auto ProjectionFunctionName = 1627 Suspend->getAsyncContextProjectionFunction()->getName(); 1628 bool UseSwiftMangling = false; 1629 if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) { 1630 ResumeNameSuffix = "TQ"; 1631 UseSwiftMangling = true; 1632 } else if (ProjectionFunctionName.equals( 1633 "__swift_async_resume_get_context")) { 1634 ResumeNameSuffix = "TY"; 1635 UseSwiftMangling = true; 1636 } 1637 auto *Continuation = createCloneDeclaration( 1638 F, Shape, 1639 UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_" 1640 : ResumeNameSuffix + Twine(Idx), 1641 NextF, Suspend); 1642 Clones.push_back(Continuation); 1643 1644 // Insert a branch to a new return block immediately before the suspend 1645 // point. 1646 auto *SuspendBB = Suspend->getParent(); 1647 auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend); 1648 auto *Branch = cast<BranchInst>(SuspendBB->getTerminator()); 1649 1650 // Place it before the first suspend. 1651 auto *ReturnBB = 1652 BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB); 1653 Branch->setSuccessor(0, ReturnBB); 1654 1655 IRBuilder<> Builder(ReturnBB); 1656 1657 // Insert the call to the tail call function and inline it. 
    // Build the required musttail call to the function carried in the suspend
    // intrinsic's operands (dropping everything up to and including the
    // must-tail-call-function operand itself), then return void.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
        CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
    auto *TailCall =
        coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
    Builder.CreateRetVoid();

    // Inline the helper call immediately so the musttail call ends up directly
    // in this function's body.
    InlineFunctionInfo FnInfo;
    auto InlineRes = InlineFunction(*TailCall, FnInfo);
    assert(InlineRes.isSuccess() && "Expected inlining to succeed");
    (void)InlineRes;

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  // Populate each clone: one resume function per suspend point.
  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}

/// Split a returned-continuation (retcon / retcon.once) coroutine.
///
/// Allocates the coroutine frame (unless it fits inline in the caller-provided
/// storage), creates one continuation clone per suspend point, and reroutes
/// every suspend through a single unified return block whose PHIs collect the
/// next continuation pointer and any directly-yielded values.
static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  // Allocate the frame.
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    // The frame fits into the storage the caller already passed in.
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block (lazily, on the first suspend).
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      // With a single PHI the continuation is the whole return value;
      // otherwise aggregate the continuation plus yielded values into a
      // struct.
      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  // Populate each clone from the original function body.
  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}

namespace {

/// Pretty-stack-trace entry that names the coroutine currently being split.
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;
public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
}

/// Split coroutine \p F according to the ABI recorded in its shape, appending
/// the newly created functions to \p Clones.
///
/// Returns the computed coro::Shape. If no coro.begin is present, \p F is not
/// a coroutine and is left untouched (the returned Shape has a null
/// CoroBegin).
static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones,
                                  bool OptimizeFrame) {
  PrettyStackTraceFunction prettyStackTrace(F);

  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
  removeUnreachableBlocks(F);

  coro::Shape Shape(F, OptimizeFrame);
  if (!Shape.CoroBegin)
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSize(Shape);

  // If there are no suspend points, no split required, just remove
  // the allocation and deallocation blocks, they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Async:
      splitAsyncCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  return Shape;
}

/// Legacy-PM variant: remove the coro.end markers, clean up the original
/// function, and register the new clones with the CallGraph / current SCC.
static void
updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
                                   const SmallVectorImpl<Function *> &Clones,
                                   CallGraph &CG, CallGraphSCC &SCC) {
  if (!Shape.CoroBegin)
    return;

  removeCoroEnds(Shape, &CG);
  postSplitCleanup(F);

  // Update call graph and add the functions we created to the SCC.
  coro::updateCallGraph(F, Clones, CG, SCC);
}

/// New-PM variant: lower each coro.end to `false`, tell the LazyCallGraph
/// about the split functions, and let the CGSCC infrastructure absorb the
/// changes to the original function.
static void updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  if (!Shape.CoroBegin)
    return;

  for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
    auto &Context = End->getContext();
    End->replaceAllUsesWith(ConstantInt::getFalse(Context));
    End->eraseFromParent();
  }

  if (!Clones.empty()) {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      // Each clone in the Switch lowering is independent of the other clones.
      // Let the LazyCallGraph know about each one separately.
      for (Function *Clone : Clones)
        CG.addSplitFunction(N.getFunction(), *Clone);
      break;
    case coro::ABI::Async:
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references each of the other
      // clones. Let the LazyCallGraph know about all of them at once.
      // NOTE(review): Clones is already known non-empty here, so this inner
      // check is redundant.
      if (!Clones.empty())
        CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
      break;
    }

    // Let the CGSCC infra handle the changes to the original function.
    updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
  }

  // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
  // to the split functions.
  postSplitCleanup(N.getFunction());
  updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
}

// When we see the coroutine the first time, we insert an indirect call to a
// devirt trigger function and mark the coroutine that it is now ready for
// split.
// Async lowering uses this after it has split the function to restart the
// pipeline.
static void prepareForSplit(Function &F, CallGraph &CG,
                            bool MarkForAsyncRestart = false) {
  Module &M = *F.getParent();
  LLVMContext &Context = F.getContext();
#ifndef NDEBUG
  // The trigger function is created by createDevirtTriggerFunc before any
  // coroutine reaches this point; assert-only lookup in release builds.
  Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
  assert(DevirtFn && "coro.devirt.trigger function not found");
#endif

  F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
                                      ? ASYNC_RESTART_AFTER_SPLIT
                                      : PREPARED_FOR_SPLIT);

  // Insert an indirect call sequence that will be devirtualized by CoroElide
  // pass:
  //    %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
  //    %1 = bitcast i8* %0 to void(i8*)*
  //    call void %1(i8* null)
  coro::LowererBase Lowerer(M);
  Instruction *InsertPt =
      MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
                          : F.getEntryBlock().getTerminator();
  auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
  auto *DevirtFnAddr =
      Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
  FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
                                         {Type::getInt8PtrTy(Context)}, false);
  auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);

  // Update CG graph with an indirect call we just added.
  CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
}

// Make sure that there is a devirtualization trigger function that the
// coro-split pass uses to force a restart of the CGSCC pipeline. If the
// devirt trigger function is not found, we will create one and add it to the
// current SCC.
static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
  Module &M = CG.getModule();
  if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
    return;

  // Create a private, always-inline `void(i8*)` function with an empty body.
  LLVMContext &C = M.getContext();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
                                 /*isVarArg=*/false);
  Function *DevirtFn =
      Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
                       CORO_DEVIRT_TRIGGER_FN, &M);
  DevirtFn->addFnAttr(Attribute::AlwaysInline);
  auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
  ReturnInst::Create(C, Entry);

  auto *Node = CG.getOrInsertFunction(DevirtFn);

  // Re-seed the SCC so it also contains the new trigger function's node.
  SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
  Nodes.push_back(Node);
  SCC.initialize(Nodes);
}

/// Replace a call to llvm.coro.prepare.retcon (new-PM / LazyCallGraph
/// overload).
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(U.getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace a call to llvm.coro.prepare.retcon (legacy-PM / CallGraph
/// overload). Same peephole as above, but direct calls exposed by the
/// rewrite must be reflected in the legacy call graph by hand.
static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Find call graph nodes for the preparation.
  CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
  if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
    PrepareUserNode = CG[Prepare->getFunction()];
    FnNode = CG[ConcreteFn];
  }

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(U.getUser());
    if (!Cast || Cast->getType() != Fn->getType()) continue;

    // Check whether the replacement will introduce new direct calls.
    // If so, we'll need to update the call graph.
    if (PrepareUserNode) {
      for (auto &Use : Cast->uses()) {
        if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
          if (!CB->isCallee(&Use))
            continue;
          PrepareUserNode->removeCallEdgeFor(*CB);
          PrepareUserNode->addCalledFunction(CB, FnNode);
        }
      }
    }

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty()) break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace every use of \p PrepareFn (new-PM overload). Returns true if any
/// call was rewritten.
static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>(P.getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}

/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
/// IPO from operating on calls to a retcon coroutine before it's been
/// split. This is only safe to do after we've split all retcon
/// coroutines in the module. We can do this in this pass because
/// this pass does promise to split all retcon coroutines (as opposed to
/// switch coroutines, which are lowered in multiple stages).
static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
  bool Changed = false;
  for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>(P.getUser());
    replacePrepare(Prepare, CG);
    Changed = true;
  }

  return Changed;
}

/// Whether the module declares any of the coroutine intrinsics this pass
/// cares about; if not, the pass has nothing to do.
static bool declaresCoroSplitIntrinsics(const Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.begin",
                                      "llvm.coro.prepare.retcon",
                                      "llvm.coro.prepare.async"});
}

/// Append the declared function \p Name to \p Fns, but only if it actually
/// has uses in \p M.
static void addPrepareFunction(const Module &M,
                               SmallVectorImpl<Function *> &Fns,
                               StringRef Name) {
  auto *PrepareFn = M.getFunction(Name);
  if (PrepareFn && !PrepareFn->use_empty())
    Fns.push_back(PrepareFn);
}

/// New pass manager CGSCC entry point: split every coroutine in the SCC that
/// is marked with CORO_PRESPLIT_ATTR, replace llvm.coro.prepare.* barriers,
/// and queue the original and split functions for re-processing.
PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  //     non-zero number of nodes, so we assume that here and grab the first
  //     node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  if (!declaresCoroSplitIntrinsics(M))
    return PreservedAnalyses::all();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *, 4> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
      Coroutines.push_back(&N);

  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  // Only prepare intrinsics to clean up: no splitting work in this SCC.
  if (Coroutines.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "' state: "
                      << F.getFnAttribute(CORO_PRESPLIT_ATTR).getValueAsString()
                      << "\n");
    F.removeFnAttr(CORO_PRESPLIT_ATTR);

    SmallVector<Function *, 4> Clones;
    const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame);
    updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);

    if (!Shape.CoroSuspends.empty()) {
      // Run the CGSCC pipeline on the original and newly split functions.
      UR.CWorklist.insert(&C);
      for (Function *Clone : Clones)
        UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
    }
  }

  if (!PrepareFns.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  return PreservedAnalyses::none();
}

namespace {

// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to initial, resume and destroy invocations of the coroutine,
// add them to the current SCC and restart the IPO pipeline to optimize the
// coroutine subfunctions we extracted before proceeding to the caller of the
// coroutine.
// Legacy pass manager wrapper around the coroutine-splitting logic.
struct CoroSplitLegacy : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid

  CoroSplitLegacy(bool OptimizeFrame = false)
      : CallGraphSCCPass(ID), OptimizeFrame(OptimizeFrame) {
    initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool Run = false;        // Whether the module declares coro intrinsics.
  bool OptimizeFrame;      // Forwarded to splitCoroutine.

  // A coroutine is identified by the presence of coro.begin intrinsic, if
  // we don't have any, this pass has nothing to do.
  bool doInitialization(CallGraph &CG) override {
    Run = declaresCoroSplitIntrinsics(CG.getModule());
    return CallGraphSCCPass::doInitialization(CG);
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (!Run)
      return false;

    // Check for uses of llvm.coro.prepare.retcon.
    SmallVector<Function *, 2> PrepareFns;
    auto &M = SCC.getCallGraph().getModule();
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

    // Find coroutines for processing.
    SmallVector<Function *, 4> Coroutines;
    for (CallGraphNode *CGN : SCC)
      if (auto *F = CGN->getFunction())
        if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
          Coroutines.push_back(F);

    if (Coroutines.empty() && PrepareFns.empty())
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

    // Only prepare intrinsics to clean up; no coroutine splitting needed.
    if (Coroutines.empty()) {
      bool Changed = false;
      for (auto *PrepareFn : PrepareFns)
        Changed |= replaceAllPrepares(PrepareFn, CG);
      return Changed;
    }

    createDevirtTriggerFunc(CG, SCC);

    // Split all the coroutines.
    for (Function *F : Coroutines) {
      Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
      StringRef Value = Attr.getValueAsString();
      LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
                        << "' state: " << Value << "\n");
      // Async lowering marks coroutines to trigger a restart of the pipeline
      // after it has split them.
      if (Value == ASYNC_RESTART_AFTER_SPLIT) {
        F->removeFnAttr(CORO_PRESPLIT_ATTR);
        continue;
      }
      // First encounter: insert the devirt trigger and defer the actual
      // split to a later run of this pass.
      if (Value == UNPREPARED_FOR_SPLIT) {
        prepareForSplit(*F, CG);
        continue;
      }
      F->removeFnAttr(CORO_PRESPLIT_ATTR);

      SmallVector<Function *, 4> Clones;
      const coro::Shape Shape = splitCoroutine(*F, Clones, OptimizeFrame);
      updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
      if (Shape.ABI == coro::ABI::Async) {
        // Restart SCC passes.
        // Mark function for CoroElide pass. It will devirtualize causing a
        // restart of the SCC pipeline.
        prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
      }
    }

    for (auto *PrepareFn : PrepareFns)
      replaceAllPrepares(PrepareFn, CG);

    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override { return "Coroutine Splitting"; }
};

} // end anonymous namespace

char CoroSplitLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)

Pass *llvm::createCoroSplitLegacyPass(bool OptimizeFrame) {
  return new CoroSplitLegacy(OptimizeFrame);
}