//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
19 //===----------------------------------------------------------------------===// 20 21 #include "llvm/Transforms/Coroutines/CoroSplit.h" 22 #include "CoroInstr.h" 23 #include "CoroInternal.h" 24 #include "llvm/ADT/DenseMap.h" 25 #include "llvm/ADT/SmallPtrSet.h" 26 #include "llvm/ADT/SmallVector.h" 27 #include "llvm/ADT/StringRef.h" 28 #include "llvm/ADT/Twine.h" 29 #include "llvm/Analysis/CallGraph.h" 30 #include "llvm/Analysis/CallGraphSCCPass.h" 31 #include "llvm/Analysis/LazyCallGraph.h" 32 #include "llvm/IR/Argument.h" 33 #include "llvm/IR/Attributes.h" 34 #include "llvm/IR/BasicBlock.h" 35 #include "llvm/IR/CFG.h" 36 #include "llvm/IR/CallingConv.h" 37 #include "llvm/IR/Constants.h" 38 #include "llvm/IR/DataLayout.h" 39 #include "llvm/IR/DerivedTypes.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/GlobalValue.h" 42 #include "llvm/IR/GlobalVariable.h" 43 #include "llvm/IR/IRBuilder.h" 44 #include "llvm/IR/InstIterator.h" 45 #include "llvm/IR/InstrTypes.h" 46 #include "llvm/IR/Instruction.h" 47 #include "llvm/IR/Instructions.h" 48 #include "llvm/IR/IntrinsicInst.h" 49 #include "llvm/IR/LLVMContext.h" 50 #include "llvm/IR/LegacyPassManager.h" 51 #include "llvm/IR/Module.h" 52 #include "llvm/IR/Type.h" 53 #include "llvm/IR/Value.h" 54 #include "llvm/IR/Verifier.h" 55 #include "llvm/InitializePasses.h" 56 #include "llvm/Pass.h" 57 #include "llvm/Support/Casting.h" 58 #include "llvm/Support/Debug.h" 59 #include "llvm/Support/PrettyStackTrace.h" 60 #include "llvm/Support/raw_ostream.h" 61 #include "llvm/Transforms/Scalar.h" 62 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 63 #include "llvm/Transforms/Utils/CallGraphUpdater.h" 64 #include "llvm/Transforms/Utils/Cloning.h" 65 #include "llvm/Transforms/Utils/Local.h" 66 #include "llvm/Transforms/Utils/ValueMapper.h" 67 #include <cassert> 68 #include <cstddef> 69 #include <cstdint> 70 #include <initializer_list> 71 #include <iterator> 72 73 using namespace llvm; 74 75 #define DEBUG_TYPE "coro-split" 76 
namespace {

/// A little helper class for building the resume, destroy and cleanup clones
/// of a coroutine. It bundles the original function, the clone under
/// construction, and the value map between them, together with the state the
/// individual rewrite steps below operate on.
class CoroCloner {
public:
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  // The coroutine being split.
  Function &OrigF;
  // The clone; created lazily by create() for the switch ABI, supplied by the
  // caller for continuation/async ABIs.
  Function *NewF;
  // Name suffix appended to OrigF's name when a declaration must be created.
  const Twine &Suffix;
  coro::Shape &Shape;
  Kind FKind;
  // Maps values of OrigF to their counterparts in NewF after cloning.
  ValueToValueMapTy VMap;
  IRBuilder<> Builder;
  // Frame pointer as seen from the clone; set by create().
  Value *NewFramePtr = nullptr;

  /// The active suspend instruction; meaningful only for continuation and async
  /// ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

public:
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind)
      : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
        FKind(FKind), Builder(OrigF.getContext()) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
      : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
        FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
        Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

  /// Returns the clone; only valid after create() (or, for continuation
  /// lowerings, construction with an explicit NewF).
  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  void create();

private:
  /// Whether this clone represents the destroy path (unwind or cleanup) of a
  /// switch-lowered coroutine, as opposed to the resume path.
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void handleFinalSuspend();
};

} // end anonymous namespace

/// Free the continuation storage of a retcon coroutine, unless the frame is
/// known to fit inline in the caller-provided storage buffer, in which case
/// there is nothing to deallocate.
static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace an llvm.coro.end.async.
/// Will inline the must tail call function call if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  // The must-tail call is the instruction immediately before the predecessor's
  // terminator.
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->getInstList().splice(
      End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  // Inline the must-tail callee; done after the CFG surgery above so the
  // inlined body lands in the returning block.
  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InResume, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async: {
    bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
    if (!CoroEndBlockNeedsCleanup)
      return;
    break;
  }

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    Builder.CreateRetVoid();
    break;

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    // The continuation pointer is either the sole return value or the first
    // element of an aggregate return.
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
        cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch:
    if (!InResume)
      return;
    break;
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

/// Dispatch an llvm.coro.end to the unwind or fallthrough handler, then
/// replace its token result (true when called from a resume clone, false when
/// called from the ramp function) and erase it.
static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}

// Create an entry block for a resume function with a switch that will jump to
// suspend points.
static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Switch);
  LLVMContext &C = F.getContext();

  // resume.entry:
  //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
  //  i32 2
  //  % index = load i32, i32* %index.addr
  //  switch i32 %index, label %unreachable [
  //    i32 0, label %resume.0
  //    i32 1, label %resume.1
  //    ...
  //  ]

  auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
  auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);

  IRBuilder<> Builder(NewEntry);
  auto *FramePtr = Shape.FramePtr;
  auto *FrameTy = Shape.FrameTy;
  auto *GepIndex = Builder.CreateStructGEP(
      FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
  auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
  auto *Switch =
      Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
  Shape.SwitchLowering.ResumeSwitch = Switch;

  size_t SuspendIndex = 0;
  for (auto *AnyS : Shape.CoroSuspends) {
    auto *S = cast<CoroSuspendInst>(AnyS);
    ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);

    // Replace CoroSave with a store to Index:
    //    %index.addr = getelementptr %f.frame... (index field number)
    //    store i32 0, i32* %index.addr1
    auto *Save = S->getCoroSave();
    Builder.SetInsertPoint(Save);
    if (S->isFinal()) {
      // Final suspend point is represented by storing zero in ResumeFnAddr.
      auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
                                 coro::Shape::SwitchFieldIndex::Resume,
                                 "ResumeFn.addr");
      auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
          cast<PointerType>(GepIndex->getType())->getElementType()));
      Builder.CreateStore(NullPtr, GepIndex);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
      Builder.CreateStore(IndexVal, GepIndex);
    }
    Save->replaceAllUsesWith(ConstantTokenNone::get(C));
    Save->eraseFromParent();

    // Split block before and after coro.suspend and add a jump from an entry
    // switch:
    //
    //  whateverBB:
    //    whatever
    //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
    //    switch i8 %0, label %suspend[i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // becomes:
    //
    //  whateverBB:
    //    whatever
    //    br label %resume.0.landing
    //
    //  resume.0: ; <--- jump from the switch in the resume.entry
    //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
    //    br label %resume.0.landing
    //
    //  resume.0.landing:
    //    %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
    //    switch i8 % 1, label %suspend [i8 0, label %resume
    //                                   i8 1, label %cleanup]

    auto *SuspendBB = S->getParent();
    auto *ResumeBB =
        SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
    auto *LandingBB = ResumeBB->splitBasicBlock(
        S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
    Switch->addCase(IndexVal, ResumeBB);

    cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
    auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
    S->replaceAllUsesWith(PN);
    // -1 means "fall through from the original suspend"; the real suspend
    // result flows in along the resume.N edge.
    PN->addIncoming(Builder.getInt8(-1), SuspendBB);
    PN->addIncoming(S, ResumeBB);

    ++SuspendIndex;
  }

  Builder.SetInsertPoint(UnreachBB);
  Builder.CreateUnreachable();

  Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
}


// Rewrite final suspend point handling. We do not use suspend index to
// represent the final suspend point. Instead we zero-out ResumeFnAddr in the
// coroutine frame, since it is undefined behavior to resume a coroutine
// suspended at the final suspend point. Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point (if present) is always the last element of CoroSuspends array).
// In the destroy function, we add a code sequence to check if ResumeFnAddress
// is Null, and if so, jump to the appropriate label to handle cleanup from the
// final suspend point.
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);
  // The final suspend is always the last case of the resume switch; drop it
  // from this clone.
  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    // In the destroy clone, guard the switch with a null-check of ResumeFnAddr:
    // a null resume pointer marks the final suspend point, whose cleanup lives
    // in the block the removed case used to target.
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());
    auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
                               coro::Shape::SwitchFieldIndex::Resume,
                               "ResumeFn.addr");
    auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
                                    GepIndex);
    auto *Cond = Builder.CreateIsNull(Load);
    Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

/// Create the declaration for a resume/destroy clone of \p OrigF, named with
/// the given suffix and inserted before \p InsertBefore in the module's
/// function list.
static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore) {
  Module *M = OrigF.getParent();
  auto *FnTy = Shape.getResumeFunctionType();

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);
  NewF->addParamAttr(0, Attribute::NonNull);

  // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
  if (Shape.ABI != coro::ABI::Async)
    NewF->addParamAttr(0, Attribute::NoAlias);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty()) return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value*, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
    // Advance before a potential erase invalidates the use iterator.
    auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty()) return;

  // Otherwise, we need to create an aggregate.
  Value *Agg = UndefValue::get(NewS->getType());
  for (size_t I = 0, E = Args.size(); I != E; ++I)
    Agg = Builder.CreateInsertValue(Agg, Args[I], I);

  NewS->replaceAllUsesWith(Agg);
}

/// Replace the cloned copies of the non-active llvm.coro.suspend calls with
/// the constant appropriate for the clone's role (switch ABI), or leave them
/// alone (continuation/async ABIs, where their results were spilled earlier).
void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point, replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend) continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

/// Lower the cloned copies of all llvm.coro.end calls in the clone.
void CoroCloner::replaceCoroEnds() {
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}

/// Replace the swifterror intrinsics in \p F (or its clone, when \p VMap is
/// non-null) with loads/stores of a single swifterror slot: either the
/// function's swifterror argument, or a lazily-created swifterror alloca.
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot) {
      assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
             "multiple swifterror slots in function with different types");
      return CachedSlot;
    }

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        assert(Arg.getType()->getPointerElementType() == ValueTy &&
               "swifterror argument does not have expected type");
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->getNumArgOperands() == 0) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      // Otherwise it's a 'set': store the value and forward the slot pointer.
      assert(Op->getNumArgOperands() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function.  Make the entry block branch to this.
    auto *SwitchBB =
      cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point.  Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any alloca that's still being used but not reachable from the new entry
  // needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
    Instruction &I = *IT++;
    if (!isa<AllocaInst>(&I) || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}

/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is located as a tail to the async
  // context header.
  case coro::ABI::Async: {
    auto *CalleeContext = NewF->getArg(Shape.AsyncLowering.ContextArgNo);
    auto *FramePtrTy = Shape.FrameTy->getPointerTo();
    auto *ProjectionFunc = cast<CoroSuspendAsyncInst>(ActiveSuspend)
                               ->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(
        cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
        ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = Shape.FrameTy->getPointerTo();

    // If the storage is inline, just bitcast to the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return Builder.CreateBitCast(NewStorage, FramePtrTy);

    // Otherwise, load the real frame from the opaque storage.
    auto FramePtrPtr =
      Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
    return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
  }
  }
  llvm_unreachable("bad ABI");
}

/// Mark parameter \p ParamIndex as a non-null, no-alias, aligned and
/// dereferenceable frame pointer of \p Size bytes.
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex,
                                 uint64_t Size, Align Alignment) {
  AttrBuilder ParamAttrs;
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoAlias);
  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end());
  }

  // Replace all args with undefs. The buildCoroutineFrame algorithm has
  // already rewritten accesses to the args that occur after suspend points
  // with loads and stores to/from the coroutine frame.
  for (Argument &A : OrigF.args())
    VMap[&A] = UndefValue::get(A.getType());

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns);

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);

  auto &Context = NewF->getContext();

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function.  This should include optimization settings and so on.
    NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
                                      OrigAttrs.getFnAttributes());

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.FrameSize, Shape.FrameAlign);
    break;
  case coro::ABI::Async:
    break;
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment());
    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless.  Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return, /*UseLLVMTrap=*/false);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the verifier.
  // These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  OldVFrame->replaceAllUsesWith(NewVFrame);

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via switch (allows to
    // remove final case from the switch, since it is undefined behavior to
    // resume the coroutine suspended at the final suspend point.
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// new entry block and replacing coro.suspend an appropriate value to force
// resume or cleanup pass for every suspend point.
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

/// Remove calls to llvm.coro.end in the original function.
925 static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) { 926 for (auto End : Shape.CoroEnds) { 927 replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG); 928 } 929 } 930 931 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) { 932 assert(Shape.ABI == coro::ABI::Async); 933 934 auto *FuncPtrStruct = cast<ConstantStruct>( 935 Shape.AsyncLowering.AsyncFuncPointer->getInitializer()); 936 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0); 937 auto *OrigContextSize = FuncPtrStruct->getOperand(1); 938 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(), 939 Shape.AsyncLowering.ContextSize); 940 auto *NewFuncPtrStruct = ConstantStruct::get( 941 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize); 942 943 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct); 944 } 945 946 static void replaceFrameSize(coro::Shape &Shape) { 947 if (Shape.ABI == coro::ABI::Async) 948 updateAsyncFuncPointerContextSize(Shape); 949 950 if (Shape.CoroSizes.empty()) 951 return; 952 953 // In the same function all coro.sizes should have the same result type. 954 auto *SizeIntrin = Shape.CoroSizes.back(); 955 Module *M = SizeIntrin->getModule(); 956 const DataLayout &DL = M->getDataLayout(); 957 auto Size = DL.getTypeAllocSize(Shape.FrameTy); 958 auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size); 959 960 for (CoroSizeInst *CS : Shape.CoroSizes) { 961 CS->replaceAllUsesWith(SizeConstant); 962 CS->eraseFromParent(); 963 } 964 } 965 966 // Create a global constant array containing pointers to functions provided and 967 // set Info parameter of CoroBegin to point at this constant. Example: 968 // 969 // @f.resumers = internal constant [2 x void(%f.frame*)*] 970 // [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy] 971 // define void @f() { 972 // ... 
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}

// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  // Insert stores right after the frame pointer is established.
  IRBuilder<> Builder(Shape.FramePtr->getNextNode());
  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elide the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

// Verify a freshly-split clone and run a small scalar-optimization pipeline
// over it to clean up after the split.
static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");

  legacy::FunctionPassManager FPM(F.getParent());

  FPM.add(createSCCPPass());
  FPM.add(createCFGSimplificationPass());
  FPM.add(createEarlyCSEPass());
  FPM.add(createCFGSimplificationPass());

  FPM.doInitialization();
  FPM.run(F);
  FPM.doFinalization();
}

// Assuming we arrived at the block NewBlock from Prev instruction, store
// PHI's incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret, with a clone of a ret
// instruction. Suspend instruction represented by a switch, track the PHI
// values and select the correct case successor when possible.
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;

  // Walk forward from InitialInst through trivially-resolvable control flow
  // (unconditional branches, constant-foldable compares/switches) looking for
  // a ret. Returns true and replaces InitialInst with a clone of that ret if
  // one is found.
  Instruction *I = InitialInst;
  while (I->isTerminator() ||
         (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from basic block of InitialInst
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *BB = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = BB;
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      auto *BR = dyn_cast<BranchInst>(I->getNextNode());
      if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
        // If the case number of suspended switch instruction is reduced to
        // 1, then it is simplified to CmpInst in llvm::ConstantFoldTerminator.
        // And the comparison looks like: %cond = icmp eq i8 %V, constant.
        ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
        if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
          Value *V = CondCmp->getOperand(0);
          auto it = ResolvedValues.find(V);
          if (it != ResolvedValues.end())
            V = it->second;

          // If the compared value resolves to a constant, pick the branch
          // successor the comparison selects.
          if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
            BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
                                 ? BR->getSuccessor(0)
                                 : BR->getSuccessor(1);
            scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
            I = BB->getFirstNonPHIOrDbgOrLifetime();
            continue;
          }
        }
      }
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      Value *V = SI->getCondition();
      auto it = ResolvedValues.find(V);
      if (it != ResolvedValues.end())
        V = it->second;
      if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
        BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
        scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
        I = BB->getFirstNonPHIOrDbgOrLifetime();
        continue;
      }
    }
    return false;
  }
  return false;
}

// Check whether CI obeys the rules of musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match prototypes and calling conventions of resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  // The single parameter must be an address-space-0 pointer (the frame).
  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI must not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet,    Attribute::ByVal,  Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg,  Attribute::Returned,
      Attribute::SwiftSelf,    Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttribute(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instructions that is immediately followed by a
// suspend (i.e. ret).
We do this even in -O0 to support guaranteed tail call 1166 // for symmetrical coroutine control transfer (C++ Coroutines TS extension). 1167 // This transformation is done only in the resume part of the coroutine that has 1168 // identical signature and calling convention as the coro.resume call. 1169 static void addMustTailToCoroResumes(Function &F) { 1170 bool changed = false; 1171 1172 // Collect potential resume instructions. 1173 SmallVector<CallInst *, 4> Resumes; 1174 for (auto &I : instructions(F)) 1175 if (auto *Call = dyn_cast<CallInst>(&I)) 1176 if (shouldBeMustTail(*Call, F)) 1177 Resumes.push_back(Call); 1178 1179 // Set musttail on those that are followed by a ret instruction. 1180 for (CallInst *Call : Resumes) 1181 if (simplifyTerminatorLeadingToRet(Call->getNextNode())) { 1182 Call->setTailCallKind(CallInst::TCK_MustTail); 1183 changed = true; 1184 } 1185 1186 if (changed) 1187 removeUnreachableBlocks(F); 1188 } 1189 1190 // Coroutine has no suspend points. Remove heap allocation for the coroutine 1191 // frame if possible. 
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      // Replace the heap allocation with a stack alloca of the frame and
      // make coro.alloc answer 'false' so the allocation path is skipped.
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// SimplifySuspendPoint needs to check that there is no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine and if that is the case we cannot eliminate the suspend point.
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  // Scan [From, To) within a single block; To == nullptr scans to block end.
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (Set.count(Pred) == 0)
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  // Find the instruction immediately preceding the suspend, possibly the
  // terminator of the single predecessor block.
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no more users remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  // Compact the CoroSuspends vector in place: simplified suspends are swapped
  // past index N and dropped by the final resize.
  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);
}

static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  addMustTailToCoroResumes(*ResumeClone);

  // Store addresses resume/destroy/cleanup functions in the coroutine frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to resume/destroy/cleanup functions
  // pointed by the last argument of @llvm.coro.info, so that CoroElide pass
  // can determine the correct function to call.
  setCoroInfo(F, Shape, Clones);
}

static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(0, UndefValue::get(Int8PtrTy));
}

/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
                            ArrayRef<Value *> FnArgs,
                            SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}

CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                                   ArrayRef<Value *> Arguments,
                                   IRBuilder<> &Builder) {
  auto *FnTy =
      cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
  // Coerce the arguments, llvm optimizations seem to ignore the types in
  // vaarg functions and throws away casts in optimized mode.
  SmallVector<Value *, 8> CallArgs;
  coerceArguments(Builder, FnTy, Arguments, CallArgs);

  auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
  TailCall->setTailCallKind(CallInst::TCK_MustTail);
  TailCall->setDebugLoc(Loc);
  TailCall->setCallingConv(MustTailCallFn->getCallingConv());
  return TailCall;
}

static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
                                SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
  IRBuilder<> Builder(Id);

  // Compute the frame pointer as an offset into the caller-provided storage.
  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);

    // Create the clone declaration.
    auto *Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(Idx), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    // The first three operands of the suspend are not call arguments.
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(3);
    auto *TailCall =
        coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    auto InlineRes = InlineFunction(*TailCall, FnInfo);
    assert(InlineRes.isSuccess() && "Expected inlining to succeed");
    (void)InlineRes;

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}

static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Allocate the frame.
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Instruction> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block (lazily, on the first suspend).
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}

namespace {

/// Pretty-stack-trace entry that names the coroutine being split if the
/// compiler crashes mid-split.
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;

public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
} // end anonymous namespace

static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones,
                                  bool ReuseFrameSlot) {
  PrettyStackTraceFunction prettyStackTrace(F);

  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
  removeUnreachableBlocks(F);

  coro::Shape Shape(F, ReuseFrameSlot);
  if (!Shape.CoroBegin)
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSize(Shape);

  // If there are no suspend points, no split required, just remove
  // the allocation and deallocation blocks, they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Async:
      splitAsyncCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  return Shape;
}

// Legacy (CallGraph-based) variant of the post-split call graph update.
static void
updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
                                   const SmallVectorImpl<Function *> &Clones,
                                   CallGraph &CG, CallGraphSCC &SCC) {
  if (!Shape.CoroBegin)
    return;

  removeCoroEnds(Shape, &CG);
  postSplitCleanup(F);

  // Update call graph and add the functions we created to the SCC.
1734 coro::updateCallGraph(F, Clones, CG, SCC); 1735 } 1736 1737 static void updateCallGraphAfterCoroutineSplit( 1738 LazyCallGraph::Node &N, const coro::Shape &Shape, 1739 const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C, 1740 LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR, 1741 FunctionAnalysisManager &FAM) { 1742 if (!Shape.CoroBegin) 1743 return; 1744 1745 for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) { 1746 auto &Context = End->getContext(); 1747 End->replaceAllUsesWith(ConstantInt::getFalse(Context)); 1748 End->eraseFromParent(); 1749 } 1750 1751 if (!Clones.empty()) { 1752 switch (Shape.ABI) { 1753 case coro::ABI::Switch: 1754 // Each clone in the Switch lowering is independent of the other clones. 1755 // Let the LazyCallGraph know about each one separately. 1756 for (Function *Clone : Clones) 1757 CG.addSplitFunction(N.getFunction(), *Clone); 1758 break; 1759 case coro::ABI::Async: 1760 case coro::ABI::Retcon: 1761 case coro::ABI::RetconOnce: 1762 // Each clone in the Async/Retcon lowering references of the other clones. 1763 // Let the LazyCallGraph know about all of them at once. 1764 CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones); 1765 break; 1766 } 1767 1768 // Let the CGSCC infra handle the changes to the original function. 1769 updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM); 1770 } 1771 1772 // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges 1773 // to the split functions. 1774 postSplitCleanup(N.getFunction()); 1775 updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM); 1776 } 1777 1778 // When we see the coroutine the first time, we insert an indirect call to a 1779 // devirt trigger function and mark the coroutine that it is now ready for 1780 // split. 1781 // Async lowering uses this after it has split the function to restart the 1782 // pipeline. 
1783 static void prepareForSplit(Function &F, CallGraph &CG, 1784 bool MarkForAsyncRestart = false) { 1785 Module &M = *F.getParent(); 1786 LLVMContext &Context = F.getContext(); 1787 #ifndef NDEBUG 1788 Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN); 1789 assert(DevirtFn && "coro.devirt.trigger function not found"); 1790 #endif 1791 1792 F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart 1793 ? ASYNC_RESTART_AFTER_SPLIT 1794 : PREPARED_FOR_SPLIT); 1795 1796 // Insert an indirect call sequence that will be devirtualized by CoroElide 1797 // pass: 1798 // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1) 1799 // %1 = bitcast i8* %0 to void(i8*)* 1800 // call void %1(i8* null) 1801 coro::LowererBase Lowerer(M); 1802 Instruction *InsertPt = 1803 MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime() 1804 : F.getEntryBlock().getTerminator(); 1805 auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context)); 1806 auto *DevirtFnAddr = 1807 Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt); 1808 FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context), 1809 {Type::getInt8PtrTy(Context)}, false); 1810 auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt); 1811 1812 // Update CG graph with an indirect call we just added. 1813 CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode()); 1814 } 1815 1816 // Make sure that there is a devirtualization trigger function that the 1817 // coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt 1818 // trigger function is not found, we will create one and add it to the current 1819 // SCC. 
static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
  Module &M = CG.getModule();
  // Nothing to do if the trigger already exists in this module.
  if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
    return;

  // Create a private, always-inline "void (i8*)" function with an empty body.
  LLVMContext &C = M.getContext();
  auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
                                 /*isVarArg=*/false);
  Function *DevirtFn =
      Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
                       CORO_DEVIRT_TRIGGER_FN, &M);
  DevirtFn->addFnAttr(Attribute::AlwaysInline);
  auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
  ReturnInst::Create(C, Entry);

  auto *Node = CG.getOrInsertFunction(DevirtFn);

  // Re-initialize the current SCC so it also contains the new node.
  SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
  Nodes.push_back(Node);
  SCC.initialize(Nodes);
}

/// Replace a call to llvm.coro.prepare.retcon (new pass manager variant).
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //   %0 = bitcast [[TYPE]] @some_function to i8*
  //   %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //   %2 = bitcast %1 to [[TYPE]]
  // ==>
  //   %2 = @some_function
  // Note: advance the use iterator before touching the user, since the user
  // may be erased below.
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace a call to llvm.coro.prepare.retcon (legacy pass manager variant,
/// which must also keep the old-style CallGraph edges up to date).
static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Find call graph nodes for the preparation.
  CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
  if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
    PrepareUserNode = CG[Prepare->getFunction()];
    FnNode = CG[ConcreteFn];
  }

  // Attempt to peephole this pattern:
  //   %0 = bitcast [[TYPE]] @some_function to i8*
  //   %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //   %2 = bitcast %1 to [[TYPE]]
  // ==>
  //   %2 = @some_function
  for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
       UI != UE; ) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
    if (!Cast || Cast->getType() != Fn->getType()) continue;

    // Check whether the replacement will introduce new direct calls.
    // If so, we'll need to update the call graph.
    if (PrepareUserNode) {
      for (auto &Use : Cast->uses()) {
        if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
          if (!CB->isCallee(&Use))
            continue;
          // A call through the cast becomes a direct call to Fn: move the
          // edge from the external/indirect node to Fn's node.
          PrepareUserNode->removeCallEdgeFor(*CB);
          PrepareUserNode->addCalledFunction(CB, FnNode);
        }
      }
    }

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty()) break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace every use of \p PrepareFn (llvm.coro.prepare.*) via replacePrepare.
/// New pass manager variant. Returns true if anything was changed.
static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  // Advance the use iterator first; replacePrepare erases the call.
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}

/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
/// IPO from operating on calls to a retcon coroutine before it's been
/// split.  This is only safe to do after we've split all retcon
/// coroutines in the module.  We can do this in this pass because
/// this pass does promise to split all retcon coroutines (as opposed to
/// switch coroutines, which are lowered in multiple stages).
static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
  bool Changed = false;
  for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
       PI != PE; ) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>((PI++)->getUser());
    replacePrepare(Prepare, CG);
    Changed = true;
  }

  return Changed;
}

/// True if the module declares any of the intrinsics that would give this
/// pass work to do.
static bool declaresCoroSplitIntrinsics(const Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.begin",
                                      "llvm.coro.prepare.retcon",
                                      "llvm.coro.prepare.async"});
}

/// Append the function named \p Name to \p Fns, but only if it exists in
/// \p M and actually has uses.
static void addPrepareFunction(const Module &M,
                               SmallVectorImpl<Function *> &Fns,
                               StringRef Name) {
  auto *PrepareFn = M.getFunction(Name);
  if (PrepareFn && !PrepareFn->use_empty())
    Fns.push_back(PrepareFn);
}

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  //     non-zero number of nodes, so we assume that here and grab the first
  //     node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // Fast path: if the module declares none of the relevant intrinsics, this
  // pass has nothing to do.
  if (!declaresCoroSplitIntrinsics(M))
    return PreservedAnalyses::all();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *, 4> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
      Coroutines.push_back(&N);

  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  if (Coroutines.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
    StringRef Value = Attr.getValueAsString();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "' state: " << Value << "\n");
    if (Value == UNPREPARED_FOR_SPLIT) {
      // Enqueue a second iteration of the CGSCC pipeline on this SCC.
      UR.CWorklist.insert(&C);
      F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
      continue;
    }
    // The attribute has served its purpose; drop it before splitting.
    F.removeFnAttr(CORO_PRESPLIT_ATTR);

    SmallVector<Function *, 4> Clones;
    const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
    updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);

    if ((Shape.ABI == coro::ABI::Async || Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce) &&
        !Shape.CoroSuspends.empty()) {
      // Run the CGSCC pipeline on the newly split functions.
      // All clones will be in the same RefSCC, so choose a random clone.
      UR.RCWorklist.insert(CG.lookupRefSCC(CG.get(*Clones[0])));
    }
  }

  // With every coroutine in the module split, the prepare barriers can be
  // removed (see the comment on the legacy replaceAllPrepares above).
  if (!PrepareFns.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  return PreservedAnalyses::none();
}

namespace {

// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to initial, resume and destroy invocations of the coroutine,
// add them to the current SCC and restart the IPO pipeline to optimize the
// coroutine subfunctions we extracted before proceeding to the caller of the
// coroutine.
// Legacy pass manager wrapper around the coroutine splitting logic.
struct CoroSplitLegacy : public CallGraphSCCPass {
  static char ID; // Pass identification, replacement for typeid

  CoroSplitLegacy(bool ReuseFrameSlot = false)
      : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
    initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
  }

  bool Run = false;        // Whether this module declares coro intrinsics.
  bool ReuseFrameSlot;

  // A coroutine is identified by the presence of coro.begin intrinsic, if
  // we don't have any, this pass has nothing to do.
  bool doInitialization(CallGraph &CG) override {
    Run = declaresCoroSplitIntrinsics(CG.getModule());
    return CallGraphSCCPass::doInitialization(CG);
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (!Run)
      return false;

    // Check for uses of llvm.coro.prepare.retcon.
    SmallVector<Function *, 2> PrepareFns;
    auto &M = SCC.getCallGraph().getModule();
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
    addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

    // Find coroutines for processing.
    SmallVector<Function *, 4> Coroutines;
    for (CallGraphNode *CGN : SCC)
      if (auto *F = CGN->getFunction())
        if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
          Coroutines.push_back(F);

    if (Coroutines.empty() && PrepareFns.empty())
      return false;

    CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

    // Only prepare barriers to clean up: remove them and report whether
    // anything changed.
    if (Coroutines.empty()) {
      bool Changed = false;
      for (auto *PrepareFn : PrepareFns)
        Changed |= replaceAllPrepares(PrepareFn, CG);
      return Changed;
    }

    createDevirtTriggerFunc(CG, SCC);

    // Split all the coroutines.
    for (Function *F : Coroutines) {
      Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
      StringRef Value = Attr.getValueAsString();
      LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
                        << "' state: " << Value << "\n");
      // Async lowering marks coroutines to trigger a restart of the pipeline
      // after it has split them.
      if (Value == ASYNC_RESTART_AFTER_SPLIT) {
        F->removeFnAttr(CORO_PRESPLIT_ATTR);
        continue;
      }
      if (Value == UNPREPARED_FOR_SPLIT) {
        prepareForSplit(*F, CG);
        continue;
      }
      // The attribute has served its purpose; drop it before splitting.
      F->removeFnAttr(CORO_PRESPLIT_ATTR);

      SmallVector<Function *, 4> Clones;
      const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
      updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
      if (Shape.ABI == coro::ABI::Async) {
        // Restart SCC passes.
        // Mark function for CoroElide pass. It will devirtualize causing a
        // restart of the SCC pipeline.
        prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
      }
    }

    // All coroutines in the SCC are split; the prepare barriers can go.
    for (auto *PrepareFn : PrepareFns)
      replaceAllPrepares(PrepareFn, CG);

    return true;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    CallGraphSCCPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override { return "Coroutine Splitting"; }
};

} // end anonymous namespace

char CoroSplitLegacy::ID = 0;

INITIALIZE_PASS_BEGIN(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(
    CoroSplitLegacy, "coro-split",
    "Split coroutine into a set of functions driving its state machine", false,
    false)

Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
  return new CoroSplitLegacy(ReuseFrameSlot);
}