//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass builds the coroutine frame and outlines resume and destroy parts
// of the coroutine into separate functions.
//
// We present a coroutine to LLVM as an ordinary function with suspension
// points marked up with intrinsics. We let the optimizer party on the coroutine
// as a single function for as long as possible. Shortly before the coroutine is
// eligible to be inlined into its callers, we split up the coroutine into parts
// corresponding to the initial, resume and destroy invocations of the
// coroutine, add them to the current SCC and restart the IPO pipeline to
// optimize the coroutine subfunctions we extracted before proceeding to the
// caller of the coroutine.
19 //===----------------------------------------------------------------------===// 20 21 #include "llvm/Transforms/Coroutines/CoroSplit.h" 22 #include "CoroInstr.h" 23 #include "CoroInternal.h" 24 #include "llvm/ADT/DenseMap.h" 25 #include "llvm/ADT/PriorityWorklist.h" 26 #include "llvm/ADT/SmallPtrSet.h" 27 #include "llvm/ADT/SmallVector.h" 28 #include "llvm/ADT/StringRef.h" 29 #include "llvm/ADT/Twine.h" 30 #include "llvm/Analysis/CFG.h" 31 #include "llvm/Analysis/CallGraph.h" 32 #include "llvm/Analysis/ConstantFolding.h" 33 #include "llvm/Analysis/LazyCallGraph.h" 34 #include "llvm/BinaryFormat/Dwarf.h" 35 #include "llvm/IR/Argument.h" 36 #include "llvm/IR/Attributes.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/CFG.h" 39 #include "llvm/IR/CallingConv.h" 40 #include "llvm/IR/Constants.h" 41 #include "llvm/IR/DataLayout.h" 42 #include "llvm/IR/DerivedTypes.h" 43 #include "llvm/IR/Dominators.h" 44 #include "llvm/IR/Function.h" 45 #include "llvm/IR/GlobalValue.h" 46 #include "llvm/IR/GlobalVariable.h" 47 #include "llvm/IR/IRBuilder.h" 48 #include "llvm/IR/InstIterator.h" 49 #include "llvm/IR/InstrTypes.h" 50 #include "llvm/IR/Instruction.h" 51 #include "llvm/IR/Instructions.h" 52 #include "llvm/IR/IntrinsicInst.h" 53 #include "llvm/IR/LLVMContext.h" 54 #include "llvm/IR/Module.h" 55 #include "llvm/IR/Type.h" 56 #include "llvm/IR/Value.h" 57 #include "llvm/IR/Verifier.h" 58 #include "llvm/Support/Casting.h" 59 #include "llvm/Support/Debug.h" 60 #include "llvm/Support/PrettyStackTrace.h" 61 #include "llvm/Support/raw_ostream.h" 62 #include "llvm/Transforms/Scalar.h" 63 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 64 #include "llvm/Transforms/Utils/CallGraphUpdater.h" 65 #include "llvm/Transforms/Utils/Cloning.h" 66 #include "llvm/Transforms/Utils/Local.h" 67 #include "llvm/Transforms/Utils/ValueMapper.h" 68 #include <cassert> 69 #include <cstddef> 70 #include <cstdint> 71 #include <initializer_list> 72 #include <iterator> 73 74 using namespace 
llvm;

#define DEBUG_TYPE "coro-split"

namespace {

/// A little helper class for building the resume/destroy/cleanup clones of a
/// coroutine: it owns the value map from the original function into the clone
/// and drives the per-ABI rewrites applied after cloning.
class CoroCloner {
public:
  /// Which kind of clone is being produced; determines how suspends, ends,
  /// and the entry block are rewritten.
  enum class Kind {
    /// The shared resume function for a switch lowering.
    SwitchResume,

    /// The shared unwind function for a switch lowering.
    SwitchUnwind,

    /// The shared cleanup function for a switch lowering.
    SwitchCleanup,

    /// An individual continuation function.
    Continuation,

    /// An async resume function.
    Async,
  };

private:
  // The coroutine being split.
  Function &OrigF;
  // The clone; null until create() builds it for the switch ABI.
  Function *NewF;
  // Suffix appended to OrigF's name to form the clone's name.
  const Twine &Suffix;
  coro::Shape &Shape;
  Kind FKind;
  // Maps values of OrigF to their counterparts in NewF.
  ValueToValueMapTy VMap;
  IRBuilder<> Builder;
  // The clone's frame pointer, computed by deriveNewFramePointer().
  Value *NewFramePtr = nullptr;

  /// The active suspend instruction; meaningful only for continuation and async
  /// ABIs.
  AnyCoroSuspendInst *ActiveSuspend = nullptr;

public:
  /// Create a cloner for a switch lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Kind FKind)
      : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
        FKind(FKind), Builder(OrigF.getContext()) {
    assert(Shape.ABI == coro::ABI::Switch);
  }

  /// Create a cloner for a continuation lowering.
  CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
             Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
      : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
        FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
        Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
    assert(Shape.ABI == coro::ABI::Retcon ||
           Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
    assert(NewF && "need existing function for continuation");
    assert(ActiveSuspend && "need active suspend point for continuation");
  }

  /// Return the clone; only valid once it has been created/provided.
  Function *getFunction() const {
    assert(NewF != nullptr && "declaration not yet set");
    return NewF;
  }

  void create();

private:
  /// True for the clones that run the destroy path (unwind/cleanup) in the
  /// switch lowering; they replace coro.suspend with 1 instead of 0.
  bool isSwitchDestroyFunction() {
    switch (FKind) {
    case Kind::Async:
    case Kind::Continuation:
    case Kind::SwitchResume:
      return false;
    case Kind::SwitchUnwind:
    case Kind::SwitchCleanup:
      return true;
    }
    llvm_unreachable("Unknown CoroCloner::Kind enum");
  }

  void replaceEntryBlock();
  Value *deriveNewFramePointer();
  void replaceRetconOrAsyncSuspendUses();
  void replaceCoroSuspends();
  void replaceCoroEnds();
  void replaceSwiftErrorOps();
  void salvageDebugInfo();
  void handleFinalSuspend();
};

} // end anonymous namespace

/// Free the continuation storage for a retcon/retcon.once coroutine, unless
/// the frame was placed inline in the caller-provided storage.
static void maybeFreeRetconStorage(IRBuilder<> &Builder,
                                   const coro::Shape &Shape, Value *FramePtr,
                                   CallGraph *CG) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  if (Shape.RetconLowering.IsFrameInlineInStorage)
    return;

  Shape.emitDealloc(Builder, FramePtr, CG);
}

/// Replace an llvm.coro.end.async.
/// Will inline the must tail call function call if there is one.
/// \returns true if cleanup of the coro.end block is needed, false otherwise.
static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
  IRBuilder<> Builder(End);

  // A plain (non-async) coro.end, or one without a must-tail call, simply
  // becomes `ret void`; the caller must still clean up the coro.end block.
  auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
  if (!EndAsync) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
  if (!MustTailCallFunc) {
    Builder.CreateRetVoid();
    return true /*needs cleanup of coro.end block*/;
  }

  // Move the must tail call from the predecessor block into the end block.
  auto *CoroEndBlock = End->getParent();
  auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
  assert(MustTailCallFuncBlock && "Must have a single predecessor block");
  auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
  // The must-tail call sits immediately before the predecessor's terminator.
  auto *MustTailCall = cast<CallInst>(&*std::prev(It));
  CoroEndBlock->getInstList().splice(
      End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);

  // Insert the return instruction.
  Builder.SetInsertPoint(End);
  Builder.CreateRetVoid();
  InlineFunctionInfo FnInfo;

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();

  // Inline the wrapper so the tail call's callee body lands directly here.
  auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
  assert(InlineRes.isSuccess() && "Expected inlining to succeed");
  (void)InlineRes;

  // We have cleaned up the coro.end block above.
  return false;
}

/// Replace a non-unwind call to llvm.coro.end.
static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
                                      const coro::Shape &Shape, Value *FramePtr,
                                      bool InResume, CallGraph *CG) {
  // Start inserting right before the coro.end.
  IRBuilder<> Builder(End);

  // Create the return instruction.
  switch (Shape.ABI) {
  // The cloned functions in switch-lowering always return void.
  case coro::ABI::Switch:
    // coro.end doesn't immediately end the coroutine in the main function
    // in this lowering, because we need to deallocate the coroutine.
    if (!InResume)
      return;
    Builder.CreateRetVoid();
    break;

  // In async lowering this returns.
  case coro::ABI::Async: {
    bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
    if (!CoroEndBlockNeedsCleanup)
      return;
    break;
  }

  // In unique continuation lowering, the continuations always return void.
  // But we may have implicitly allocated storage.
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    Builder.CreateRetVoid();
    break;

  // In non-unique continuation lowering, we signal completion by returning
  // a null continuation.
  case coro::ABI::Retcon: {
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    auto RetTy = Shape.getResumeFunctionType()->getReturnType();
    // The continuation pointer is either the sole return value or the first
    // element of an aggregate return.
    auto RetStructTy = dyn_cast<StructType>(RetTy);
    PointerType *ContinuationTy =
        cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);

    Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
    if (RetStructTy) {
      ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
                                              ReturnValue, 0);
    }
    Builder.CreateRet(ReturnValue);
    break;
  }
  }

  // Remove the rest of the block, by splitting it into an unreachable block.
  auto *BB = End->getParent();
  BB->splitBasicBlock(End);
  BB->getTerminator()->eraseFromParent();
}

// Mark a coroutine as done, which implies that the coroutine is finished and
// never gets resumed.
//
// In resume-switched ABI, the done state is represented by storing zero in
// ResumeFnAddr.
//
// NOTE: We couldn't omit the argument `FramePtr`. It is necessary because the
// pointer to the frame in the split function is not stored in `Shape`.
static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
                                Value *FramePtr) {
  assert(
      Shape.ABI == coro::ABI::Switch &&
      "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
  // Store null into the frame's resume-function slot; a null ResumeFn is the
  // "done" state in the switch lowering.
  auto *GepIndex = Builder.CreateStructGEP(
      Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "ResumeFn.addr");
  auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
      Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
  Builder.CreateStore(NullPtr, GepIndex);
}

/// Replace an unwind call to llvm.coro.end.
static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                                 Value *FramePtr, bool InResume,
                                 CallGraph *CG) {
  IRBuilder<> Builder(End);

  switch (Shape.ABI) {
  // In switch-lowering, this does nothing in the main function.
  case coro::ABI::Switch: {
    // In C++'s specification, the coroutine should be marked as done
    // if promise.unhandled_exception() throws. The frontend will
    // call coro.end(true) along this path.
    //
    // FIXME: We should refactor this once there is other language
    // which uses Switch-Resumed style other than C++.
    markCoroutineAsDone(Builder, Shape, FramePtr);
    if (!InResume)
      return;
    break;
  }
  // In async lowering this does nothing.
  case coro::ABI::Async:
    break;
  // In continuation-lowering, this frees the continuation storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
    break;
  }

  // If coro.end has an associated bundle, add cleanupret instruction.
  if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
    auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
    auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
    // The cleanupret becomes the block terminator; drop the branch created by
    // the split.
    End->getParent()->splitBasicBlock(End);
    CleanupRet->getParent()->getTerminator()->eraseFromParent();
  }
}

/// Lower one llvm.coro.end: dispatch to the unwind or fallthrough handler,
/// then fold the intrinsic itself to true (in resume clones) or false (in the
/// ramp function) and erase it.
static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
                           Value *FramePtr, bool InResume, CallGraph *CG) {
  if (End->isUnwind())
    replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
  else
    replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);

  auto &Context = End->getContext();
  End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
                                   : ConstantInt::getFalse(Context));
  End->eraseFromParent();
}

// Create an entry block for a resume function with a switch that will jump to
// suspend points.
static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Switch);
  LLVMContext &C = F.getContext();

  // resume.entry:
  //  %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
  //  i32 2
  //  % index = load i32, i32* %index.addr
  //  switch i32 %index, label %unreachable [
  //    i32 0, label %resume.0
  //    i32 1, label %resume.1
  //    ...
  //  ]

  auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
  auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);

  IRBuilder<> Builder(NewEntry);
  auto *FramePtr = Shape.FramePtr;
  auto *FrameTy = Shape.FrameTy;
  auto *GepIndex = Builder.CreateStructGEP(
      FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
  auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
  auto *Switch =
      Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
  Shape.SwitchLowering.ResumeSwitch = Switch;

  size_t SuspendIndex = 0;
  for (auto *AnyS : Shape.CoroSuspends) {
    auto *S = cast<CoroSuspendInst>(AnyS);
    ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);

    // Replace CoroSave with a store to Index:
    //    %index.addr = getelementptr %f.frame... (index field number)
    //    store i32 0, i32* %index.addr1
    auto *Save = S->getCoroSave();
    Builder.SetInsertPoint(Save);
    if (S->isFinal()) {
      // The coroutine should be marked done if it reaches the final suspend
      // point.
      markCoroutineAsDone(Builder, Shape, FramePtr);
    } else {
      auto *GepIndex = Builder.CreateStructGEP(
          FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
      Builder.CreateStore(IndexVal, GepIndex);
    }
    Save->replaceAllUsesWith(ConstantTokenNone::get(C));
    Save->eraseFromParent();

    // Split block before and after coro.suspend and add a jump from an entry
    // switch:
    //
    //  whateverBB:
    //    whatever
    //    %0 = call i8 @llvm.coro.suspend(token none, i1 false)
    //    switch i8 %0, label %suspend[i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // becomes:
    //
    //  whateverBB:
    //    whatever
    //    br label %resume.0.landing
    //
    //  resume.0: ; <--- jump from the switch in the resume.entry
    //    %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
    //    br label %resume.0.landing
    //
    //  resume.0.landing:
    //    %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
    //    switch i8 % 1, label %suspend [i8 0, label %resume
    //                                   i8 1, label %cleanup]

    auto *SuspendBB = S->getParent();
    auto *ResumeBB =
        SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
    auto *LandingBB = ResumeBB->splitBasicBlock(
        S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
    Switch->addCase(IndexVal, ResumeBB);

    // First-time execution falls through to the landing block with -1; resume
    // entry jumps to ResumeBB, whose suspend result feeds the phi.
    cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
    auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
    S->replaceAllUsesWith(PN);
    PN->addIncoming(Builder.getInt8(-1), SuspendBB);
    PN->addIncoming(S, ResumeBB);

    ++SuspendIndex;
  }

  Builder.SetInsertPoint(UnreachBB);
  Builder.CreateUnreachable();

  Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
}


// Rewrite final suspend point handling. We do not use suspend index to
// represent the final suspend point.
// Instead we zero-out ResumeFnAddr in the
// coroutine frame, since it is undefined behavior to resume a coroutine
// suspended at the final suspend point. Thus, in the resume function, we can
// simply remove the last case (when coro::Shape is built, the final suspend
// point (if present) is always the last element of CoroSuspends array).
// In the destroy function, we add a code sequence to check if ResumeFnAddress
// is Null, and if so, jump to the appropriate label to handle cleanup from the
// final suspend point.
void CoroCloner::handleFinalSuspend() {
  assert(Shape.ABI == coro::ABI::Switch &&
         Shape.SwitchLowering.HasFinalSuspend);
  auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
  // The final suspend's case is always last; drop it from this clone.
  auto FinalCaseIt = std::prev(Switch->case_end());
  BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
  Switch->removeCase(FinalCaseIt);
  if (isSwitchDestroyFunction()) {
    // In destroy clones, branch to the final-suspend cleanup when ResumeFn is
    // null (i.e. the coroutine is suspended at the final suspend point).
    BasicBlock *OldSwitchBB = Switch->getParent();
    auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
    Builder.SetInsertPoint(OldSwitchBB->getTerminator());
    auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
                                             coro::Shape::SwitchFieldIndex::Resume,
                                             "ResumeFn.addr");
    auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
                                    GepIndex);
    auto *Cond = Builder.CreateIsNull(Load);
    Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
    OldSwitchBB->getTerminator()->eraseFromParent();
  }
}

/// Build the type of an async resume function: void, taking the element types
/// of the async suspend's result struct as parameters.
static FunctionType *
getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
  auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
  auto *StructTy = cast<StructType>(AsyncSuspend->getType());
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *VoidTy = Type::getVoidTy(Context);
  return FunctionType::get(VoidTy, StructTy->elements(), false);
}

/// Create the declaration for a resume/destroy clone of \p OrigF, named
/// OrigF's name plus \p Suffix, and insert it into the module before
/// \p InsertBefore.
static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
                                        const Twine &Suffix,
                                        Module::iterator InsertBefore,
                                        AnyCoroSuspendInst *ActiveSuspend) {
  Module *M = OrigF.getParent();
  auto *FnTy = (Shape.ABI != coro::ABI::Async)
                   ? Shape.getResumeFunctionType()
                   : getFunctionTypeFromAsyncSuspend(ActiveSuspend);

  Function *NewF =
      Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
                       OrigF.getName() + Suffix);
  if (Shape.ABI != coro::ABI::Async)
    NewF->addParamAttr(0, Attribute::NonNull);

  // For the async lowering ABI we can't guarantee that the context argument is
  // not accessed via a different pointer not based on the argument.
  if (Shape.ABI != coro::ABI::Async)
    NewF->addParamAttr(0, Attribute::NoAlias);

  M->getFunctionList().insert(InsertBefore, NewF);

  return NewF;
}

/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
/// arguments to the continuation function.
///
/// This assumes that the builder has a meaningful insertion point.
void CoroCloner::replaceRetconOrAsyncSuspendUses() {
  assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
         Shape.ABI == coro::ABI::Async);

  auto NewS = VMap[ActiveSuspend];
  if (NewS->use_empty()) return;

  // Copy out all the continuation arguments after the buffer pointer into
  // an easily-indexed data structure for convenience.
  SmallVector<Value*, 8> Args;
  // The async ABI includes all arguments -- including the first argument.
  bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
  for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
            E = NewF->arg_end();
       I != E; ++I)
    Args.push_back(&*I);

  // If the suspend returns a single scalar value, we can just do a simple
  // replacement.
  if (!isa<StructType>(NewS->getType())) {
    assert(Args.size() == 1);
    NewS->replaceAllUsesWith(Args.front());
    return;
  }

  // Try to peephole extracts of an aggregate return.
  for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
    auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
    if (!EVI || EVI->getNumIndices() != 1)
      continue;

    EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
    EVI->eraseFromParent();
  }

  // If we have no remaining uses, we're done.
  if (NewS->use_empty()) return;

  // Otherwise, we need to create an aggregate.
  Value *Agg = UndefValue::get(NewS->getType());
  for (size_t I = 0, E = Args.size(); I != E; ++I)
    Agg = Builder.CreateInsertValue(Agg, Args[I], I);

  NewS->replaceAllUsesWith(Agg);
}

/// Replace every (non-active) coro.suspend clone with the constant result it
/// should produce in this kind of clone, then erase it.
void CoroCloner::replaceCoroSuspends() {
  Value *SuspendResult;

  switch (Shape.ABI) {
  // In switch lowering, replace coro.suspend with the appropriate value
  // for the type of function we're extracting.
  // Replacing coro.suspend with (0) will result in control flow proceeding to
  // a resume label associated with a suspend point, replacing it with (1) will
  // result in control flow proceeding to a cleanup label associated with this
  // suspend point.
  case coro::ABI::Switch:
    SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
    break;

  // In async lowering there are no uses of the result.
  case coro::ABI::Async:
    return;

  // In returned-continuation lowering, the arguments from earlier
  // continuations are theoretically arbitrary, and they should have been
  // spilled.
  case coro::ABI::RetconOnce:
  case coro::ABI::Retcon:
    return;
  }

  for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
    // The active suspend was handled earlier.
    if (CS == ActiveSuspend) continue;

    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
    MappedCS->replaceAllUsesWith(SuspendResult);
    MappedCS->eraseFromParent();
  }
}

/// Lower every cloned llvm.coro.end in the new function.
void CoroCloner::replaceCoroEnds() {
  for (AnyCoroEndInst *CE : Shape.CoroEnds) {
    // We use a null call graph because there's no call graph node for
    // the cloned function yet. We'll just be rebuilding that later.
    auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
    replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
  }
}

/// Rewrite the swifterror intrinsic calls recorded in Shape.SwiftErrorOps into
/// loads/stores of a swifterror slot (either a swifterror argument of \p F or
/// a swifterror alloca created in its entry block). When \p VMap is non-null,
/// the rewrites are applied to the cloned instructions instead.
static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
                                 ValueToValueMapTy *VMap) {
  if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
    return;
  Value *CachedSlot = nullptr;
  auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
    if (CachedSlot) {
      assert(cast<PointerType>(CachedSlot->getType())
                 ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
             "multiple swifterror slots in function with different types");
      return CachedSlot;
    }

    // Check if the function has a swifterror argument.
    for (auto &Arg : F.args()) {
      if (Arg.isSwiftError()) {
        CachedSlot = &Arg;
        assert(cast<PointerType>(Arg.getType())
                   ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
               "swifterror argument does not have expected type");
        return &Arg;
      }
    }

    // Create a swifterror alloca.
    IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
    auto Alloca = Builder.CreateAlloca(ValueTy);
    Alloca->setSwiftError(true);

    CachedSlot = Alloca;
    return Alloca;
  };

  for (CallInst *Op : Shape.SwiftErrorOps) {
    auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
    IRBuilder<> Builder(MappedOp);

    // If there are no arguments, this is a 'get' operation.
    Value *MappedResult;
    if (Op->arg_empty()) {
      auto ValueTy = Op->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      MappedResult = Builder.CreateLoad(ValueTy, Slot);
    } else {
      assert(Op->arg_size() == 1);
      auto Value = MappedOp->getArgOperand(0);
      auto ValueTy = Value->getType();
      auto Slot = getSwiftErrorSlot(ValueTy);
      Builder.CreateStore(Value, Slot);
      MappedResult = Slot;
    }

    MappedOp->replaceAllUsesWith(MappedResult);
    MappedOp->eraseFromParent();
  }

  // If we're updating the original function, we've invalidated SwiftErrorOps.
  if (VMap == nullptr) {
    Shape.SwiftErrorOps.clear();
  }
}

/// Rewrite the swifterror ops in the cloned function.
void CoroCloner::replaceSwiftErrorOps() {
  ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
}

/// Salvage the debug-variable intrinsics of the clone, then delete those that
/// ended up unreachable or stale after the split.
void CoroCloner::salvageDebugInfo() {
  SmallVector<DbgVariableIntrinsic *, 8> Worklist;
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
  for (auto &BB : *NewF)
    for (auto &I : BB)
      if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
        Worklist.push_back(DVI);
  for (DbgVariableIntrinsic *DVI : Worklist)
    coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);

  // Remove all salvaged dbg.declare intrinsics that became
  // either unreachable or stale due to the CoroSplit transformation.
  DominatorTree DomTree(*NewF);
  auto IsUnreachableBlock = [&](BasicBlock *BB) {
    return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
                                   &DomTree);
  };
  for (DbgVariableIntrinsic *DVI : Worklist) {
    if (IsUnreachableBlock(DVI->getParent()))
      DVI->eraseFromParent();
    else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
      // Count all non-debuginfo uses in reachable blocks.
      unsigned Uses = 0;
      for (auto *User : DVI->getVariableLocationOp(0)->users())
        if (auto *I = dyn_cast<Instruction>(User))
          if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
            ++Uses;
      if (!Uses)
        DVI->eraseFromParent();
    }
  }
}

void CoroCloner::replaceEntryBlock() {
  // In the original function, the AllocaSpillBlock is a block immediately
  // following the allocation of the frame object which defines GEPs for
  // all the allocas that have been moved into the frame, and it ends by
  // branching to the original beginning of the coroutine. Make this
  // the entry block of the cloned function.
  auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
  auto *OldEntry = &NewF->getEntryBlock();
  Entry->setName("entry" + Suffix);
  Entry->moveBefore(OldEntry);
  Entry->getTerminator()->eraseFromParent();

  // Clear all predecessors of the new entry block. There should be
  // exactly one predecessor, which we created when splitting out
  // AllocaSpillBlock to begin with.
  assert(Entry->hasOneUse());
  auto BranchToEntry = cast<BranchInst>(Entry->user_back());
  assert(BranchToEntry->isUnconditional());
  Builder.SetInsertPoint(BranchToEntry);
  Builder.CreateUnreachable();
  BranchToEntry->eraseFromParent();

  // Branch from the entry to the appropriate place.
  Builder.SetInsertPoint(Entry);
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    // In switch-lowering, we built a resume-entry block in the original
    // function. Make the entry block branch to this.
    auto *SwitchBB =
        cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
    Builder.CreateBr(SwitchBB);
    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    // In continuation ABIs, we want to branch to immediately after the
    // active suspend point. Earlier phases will have put the suspend in its
    // own basic block, so just thread our jump directly to its successor.
    assert((Shape.ABI == coro::ABI::Async &&
            isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
           ((Shape.ABI == coro::ABI::Retcon ||
             Shape.ABI == coro::ABI::RetconOnce) &&
            isa<CoroSuspendRetconInst>(ActiveSuspend)));
    auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
    auto Branch = cast<BranchInst>(MappedCS->getNextNode());
    assert(Branch->isUnconditional());
    Builder.CreateBr(Branch->getSuccessor(0));
    break;
  }
  }

  // Any static alloca that's still being used but not reachable from the new
  // entry needs to be moved to the new entry.
  Function *F = OldEntry->getParent();
  DominatorTree DT{*F};
  for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
    auto *Alloca = dyn_cast<AllocaInst>(&I);
    if (!Alloca || I.use_empty())
      continue;
    if (DT.isReachableFromEntry(I.getParent()) ||
        !isa<ConstantInt>(Alloca->getArraySize()))
      continue;
    I.moveBefore(*Entry, Entry->getFirstInsertionPt());
  }
}

/// Derive the value of the new frame pointer.
Value *CoroCloner::deriveNewFramePointer() {
  // Builder should be inserting to the front of the new entry block.

  switch (Shape.ABI) {
  // In switch-lowering, the argument is the frame pointer.
  case coro::ABI::Switch:
    return &*NewF->arg_begin();
  // In async-lowering, one of the arguments is an async context as determined
  // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
  // the resume function from the async context projection function associated
  // with the active suspend. The frame is located as a tail to the async
  // context header.
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    // The low byte of the storage-argument index encodes the context argument
    // position.
    auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
    auto *CalleeContext = NewF->getArg(ContextIdx);
    auto *FramePtrTy = Shape.FrameTy->getPointerTo();
    auto *ProjectionFunc =
        ActiveAsyncSuspend->getAsyncContextProjectionFunction();
    auto DbgLoc =
        cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
    // Calling i8* (i8*)
    auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
                                             ProjectionFunc, CalleeContext);
    CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
    CallerContext->setDebugLoc(DbgLoc);
    // The frame is located after the async_context header.
    auto &Context = Builder.getContext();
    auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
        Type::getInt8Ty(Context), CallerContext,
        Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
    // Inline the projection function.
    InlineFunctionInfo InlineInfo;
    auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
    assert(InlineRes.isSuccess());
    (void)InlineRes;
    return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
  }
  // In continuation-lowering, the argument is the opaque storage.
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce: {
    Argument *NewStorage = &*NewF->arg_begin();
    auto FramePtrTy = Shape.FrameTy->getPointerTo();

    // If the storage is inline, just bitcast to the storage to the frame type.
    if (Shape.RetconLowering.IsFrameInlineInStorage)
      return Builder.CreateBitCast(NewStorage, FramePtrTy);

    // Otherwise, load the real frame from the opaque storage.
    auto FramePtrPtr =
        Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
    return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
  }
  }
  llvm_unreachable("bad ABI");
}

/// Mark parameter \p ParamIndex as a nonnull, noalias frame pointer with the
/// frame's alignment and dereferenceable size.
static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex,
                                 uint64_t Size, Align Alignment) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::NonNull);
  ParamAttrs.addAttribute(Attribute::NoAlias);
  ParamAttrs.addAlignmentAttr(Alignment);
  ParamAttrs.addDereferenceableAttr(Size);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Mark parameter \p ParamIndex as the swiftasync context argument.
static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
                                 unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftAsync);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Mark parameter \p ParamIndex as the swiftself argument.
static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
                              unsigned ParamIndex) {
  AttrBuilder ParamAttrs(Context);
  ParamAttrs.addAttribute(Attribute::SwiftSelf);
  Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
}

/// Clone the body of the original function into a resume function of
/// some sort.
void CoroCloner::create() {
  // Create the new function if we don't already have one.
  if (!NewF) {
    NewF = createCloneDeclaration(OrigF, Shape, Suffix,
                                  OrigF.getParent()->end(), ActiveSuspend);
  }

  // Replace all args with dummy instructions. If an argument is the old frame
  // pointer, the dummy will be replaced by the new frame pointer once it is
  // computed below. Uses of all other arguments should have already been
  // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
  // frame.
  SmallVector<Instruction *> DummyArgs;
  for (Argument &A : OrigF.args()) {
    DummyArgs.push_back(new FreezeInst(UndefValue::get(A.getType())));
    VMap[&A] = DummyArgs.back();
  }

  SmallVector<ReturnInst *, 4> Returns;

  // Ignore attempts to change certain attributes of the function.
  // TODO: maybe there should be a way to suppress this during cloning?
  auto savedVisibility = NewF->getVisibility();
  auto savedUnnamedAddr = NewF->getUnnamedAddr();
  auto savedDLLStorageClass = NewF->getDLLStorageClass();

  // NewF's linkage (which CloneFunctionInto does *not* change) might not
  // be compatible with the visibility of OrigF (which it *does* change),
  // so protect against that.
  auto savedLinkage = NewF->getLinkage();
  NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);

  CloneFunctionInto(NewF, &OrigF, VMap,
                    CloneFunctionChangeType::LocalChangesOnly, Returns);

  auto &Context = NewF->getContext();

  // For async functions / continuations, adjust the scope line of the
  // clone to the line number of the suspend point. However, only
  // adjust the scope line when the files are the same. This ensures
  // line number and file name belong together. The scope line is
  // associated with all pre-prologue instructions. This avoids a jump
  // in the linetable from the function declaration to the suspend point.
  if (DISubprogram *SP = NewF->getSubprogram()) {
    assert(SP != OrigF.getSubprogram() && SP->isDistinct());
    if (ActiveSuspend)
      if (auto DL = ActiveSuspend->getDebugLoc())
        if (SP->getFile() == DL->getFile())
          SP->setScopeLine(DL->getLine());
    // Update the linkage name to reflect the modified symbol name. It
    // is necessary to update the linkage name in Swift, since the
    // mangling changes for resume functions. It might also be the
    // right thing to do in C++, but due to a limitation in LLVM's
    // AsmPrinter we can only do this if the function doesn't have an
    // abstract specification, since the DWARF backend expects the
    // abstract specification to contain the linkage name and asserts
    // that they are identical.
    if (!SP->getDeclaration() && SP->getUnit() &&
        SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
      SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
  }

  NewF->setLinkage(savedLinkage);
  NewF->setVisibility(savedVisibility);
  NewF->setUnnamedAddr(savedUnnamedAddr);
  NewF->setDLLStorageClass(savedDLLStorageClass);
  // The function sanitizer metadata needs to match the signature of the
  // function it is being attached to. However this does not hold for split
  // functions here. Thus remove the metadata for split functions.
  if (Shape.ABI == coro::ABI::Switch &&
      NewF->hasMetadata(LLVMContext::MD_func_sanitize))
    NewF->eraseMetadata(LLVMContext::MD_func_sanitize);

  // Replace the attributes of the new function:
  auto OrigAttrs = NewF->getAttributes();
  auto NewAttrs = AttributeList();

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Bootstrap attributes by copying function attributes from the
    // original function. This should include optimization settings and so on.
    NewAttrs = NewAttrs.addFnAttributes(
        Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.FrameSize, Shape.FrameAlign);
    break;
  case coro::ABI::Async: {
    auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
    if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
                                Attribute::SwiftAsync)) {
      uint32_t ArgAttributeIndices =
          ActiveAsyncSuspend->getStorageArgumentIndex();
      auto ContextArgIndex = ArgAttributeIndices & 0xff;
      addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);

      // `swiftasync` must precede `swiftself` so 0 is not a valid index for
      // `swiftself`.
      auto SwiftSelfIndex = ArgAttributeIndices >> 8;
      if (SwiftSelfIndex)
        addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
    }

    // Transfer the original function's attributes.
    auto FnAttrs = OrigF.getAttributes().getFnAttrs();
    NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
    break;
  }
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // If we have a continuation prototype, just use its attributes,
    // full-stop.
    NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();

    addFramePointerAttrs(NewAttrs, Context, 0,
                         Shape.getRetconCoroId()->getStorageSize(),
                         Shape.getRetconCoroId()->getStorageAlignment());
    break;
  }

  switch (Shape.ABI) {
  // In these ABIs, the cloned functions always return 'void', and the
  // existing return sites are meaningless. Note that for unique
  // continuations, this includes the returns associated with suspends;
  // this is fine because we can't suspend twice.
  case coro::ABI::Switch:
  case coro::ABI::RetconOnce:
    // Remove old returns.
    for (ReturnInst *Return : Returns)
      changeToUnreachable(Return);
    break;

  // With multi-suspend continuations, we'll already have eliminated the
  // original returns and inserted returns before all the suspend points,
  // so we want to leave any returns in place.
  case coro::ABI::Retcon:
    break;
  // Async lowering will insert musttail call functions at all suspend points
  // followed by a return.
  // Don't change returns to unreachable because that will trip up the verifier.
  // These returns should be unreachable from the clone.
  case coro::ABI::Async:
    break;
  }

  NewF->setAttributes(NewAttrs);
  NewF->setCallingConv(Shape.getResumeFunctionCC());

  // Set up the new entry block.
  replaceEntryBlock();

  Builder.SetInsertPoint(&NewF->getEntryBlock().front());
  NewFramePtr = deriveNewFramePointer();

  // Remap frame pointer.
  Value *OldFramePtr = VMap[Shape.FramePtr];
  NewFramePtr->takeName(OldFramePtr);
  OldFramePtr->replaceAllUsesWith(NewFramePtr);

  // Remap vFrame pointer.
  auto *NewVFrame = Builder.CreateBitCast(
      NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
  Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
  if (OldVFrame != NewVFrame)
    OldVFrame->replaceAllUsesWith(NewVFrame);

  // All uses of the arguments should have been resolved by this point,
  // so we can safely remove the dummy values.
  for (Instruction *DummyArg : DummyArgs) {
    DummyArg->replaceAllUsesWith(UndefValue::get(DummyArg->getType()));
    DummyArg->deleteValue();
  }

  switch (Shape.ABI) {
  case coro::ABI::Switch:
    // Rewrite final suspend handling as it is not done via switch (allows us
    // to remove the final case from the switch, since it is undefined
    // behavior to resume the coroutine suspended at the final suspend point).
    if (Shape.SwitchLowering.HasFinalSuspend)
      handleFinalSuspend();
    break;
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    // Replace uses of the active suspend with the corresponding
    // continuation-function arguments.
    assert(ActiveSuspend != nullptr &&
           "no active suspend when lowering a continuation-style coroutine");
    replaceRetconOrAsyncSuspendUses();
    break;
  }

  // Handle suspends.
  replaceCoroSuspends();

  // Handle swifterror.
  replaceSwiftErrorOps();

  // Remove coro.end intrinsics.
  replaceCoroEnds();

  // Salvage debug info that points into the coroutine frame.
  salvageDebugInfo();

  // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
  // to suppress deallocation code.
  if (Shape.ABI == coro::ABI::Switch)
    coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
                          /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
}

// Create a resume clone by cloning the body of the original function, setting
// a new entry block and replacing coro.suspend with an appropriate value to
// force the resume or cleanup pass for every suspend point.
static Function *createClone(Function &F, const Twine &Suffix,
                             coro::Shape &Shape, CoroCloner::Kind FKind) {
  CoroCloner Cloner(F, Suffix, Shape, FKind);
  Cloner.create();
  return Cloner.getFunction();
}

/// Rewrite the async function pointer record so that its second field holds
/// the final computed size of the async context.
static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
  assert(Shape.ABI == coro::ABI::Async);

  auto *FuncPtrStruct = cast<ConstantStruct>(
      Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
  auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
  auto *OrigContextSize = FuncPtrStruct->getOperand(1);
  auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
                                          Shape.AsyncLowering.ContextSize);
  auto *NewFuncPtrStruct = ConstantStruct::get(
      FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);

  Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
}

/// Replace llvm.coro.size and llvm.coro.align intrinsics with the now-known
/// constant size and alignment of the coroutine frame.
static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
  if (Shape.ABI == coro::ABI::Async)
    updateAsyncFuncPointerContextSize(Shape);

  for (CoroAlignInst *CA : Shape.CoroAligns) {
    CA->replaceAllUsesWith(
        ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
    CA->eraseFromParent();
  }

  if (Shape.CoroSizes.empty())
    return;

  // In the same function all coro.sizes should have the same result type.
  auto *SizeIntrin = Shape.CoroSizes.back();
  Module *M = SizeIntrin->getModule();
  const DataLayout &DL = M->getDataLayout();
  auto Size = DL.getTypeAllocSize(Shape.FrameTy);
  auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);

  for (CoroSizeInst *CS : Shape.CoroSizes) {
    CS->replaceAllUsesWith(SizeConstant);
    CS->eraseFromParent();
  }
}

// Create a global constant array containing pointers to functions provided
// and set the Info parameter of CoroBegin to point at this constant.
// Example:
//
//   @f.resumers = internal constant [2 x void(%f.frame*)*]
//                 [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
//   define void @f() {
//     ...
//     call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
//                    i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
//
// Assumes that all the functions have the same signature.
static void setCoroInfo(Function &F, coro::Shape &Shape,
                        ArrayRef<Function *> Fns) {
  // This only works under the switch-lowering ABI because coro elision
  // only works on the switch-lowering ABI.
  assert(Shape.ABI == coro::ABI::Switch);

  SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
  assert(!Args.empty());
  Function *Part = *Fns.begin();
  Module *M = Part->getParent();
  auto *ArrTy = ArrayType::get(Part->getType(), Args.size());

  auto *ConstVal = ConstantArray::get(ArrTy, Args);
  auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
                                GlobalVariable::PrivateLinkage, ConstVal,
                                F.getName() + Twine(".resumers"));

  // Update coro.begin instruction to refer to this constant.
  LLVMContext &C = F.getContext();
  auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
  Shape.getSwitchCoroId()->setInfo(BC);
}

// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
                            Function *DestroyFn, Function *CleanupFn) {
  assert(Shape.ABI == coro::ABI::Switch);

  IRBuilder<> Builder(Shape.getInsertPtAfterFramePtr());

  auto *ResumeAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
      "resume.addr");
  Builder.CreateStore(ResumeFn, ResumeAddr);

  Value *DestroyOrCleanupFn = DestroyFn;

  CoroIdInst *CoroId = Shape.getSwitchCoroId();
  if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
    // If there is a CoroAlloc and it returns false (meaning we elide the
    // allocation), use CleanupFn instead of DestroyFn.
    DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
  }

  auto *DestroyAddr = Builder.CreateStructGEP(
      Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
      "destroy.addr");
  Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
}

/// Remove unreachable blocks left behind by splitting and (in asserts builds)
/// verify the resulting function.
static void postSplitCleanup(Function &F) {
  removeUnreachableBlocks(F);

#ifndef NDEBUG
  // For now, we do a mandatory verification step because we don't
  // entirely trust this pass. Note that we don't want to add a verifier
  // pass to FPM below because it will also verify all the global data.
  if (verifyFunction(F, &errs()))
    report_fatal_error("Broken function");
#endif
}

// Assuming we arrived at the block NewBlock from the Prev instruction, store
// the PHIs' incoming values in the ResolvedValues map.
static void
scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
                          DenseMap<Value *, Value *> &ResolvedValues) {
  auto *PrevBB = Prev->getParent();
  for (PHINode &PN : NewBlock->phis()) {
    auto V = PN.getIncomingValueForBlock(PrevBB);
    // See if we already resolved it.
    auto VI = ResolvedValues.find(V);
    if (VI != ResolvedValues.end())
      V = VI->second;
    // Remember the value.
    ResolvedValues[&PN] = V;
  }
}

// Replace a sequence of branches leading to a ret, with a clone of a ret
// instruction. Suspend instruction represented by a switch, track the PHI
// values and select the correct case successor when possible.
static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
  DenseMap<Value *, Value *> ResolvedValues;
  BasicBlock *UnconditionalSucc = nullptr;
  assert(InitialInst->getModule());
  const DataLayout &DL = InitialInst->getModule()->getDataLayout();

  auto GetFirstValidInstruction = [](Instruction *I) {
    while (I) {
      // BitCastInst wouldn't generate actual code so that we could skip it.
      if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
          I->isLifetimeStartOrEnd())
        I = I->getNextNode();
      else if (isInstructionTriviallyDead(I))
        // Since we are in the middle of the transformation, we need to erase
        // the dead instruction manually.
        I = &*I->eraseFromParent();
      else
        break;
    }
    return I;
  };

  // Look a value up in ResolvedValues first; only values that fold to a
  // ConstantInt are usable for resolving branches/switches below.
  auto TryResolveConstant = [&ResolvedValues](Value *V) {
    auto It = ResolvedValues.find(V);
    if (It != ResolvedValues.end())
      V = It->second;
    return dyn_cast<ConstantInt>(V);
  };

  Instruction *I = InitialInst;
  while (I->isTerminator() || isa<CmpInst>(I)) {
    if (isa<ReturnInst>(I)) {
      if (I != InitialInst) {
        // If InitialInst is an unconditional branch,
        // remove PHI values that come from basic block of InitialInst
        if (UnconditionalSucc)
          UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
        ReplaceInstWithInst(InitialInst, I->clone());
      }
      return true;
    }
    if (auto *BR = dyn_cast<BranchInst>(I)) {
      if (BR->isUnconditional()) {
        BasicBlock *Succ = BR->getSuccessor(0);
        if (I == InitialInst)
          UnconditionalSucc = Succ;
        scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
        I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
        continue;
      }

      BasicBlock *BB = BR->getParent();
      // Handle the case the condition of the conditional branch is constant.
      // e.g.,
      //
      //     br i1 false, label %cleanup, label %CoroEnd
      //
      // It is possible during the transformation. We could continue the
      // simplifying in this case.
      if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
        // Handle this branch in next iteration.
        I = BB->getTerminator();
        continue;
      }
    } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
      // If the case number of suspended switch instruction is reduced to
      // 1, then it is simplified to CmpInst in llvm::ConstantFoldTerminator.
      auto *BR = dyn_cast<BranchInst>(
          GetFirstValidInstruction(CondCmp->getNextNode()));
      if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
        return false;

      // And the comparison looks like: %cond = icmp eq i8 %V, constant.
      // So we try to resolve constant for the first operand only since the
      // second operand should be literal constant by design.
      ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
      auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
      if (!Cond0 || !Cond1)
        return false;

      // Both operands of the CmpInst are Constant. So that we could evaluate
      // it immediately to get the destination.
      auto *ConstResult =
          dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
              CondCmp->getPredicate(), Cond0, Cond1, DL));
      if (!ConstResult)
        return false;

      CondCmp->replaceAllUsesWith(ConstResult);
      CondCmp->eraseFromParent();

      // Handle this branch in next iteration.
      I = BR;
      continue;
    } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
      ConstantInt *Cond = TryResolveConstant(SI->getCondition());
      if (!Cond)
        return false;

      BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
      scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
      I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
      continue;
    }

    return false;
  }
  return false;
}

// Check whether CI obeys the rules of the musttail attribute.
static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
  if (CI.isInlineAsm())
    return false;

  // Match prototypes and calling conventions of resume function.
  FunctionType *CalleeTy = CI.getFunctionType();
  if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
    return false;

  Type *CalleeParmTy = CalleeTy->getParamType(0);
  if (!CalleeParmTy->isPointerTy() ||
      (CalleeParmTy->getPointerAddressSpace() != 0))
    return false;

  if (CI.getCallingConv() != F.getCallingConv())
    return false;

  // CI should not have any ABI-impacting function attributes.
  static const Attribute::AttrKind ABIAttrs[] = {
      Attribute::StructRet,    Attribute::ByVal,     Attribute::InAlloca,
      Attribute::Preallocated, Attribute::InReg,     Attribute::Returned,
      Attribute::SwiftSelf,    Attribute::SwiftError};
  AttributeList Attrs = CI.getAttributes();
  for (auto AK : ABIAttrs)
    if (Attrs.hasParamAttr(0, AK))
      return false;

  return true;
}

// Add musttail to any resume instructions that is immediately followed by a
// suspend (i.e. ret). We do this even in -O0 to support guaranteed tail call
// for symmetrical coroutine control transfer (C++ Coroutines TS extension).
// This transformation is done only in the resume part of the coroutine that has
// identical signature and calling convention as the coro.resume call.
static void addMustTailToCoroResumes(Function &F) {
  bool changed = false;

  // Collect potential resume instructions.
  SmallVector<CallInst *, 4> Resumes;
  for (auto &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (shouldBeMustTail(*Call, F))
        Resumes.push_back(Call);

  // Set musttail on those that are followed by a ret instruction.
  for (CallInst *Call : Resumes)
    if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
      Call->setTailCallKind(CallInst::TCK_MustTail);
      changed = true;
    }

  if (changed)
    removeUnreachableBlocks(F);
}

// Coroutine has no suspend points.
// Remove heap allocation for the coroutine frame if possible.
static void handleNoSuspendCoroutine(coro::Shape &Shape) {
  auto *CoroBegin = Shape.CoroBegin;
  auto *CoroId = CoroBegin->getId();
  auto *AllocInst = CoroId->getCoroAlloc();
  switch (Shape.ABI) {
  case coro::ABI::Switch: {
    auto SwitchId = cast<CoroIdInst>(CoroId);
    coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
    if (AllocInst) {
      // Replace the heap allocation with a frame on the caller's stack;
      // coro.alloc folds to false so the allocation path is skipped.
      IRBuilder<> Builder(AllocInst);
      auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
      Frame->setAlignment(Shape.FrameAlign);
      auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
      AllocInst->replaceAllUsesWith(Builder.getFalse());
      AllocInst->eraseFromParent();
      CoroBegin->replaceAllUsesWith(VFrame);
    } else {
      CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
    }

    break;
  }
  case coro::ABI::Async:
  case coro::ABI::Retcon:
  case coro::ABI::RetconOnce:
    CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
    break;
  }

  CoroBegin->eraseFromParent();
}

// SimplifySuspendPoint needs to check that there are no calls between
// coro_save and coro_suspend, since any of the calls may potentially resume
// the coroutine and if that is the case we cannot eliminate the suspend point.
static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
  for (Instruction *I = From; I != To; I = I->getNextNode()) {
    // Assume that no intrinsic can resume the coroutine.
    if (isa<IntrinsicInst>(I))
      continue;

    if (isa<CallBase>(I))
      return true;
  }
  return false;
}

static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
  // returns a token consumed by suspend instruction, all blocks in between
  // will have to eventually hit SaveBB when going backwards from ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (!Set.contains(Pred))
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If no more users remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  // Compact the vector in place: simplified suspends are swapped to the tail
  // and dropped by the final resize.
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);
}

static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  addMustTailToCoroResumes(*ResumeClone);

  // Store addresses of the resume/destroy/cleanup functions in the coroutine
  // frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to the resume/destroy/cleanup functions
  // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
  // pass can determine the correct function to call.
  setCoroInfo(F, Shape, Clones);
}

/// Replace the result of the llvm.coro.async.resume intrinsic with the
/// continuation function, erase the intrinsic, and clear the suspend's
/// resume-function operand to an undef placeholder.
static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
                      UndefValue::get(Int8PtrTy));
}

/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
                            ArrayRef<Value *> FnArgs,
                            SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}

CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                                   ArrayRef<Value *> Arguments,
                                   IRBuilder<> &Builder) {
  auto *FnTy = MustTailCallFn->getFunctionType();
  // Coerce the arguments; llvm optimizations seem to ignore the types in
  // vaarg functions and throw away casts in optimized mode.
  SmallVector<Value *, 8> CallArgs;
  coerceArguments(Builder, FnTy, Arguments, CallArgs);

  auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
  TailCall->setTailCallKind(CallInst::TCK_MustTail);
  TailCall->setDebugLoc(Loc);
  TailCall->setCallingConv(MustTailCallFn->getCallingConv());
  return TailCall;
}

static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
                                SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
  IRBuilder<> Builder(Id);

  // The frame lives at a fixed offset inside the async context storage.
  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);

    // Create the clone declaration. Swift's well-known projection functions
    // select the Swift mangling scheme for the continuation's name.
    auto ResumeNameSuffix = ".resume.";
    auto ProjectionFunctionName =
        Suspend->getAsyncContextProjectionFunction()->getName();
    bool UseSwiftMangling = false;
    if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
      ResumeNameSuffix = "TQ";
      UseSwiftMangling = true;
    } else if (ProjectionFunctionName.equals(
                   "__swift_async_resume_get_context")) {
      ResumeNameSuffix = "TY";
      UseSwiftMangling = true;
    }
    auto *Continuation = createCloneDeclaration(
        F, Shape,
        UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
                         : ResumeNameSuffix + Twine(Idx),
        NextF, Suspend);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
        CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
    auto *TailCall =
        coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    auto InlineRes = InlineFunction(*TailCall, FnInfo);
    assert(InlineRes.isSuccess() && "Expected inlining to succeed");
    (void)InlineRes;

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}

static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  // Allocate the frame.
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate.
    // We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block (lazily, on the first suspend).
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      // With no yielded values the continuation is the entire return value;
      // otherwise assemble an aggregate of {continuation, yields...}.
      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  // Now fill in the bodies of the continuation declarations created above.
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}

namespace {
/// Pretty-stack-trace entry naming the coroutine being split, so crashes
/// during splitting identify the offending function.
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;
public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
} // end anonymous namespace

/// Compute the coroutine Shape for \p F, build the frame, and dispatch to
/// the ABI-specific splitting routine, filling \p Clones with the resume/
/// destroy funclets. Returns the Shape so the caller can update the call
/// graph. A function with no coro.begin is returned unsplit.
static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones,
                                  bool OptimizeFrame) {
  PrettyStackTraceFunction prettyStackTrace(F);

  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
  removeUnreachableBlocks(F);

  coro::Shape Shape(F, OptimizeFrame);
  if (!Shape.CoroBegin)
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSizeAndAlignment(Shape);

  // If there are no suspend points, no split required, just remove
  // the allocation and deallocation blocks, they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Async:
      splitAsyncCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  // Finally, salvage the llvm.dbg.{declare,addr} in our original function that
  // point into the coroutine frame. We only do this for the current function
  // since the Cloner salvaged debug info for us in the new coroutine funclets.
  // Collect the debug intrinsics first: salvaging rewrites instructions, so
  // we must not mutate while iterating over the blocks.
  SmallVector<DbgVariableIntrinsic *, 8> Worklist;
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
  for (auto &BB : F) {
    for (auto &I : BB) {
      if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) {
        Worklist.push_back(DDI);
        continue;
      }
      if (auto *DDI = dyn_cast<DbgAddrIntrinsic>(&I)) {
        Worklist.push_back(DDI);
        continue;
      }
    }
  }
  for (auto *DDI : Worklist)
    coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);

  return Shape;
}

/// After splitting coroutine \p N, erase the (now dead) coro.end markers,
/// register the new funclets in \p Clones with the LazyCallGraph, and let
/// the CGSCC infrastructure update its analyses. No-op if the function was
/// not actually a coroutine (no coro.begin in \p Shape).
static void updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  if (!Shape.CoroBegin)
    return;

  // coro.end is no longer meaningful in the split function; fold it to
  // false and remove it.
  for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
    auto &Context = End->getContext();
    End->replaceAllUsesWith(ConstantInt::getFalse(Context));
    End->eraseFromParent();
  }

  if (!Clones.empty()) {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      // Each clone in the Switch lowering is independent of the other clones.
      // Let the LazyCallGraph know about each one separately.
      for (Function *Clone : Clones)
        CG.addSplitFunction(N.getFunction(), *Clone);
      break;
    case coro::ABI::Async:
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references all of the other
      // clones. Let the LazyCallGraph know about all of them at once.
      if (!Clones.empty())
        CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
      break;
    }

    // Let the CGSCC infra handle the changes to the original function.
    updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
  }

  // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
  // to the split functions.
  postSplitCleanup(N.getFunction());
  updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(U.getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

/// Replace every use of \p PrepareFn (an llvm.coro.prepare.* intrinsic)
/// via replacePrepare. Returns true if anything was replaced.
static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
    // Intrinsics can only be used in calls.
2039 auto *Prepare = cast<CallInst>(P.getUser()); 2040 replacePrepare(Prepare, CG, C); 2041 Changed = true; 2042 } 2043 2044 return Changed; 2045 } 2046 2047 static void addPrepareFunction(const Module &M, 2048 SmallVectorImpl<Function *> &Fns, 2049 StringRef Name) { 2050 auto *PrepareFn = M.getFunction(Name); 2051 if (PrepareFn && !PrepareFn->use_empty()) 2052 Fns.push_back(PrepareFn); 2053 } 2054 2055 PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C, 2056 CGSCCAnalysisManager &AM, 2057 LazyCallGraph &CG, CGSCCUpdateResult &UR) { 2058 // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a 2059 // non-zero number of nodes, so we assume that here and grab the first 2060 // node's function's module. 2061 Module &M = *C.begin()->getFunction().getParent(); 2062 auto &FAM = 2063 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager(); 2064 2065 // Check for uses of llvm.coro.prepare.retcon/async. 2066 SmallVector<Function *, 2> PrepareFns; 2067 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon"); 2068 addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async"); 2069 2070 // Find coroutines for processing. 2071 SmallVector<LazyCallGraph::Node *> Coroutines; 2072 for (LazyCallGraph::Node &N : C) 2073 if (N.getFunction().isPresplitCoroutine()) 2074 Coroutines.push_back(&N); 2075 2076 if (Coroutines.empty() && PrepareFns.empty()) 2077 return PreservedAnalyses::all(); 2078 2079 if (Coroutines.empty()) { 2080 for (auto *PrepareFn : PrepareFns) { 2081 replaceAllPrepares(PrepareFn, CG, C); 2082 } 2083 } 2084 2085 // Split all the coroutines. 
2086 for (LazyCallGraph::Node *N : Coroutines) { 2087 Function &F = N->getFunction(); 2088 LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName() 2089 << "\n"); 2090 F.setSplittedCoroutine(); 2091 2092 SmallVector<Function *, 4> Clones; 2093 const coro::Shape Shape = splitCoroutine(F, Clones, OptimizeFrame); 2094 updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM); 2095 2096 if (!Shape.CoroSuspends.empty()) { 2097 // Run the CGSCC pipeline on the original and newly split functions. 2098 UR.CWorklist.insert(&C); 2099 for (Function *Clone : Clones) 2100 UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone))); 2101 } 2102 } 2103 2104 if (!PrepareFns.empty()) { 2105 for (auto *PrepareFn : PrepareFns) { 2106 replaceAllPrepares(PrepareFn, CG, C); 2107 } 2108 } 2109 2110 return PreservedAnalyses::none(); 2111 } 2112