1 //===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 // This pass builds the coroutine frame and outlines resume and destroy parts
9 // of the coroutine into separate functions.
10 //
11 // We present a coroutine to LLVM as an ordinary function with suspension
12 // points marked up with intrinsics. We let the optimizer party on the coroutine
13 // as a single function for as long as possible. Shortly before the coroutine is
14 // eligible to be inlined into its callers, we split up the coroutine into parts
15 // corresponding to an initial, resume and destroy invocations of the coroutine,
16 // add them to the current SCC and restart the IPO pipeline to optimize the
17 // coroutine subfunctions we extracted before proceeding to the caller of the
18 // coroutine.
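//
// For example (an illustrative sketch, not the literal output of the pass), a
// switch-ABI coroutine @f containing llvm.coro.suspend calls is split into the
// ramp function @f plus clones that take the coroutine frame built by this
// pass, along the lines of:
//
//   define internal fastcc void @f.resume(%f.Frame* %frame.ptr)
//   define internal fastcc void @f.destroy(%f.Frame* %frame.ptr)
//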
19 //===----------------------------------------------------------------------===//
20
21 #include "llvm/Transforms/Coroutines/CoroSplit.h"
22 #include "CoroInstr.h"
23 #include "CoroInternal.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/PriorityWorklist.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/StringRef.h"
29 #include "llvm/ADT/Twine.h"
30 #include "llvm/Analysis/CFG.h"
31 #include "llvm/Analysis/CallGraph.h"
32 #include "llvm/Analysis/ConstantFolding.h"
33 #include "llvm/Analysis/LazyCallGraph.h"
34 #include "llvm/Analysis/TargetTransformInfo.h"
35 #include "llvm/BinaryFormat/Dwarf.h"
36 #include "llvm/IR/Argument.h"
37 #include "llvm/IR/Attributes.h"
38 #include "llvm/IR/BasicBlock.h"
39 #include "llvm/IR/CFG.h"
40 #include "llvm/IR/CallingConv.h"
41 #include "llvm/IR/Constants.h"
42 #include "llvm/IR/DataLayout.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/Dominators.h"
45 #include "llvm/IR/Function.h"
46 #include "llvm/IR/GlobalValue.h"
47 #include "llvm/IR/GlobalVariable.h"
48 #include "llvm/IR/IRBuilder.h"
49 #include "llvm/IR/InstIterator.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/LLVMContext.h"
55 #include "llvm/IR/Module.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/IR/Verifier.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/PrettyStackTrace.h"
62 #include "llvm/Support/raw_ostream.h"
63 #include "llvm/Transforms/Scalar.h"
64 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
65 #include "llvm/Transforms/Utils/CallGraphUpdater.h"
66 #include "llvm/Transforms/Utils/Cloning.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include "llvm/Transforms/Utils/ValueMapper.h"
69 #include <cassert>
70 #include <cstddef>
71 #include <cstdint>
72 #include <initializer_list>
73 #include <iterator>
74
75 using namespace llvm;
76
77 #define DEBUG_TYPE "coro-split"
78
79 namespace {
80
81 /// A little helper class for building the coroutine clone functions.
82 class CoroCloner {
83 public:
84 enum class Kind {
85 /// The shared resume function for a switch lowering.
86 SwitchResume,
87
88 /// The shared unwind function for a switch lowering.
89 SwitchUnwind,
90
91 /// The shared cleanup function for a switch lowering.
92 SwitchCleanup,
93
94 /// An individual continuation function.
95 Continuation,
96
97 /// An async resume function.
98 Async,
99 };
100
101 private:
102 Function &OrigF;
103 Function *NewF;
104 const Twine &Suffix;
105 coro::Shape &Shape;
106 Kind FKind;
107 ValueToValueMapTy VMap;
108 IRBuilder<> Builder;
109 Value *NewFramePtr = nullptr;
110
111 /// The active suspend instruction; meaningful only for continuation and async
112 /// ABIs.
113 AnyCoroSuspendInst *ActiveSuspend = nullptr;
114
115 public:
116 /// Create a cloner for a switch lowering.
117 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
118 Kind FKind)
119 : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
120 FKind(FKind), Builder(OrigF.getContext()) {
121 assert(Shape.ABI == coro::ABI::Switch);
122 }
123
124 /// Create a cloner for a continuation lowering.
125 CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
126 Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
127 : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
128 FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
129 Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
130 assert(Shape.ABI == coro::ABI::Retcon ||
131 Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
132 assert(NewF && "need existing function for continuation");
133 assert(ActiveSuspend && "need active suspend point for continuation");
134 }
135
136 Function *getFunction() const {
137 assert(NewF != nullptr && "declaration not yet set");
138 return NewF;
139 }
140
141 void create();
142
143 private:
144 bool isSwitchDestroyFunction() {
145 switch (FKind) {
146 case Kind::Async:
147 case Kind::Continuation:
148 case Kind::SwitchResume:
149 return false;
150 case Kind::SwitchUnwind:
151 case Kind::SwitchCleanup:
152 return true;
153 }
154 llvm_unreachable("Unknown CoroCloner::Kind enum");
155 }
156
157 void replaceEntryBlock();
158 Value *deriveNewFramePointer();
159 void replaceRetconOrAsyncSuspendUses();
160 void replaceCoroSuspends();
161 void replaceCoroEnds();
162 void replaceSwiftErrorOps();
163 void salvageDebugInfo();
164 void handleFinalSuspend();
165 };
166
167 } // end anonymous namespace
168
169 static void maybeFreeRetconStorage(IRBuilder<> &Builder,
170 const coro::Shape &Shape, Value *FramePtr,
171 CallGraph *CG) {
172 assert(Shape.ABI == coro::ABI::Retcon ||
173 Shape.ABI == coro::ABI::RetconOnce);
174 if (Shape.RetconLowering.IsFrameInlineInStorage)
175 return;
176
177 Shape.emitDealloc(Builder, FramePtr, CG);
178 }
179
180 /// Replace an llvm.coro.end.async.
181 /// Will inline the must tail call function call if there is one.
182 /// \returns true if cleanup of the coro.end block is needed, false otherwise.
183 static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
184 IRBuilder<> Builder(End);
185
186 auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
187 if (!EndAsync) {
188 Builder.CreateRetVoid();
189 return true /*needs cleanup of coro.end block*/;
190 }
191
192 auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
193 if (!MustTailCallFunc) {
194 Builder.CreateRetVoid();
195 return true /*needs cleanup of coro.end block*/;
196 }
197
198 // Move the must tail call from the predecessor block into the end block.
199 auto *CoroEndBlock = End->getParent();
200 auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
201 assert(MustTailCallFuncBlock && "Must have a single predecessor block");
202 auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
203 auto *MustTailCall = cast<CallInst>(&*std::prev(It));
204 CoroEndBlock->getInstList().splice(
205 End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
206
207 // Insert the return instruction.
208 Builder.SetInsertPoint(End);
209 Builder.CreateRetVoid();
210 InlineFunctionInfo FnInfo;
211
212 // Remove the rest of the block, by splitting it into an unreachable block.
213 auto *BB = End->getParent();
214 BB->splitBasicBlock(End);
215 BB->getTerminator()->eraseFromParent();
216
217 auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
218 assert(InlineRes.isSuccess() && "Expected inlining to succeed");
219 (void)InlineRes;
220
221 // We have cleaned up the coro.end block above.
222 return false;
223 }
224
225 /// Replace a non-unwind call to llvm.coro.end.
226 static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
227 const coro::Shape &Shape, Value *FramePtr,
228 bool InResume, CallGraph *CG) {
229 // Start inserting right before the coro.end.
230 IRBuilder<> Builder(End);
231
232 // Create the return instruction.
233 switch (Shape.ABI) {
234 // The cloned functions in switch-lowering always return void.
235 case coro::ABI::Switch:
236 // coro.end doesn't immediately end the coroutine in the main function
237 // in this lowering, because we need to deallocate the coroutine.
238 if (!InResume)
239 return;
240 Builder.CreateRetVoid();
241 break;
242
243 // In async lowering this returns.
244 case coro::ABI::Async: {
245 bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
246 if (!CoroEndBlockNeedsCleanup)
247 return;
248 break;
249 }
250
251 // In unique continuation lowering, the continuations always return void.
252 // But we may have implicitly allocated storage.
253 case coro::ABI::RetconOnce:
254 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
255 Builder.CreateRetVoid();
256 break;
257
258 // In non-unique continuation lowering, we signal completion by returning
259 // a null continuation.
260 case coro::ABI::Retcon: {
261 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
262 auto RetTy = Shape.getResumeFunctionType()->getReturnType();
263 auto RetStructTy = dyn_cast<StructType>(RetTy);
264 PointerType *ContinuationTy =
265 cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
266
267 Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
268 if (RetStructTy) {
269 ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
270 ReturnValue, 0);
271 }
272 Builder.CreateRet(ReturnValue);
273 break;
274 }
275 }
276
277 // Remove the rest of the block, by splitting it into an unreachable block.
278 auto *BB = End->getParent();
279 BB->splitBasicBlock(End);
280 BB->getTerminator()->eraseFromParent();
281 }
282
283 // Mark a coroutine as done, which implies that the coroutine is finished and
284 // never gets resumed.
285 //
286 // In resume-switched ABI, the done state is represented by storing zero in
287 // ResumeFnAddr.
288 //
289 // NOTE: We cannot omit the argument `FramePtr`. It is necessary because the
290 // pointer to the frame in the split function is not stored in `Shape`.
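//
// In IR terms the store amounts to (sketch; value names are generated):
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 0
//   store void (%f.Frame*)* null, void (%f.Frame*)** %ResumeFn.addr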
291 static void markCoroutineAsDone(IRBuilder<> &Builder, const coro::Shape &Shape,
292 Value *FramePtr) {
293 assert(
294 Shape.ABI == coro::ABI::Switch &&
295 "markCoroutineAsDone is only supported for Switch-Resumed ABI for now.");
296 auto *GepIndex = Builder.CreateStructGEP(
297 Shape.FrameTy, FramePtr, coro::Shape::SwitchFieldIndex::Resume,
298 "ResumeFn.addr");
299 auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
300 Shape.FrameTy->getTypeAtIndex(coro::Shape::SwitchFieldIndex::Resume)));
301 Builder.CreateStore(NullPtr, GepIndex);
302 }
303
304 /// Replace an unwind call to llvm.coro.end.
305 static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
306 Value *FramePtr, bool InResume,
307 CallGraph *CG) {
308 IRBuilder<> Builder(End);
309
310 switch (Shape.ABI) {
311 // In switch-lowering, this does nothing in the main function.
312 case coro::ABI::Switch: {
313 // In C++'s specification, the coroutine should be marked as done
314 // if promise.unhandled_exception() throws. The frontend will
315 // call coro.end(true) along this path.
316 //
317 // FIXME: We should refactor this once there are other languages
318 // that use the Switch-Resumed style besides C++.
319 markCoroutineAsDone(Builder, Shape, FramePtr);
320 if (!InResume)
321 return;
322 break;
323 }
324 // In async lowering this does nothing.
325 case coro::ABI::Async:
326 break;
327 // In continuation-lowering, this frees the continuation storage.
328 case coro::ABI::Retcon:
329 case coro::ABI::RetconOnce:
330 maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
331 break;
332 }
333
334 // If coro.end has an associated bundle, add cleanupret instruction.
335 if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
336 auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
337 auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
338 End->getParent()->splitBasicBlock(End);
339 CleanupRet->getParent()->getTerminator()->eraseFromParent();
340 }
341 }
342
343 static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
344 Value *FramePtr, bool InResume, CallGraph *CG) {
345 if (End->isUnwind())
346 replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
347 else
348 replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
349
350 auto &Context = End->getContext();
351 End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
352 : ConstantInt::getFalse(Context));
353 End->eraseFromParent();
354 }
355
356 // Create an entry block for a resume function with a switch that will jump to
357 // suspend points.
358 static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
359 assert(Shape.ABI == coro::ABI::Switch);
360 LLVMContext &C = F.getContext();
361
362 // resume.entry:
363 // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
364 // i32 2
365 // % index = load i32, i32* %index.addr
366 // switch i32 %index, label %unreachable [
367 // i32 0, label %resume.0
368 // i32 1, label %resume.1
369 // ...
370 // ]
371
372 auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
373 auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
374
375 IRBuilder<> Builder(NewEntry);
376 auto *FramePtr = Shape.FramePtr;
377 auto *FrameTy = Shape.FrameTy;
378 auto *GepIndex = Builder.CreateStructGEP(
379 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
380 auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
381 auto *Switch =
382 Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
383 Shape.SwitchLowering.ResumeSwitch = Switch;
384
385 size_t SuspendIndex = 0;
386 for (auto *AnyS : Shape.CoroSuspends) {
387 auto *S = cast<CoroSuspendInst>(AnyS);
388 ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
389
390 // Replace CoroSave with a store to Index:
391 // %index.addr = getelementptr %f.frame... (index field number)
392 // store i32 %IndexVal, i32* %index.addr1
393 auto *Save = S->getCoroSave();
394 Builder.SetInsertPoint(Save);
395 if (S->isFinal()) {
396 // The coroutine should be marked done if it reaches the final suspend
397 // point.
398 markCoroutineAsDone(Builder, Shape, FramePtr);
399 } else {
400 auto *GepIndex = Builder.CreateStructGEP(
401 FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
402 Builder.CreateStore(IndexVal, GepIndex);
403 }
404 Save->replaceAllUsesWith(ConstantTokenNone::get(C));
405 Save->eraseFromParent();
406
407 // Split block before and after coro.suspend and add a jump from an entry
408 // switch:
409 //
410 // whateverBB:
411 // whatever
412 // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
413 // switch i8 %0, label %suspend[i8 0, label %resume
414 // i8 1, label %cleanup]
415 // becomes:
416 //
417 // whateverBB:
418 // whatever
419 // br label %resume.0.landing
420 //
421 // resume.0: ; <--- jump from the switch in the resume.entry
422 // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
423 // br label %resume.0.landing
424 //
425 // resume.0.landing:
426 // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
427 // switch i8 % 1, label %suspend [i8 0, label %resume
428 // i8 1, label %cleanup]
429
430 auto *SuspendBB = S->getParent();
431 auto *ResumeBB =
432 SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
433 auto *LandingBB = ResumeBB->splitBasicBlock(
434 S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
435 Switch->addCase(IndexVal, ResumeBB);
436
437 cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
438 auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
439 S->replaceAllUsesWith(PN);
440 PN->addIncoming(Builder.getInt8(-1), SuspendBB);
441 PN->addIncoming(S, ResumeBB);
442
443 ++SuspendIndex;
444 }
445
446 Builder.SetInsertPoint(UnreachBB);
447 Builder.CreateUnreachable();
448
449 Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
450 }
451
452
453 // Rewrite final suspend point handling. We do not use suspend index to
454 // represent the final suspend point. Instead we zero-out ResumeFnAddr in the
455 // coroutine frame, since it is undefined behavior to resume a coroutine
456 // suspended at the final suspend point. Thus, in the resume function, we can
457 // simply remove the last case (when coro::Shape is built, the final suspend
458 // point (if present) is always the last element of CoroSuspends array).
459 // In the destroy function, we add a code sequence to check if ResumeFnAddress
460 // is Null, and if so, jump to the appropriate label to handle cleanup from the
461 // final suspend point.
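//
// Roughly, the check inserted into the destroy/cleanup clone looks like
// (a sketch; value and block names are generated):
//
//   %ResumeFn.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0, i32 0
//   %ResumeFn = load void (%f.Frame*)*, void (%f.Frame*)** %ResumeFn.addr
//   %is.final = icmp eq void (%f.Frame*)* %ResumeFn, null
//   br i1 %is.final, label %final.resume, label %Switch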
462 void CoroCloner::handleFinalSuspend() {
463 assert(Shape.ABI == coro::ABI::Switch &&
464 Shape.SwitchLowering.HasFinalSuspend);
465 auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
466 auto FinalCaseIt = std::prev(Switch->case_end());
467 BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
468 Switch->removeCase(FinalCaseIt);
469 if (isSwitchDestroyFunction()) {
470 BasicBlock *OldSwitchBB = Switch->getParent();
471 auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
472 Builder.SetInsertPoint(OldSwitchBB->getTerminator());
473 auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
474 coro::Shape::SwitchFieldIndex::Resume,
475 "ResumeFn.addr");
476 auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
477 GepIndex);
478 auto *Cond = Builder.CreateIsNull(Load);
479 Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
480 OldSwitchBB->getTerminator()->eraseFromParent();
481 }
482 }
483
484 static FunctionType *
485 getFunctionTypeFromAsyncSuspend(AnyCoroSuspendInst *Suspend) {
486 auto *AsyncSuspend = cast<CoroSuspendAsyncInst>(Suspend);
487 auto *StructTy = cast<StructType>(AsyncSuspend->getType());
488 auto &Context = Suspend->getParent()->getParent()->getContext();
489 auto *VoidTy = Type::getVoidTy(Context);
490 return FunctionType::get(VoidTy, StructTy->elements(), false);
491 }
492
493 static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
494 const Twine &Suffix,
495 Module::iterator InsertBefore,
496 AnyCoroSuspendInst *ActiveSuspend) {
497 Module *M = OrigF.getParent();
498 auto *FnTy = (Shape.ABI != coro::ABI::Async)
499 ? Shape.getResumeFunctionType()
500 : getFunctionTypeFromAsyncSuspend(ActiveSuspend);
501
502 Function *NewF =
503 Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
504 OrigF.getName() + Suffix);
505 if (Shape.ABI != coro::ABI::Async)
506 NewF->addParamAttr(0, Attribute::NonNull);
507
508 // For the async lowering ABI we can't guarantee that the context argument is
509 // not accessed via a different pointer not based on the argument.
510 if (Shape.ABI != coro::ABI::Async)
511 NewF->addParamAttr(0, Attribute::NoAlias);
512
513 M->getFunctionList().insert(InsertBefore, NewF);
514
515 return NewF;
516 }
517
518 /// Replace uses of the active llvm.coro.suspend.retcon/async call with the
519 /// arguments to the continuation function.
520 ///
521 /// This assumes that the builder has a meaningful insertion point.
522 void CoroCloner::replaceRetconOrAsyncSuspendUses() {
523 assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
524 Shape.ABI == coro::ABI::Async);
525
526 auto NewS = VMap[ActiveSuspend];
527 if (NewS->use_empty()) return;
528
529 // Copy out all the continuation arguments after the buffer pointer into
530 // an easily-indexed data structure for convenience.
531 SmallVector<Value*, 8> Args;
532 // The async ABI includes all arguments -- including the first argument.
533 bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
534 for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
535 E = NewF->arg_end();
536 I != E; ++I)
537 Args.push_back(&*I);
538
539 // If the suspend returns a single scalar value, we can just do a simple
540 // replacement.
541 if (!isa<StructType>(NewS->getType())) {
542 assert(Args.size() == 1);
543 NewS->replaceAllUsesWith(Args.front());
544 return;
545 }
546
547 // Try to peephole extracts of an aggregate return.
548 for (Use &U : llvm::make_early_inc_range(NewS->uses())) {
549 auto *EVI = dyn_cast<ExtractValueInst>(U.getUser());
550 if (!EVI || EVI->getNumIndices() != 1)
551 continue;
552
553 EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
554 EVI->eraseFromParent();
555 }
556
557 // If we have no remaining uses, we're done.
558 if (NewS->use_empty()) return;
559
560 // Otherwise, we need to create an aggregate.
561 Value *Agg = UndefValue::get(NewS->getType());
562 for (size_t I = 0, E = Args.size(); I != E; ++I)
563 Agg = Builder.CreateInsertValue(Agg, Args[I], I);
564
565 NewS->replaceAllUsesWith(Agg);
566 }
567
568 void CoroCloner::replaceCoroSuspends() {
569 Value *SuspendResult;
570
571 switch (Shape.ABI) {
572 // In switch lowering, replace coro.suspend with the appropriate value
573 // for the type of function we're extracting.
574 // Replacing coro.suspend with (0) will result in control flow proceeding to
575 // a resume label associated with a suspend point; replacing it with (1) will
576 // result in control flow proceeding to a cleanup label associated with this
577 // suspend point.
578 case coro::ABI::Switch:
579 SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
580 break;
581
582 // In async lowering there are no uses of the result.
583 case coro::ABI::Async:
584 return;
585
586 // In returned-continuation lowering, the arguments from earlier
587 // continuations are theoretically arbitrary, and they should have been
588 // spilled.
589 case coro::ABI::RetconOnce:
590 case coro::ABI::Retcon:
591 return;
592 }
593
594 for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
595 // The active suspend was handled earlier.
596 if (CS == ActiveSuspend) continue;
597
598 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
599 MappedCS->replaceAllUsesWith(SuspendResult);
600 MappedCS->eraseFromParent();
601 }
602 }
603
604 void CoroCloner::replaceCoroEnds() {
605 for (AnyCoroEndInst *CE : Shape.CoroEnds) {
606 // We use a null call graph because there's no call graph node for
607 // the cloned function yet. We'll just be rebuilding that later.
608 auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
609 replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
610 }
611 }
612
613 static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
614 ValueToValueMapTy *VMap) {
615 if (Shape.ABI == coro::ABI::Async && Shape.CoroSuspends.empty())
616 return;
617 Value *CachedSlot = nullptr;
618 auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
619 if (CachedSlot) {
620 assert(cast<PointerType>(CachedSlot->getType())
621 ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
622 "multiple swifterror slots in function with different types");
623 return CachedSlot;
624 }
625
626 // Check if the function has a swifterror argument.
627 for (auto &Arg : F.args()) {
628 if (Arg.isSwiftError()) {
629 CachedSlot = &Arg;
630 assert(cast<PointerType>(Arg.getType())
631 ->isOpaqueOrPointeeTypeMatches(ValueTy) &&
632 "swifterror argument does not have expected type");
633 return &Arg;
634 }
635 }
636
637 // Create a swifterror alloca.
638 IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
639 auto Alloca = Builder.CreateAlloca(ValueTy);
640 Alloca->setSwiftError(true);
641
642 CachedSlot = Alloca;
643 return Alloca;
644 };
645
646 for (CallInst *Op : Shape.SwiftErrorOps) {
647 auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
648 IRBuilder<> Builder(MappedOp);
649
650 // If there are no arguments, this is a 'get' operation.
651 Value *MappedResult;
652 if (Op->arg_empty()) {
653 auto ValueTy = Op->getType();
654 auto Slot = getSwiftErrorSlot(ValueTy);
655 MappedResult = Builder.CreateLoad(ValueTy, Slot);
656 } else {
657 assert(Op->arg_size() == 1);
658 auto Value = MappedOp->getArgOperand(0);
659 auto ValueTy = Value->getType();
660 auto Slot = getSwiftErrorSlot(ValueTy);
661 Builder.CreateStore(Value, Slot);
662 MappedResult = Slot;
663 }
664
665 MappedOp->replaceAllUsesWith(MappedResult);
666 MappedOp->eraseFromParent();
667 }
668
669 // If we're updating the original function, we've invalidated SwiftErrorOps.
670 if (VMap == nullptr) {
671 Shape.SwiftErrorOps.clear();
672 }
673 }
674
675 void CoroCloner::replaceSwiftErrorOps() {
676 ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
677 }
678
679 void CoroCloner::salvageDebugInfo() {
680 SmallVector<DbgVariableIntrinsic *, 8> Worklist;
681 SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
682 for (auto &BB : *NewF)
683 for (auto &I : BB)
684 if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I))
685 Worklist.push_back(DVI);
686 for (DbgVariableIntrinsic *DVI : Worklist)
687 coro::salvageDebugInfo(DbgPtrAllocaCache, DVI, Shape.OptimizeFrame);
688
689 // Remove all salvaged dbg.declare intrinsics that became
690 // either unreachable or stale due to the CoroSplit transformation.
691 DominatorTree DomTree(*NewF);
692 auto IsUnreachableBlock = [&](BasicBlock *BB) {
693 return !isPotentiallyReachable(&NewF->getEntryBlock(), BB, nullptr,
694 &DomTree);
695 };
696 for (DbgVariableIntrinsic *DVI : Worklist) {
697 if (IsUnreachableBlock(DVI->getParent()))
698 DVI->eraseFromParent();
699 else if (isa_and_nonnull<AllocaInst>(DVI->getVariableLocationOp(0))) {
700 // Count all non-debuginfo uses in reachable blocks.
701 unsigned Uses = 0;
702 for (auto *User : DVI->getVariableLocationOp(0)->users())
703 if (auto *I = dyn_cast<Instruction>(User))
704 if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
705 ++Uses;
706 if (!Uses)
707 DVI->eraseFromParent();
708 }
709 }
710 }
711
712 void CoroCloner::replaceEntryBlock() {
713 // In the original function, the AllocaSpillBlock is a block immediately
714 // following the allocation of the frame object which defines GEPs for
715 // all the allocas that have been moved into the frame, and it ends by
716 // branching to the original beginning of the coroutine. Make this
717 // the entry block of the cloned function.
718 auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
719 auto *OldEntry = &NewF->getEntryBlock();
720 Entry->setName("entry" + Suffix);
721 Entry->moveBefore(OldEntry);
722 Entry->getTerminator()->eraseFromParent();
723
724 // Clear all predecessors of the new entry block. There should be
725 // exactly one predecessor, which we created when splitting out
726 // AllocaSpillBlock to begin with.
727 assert(Entry->hasOneUse());
728 auto BranchToEntry = cast<BranchInst>(Entry->user_back());
729 assert(BranchToEntry->isUnconditional());
730 Builder.SetInsertPoint(BranchToEntry);
731 Builder.CreateUnreachable();
732 BranchToEntry->eraseFromParent();
733
734 // Branch from the entry to the appropriate place.
735 Builder.SetInsertPoint(Entry);
736 switch (Shape.ABI) {
737 case coro::ABI::Switch: {
738 // In switch-lowering, we built a resume-entry block in the original
739 // function. Make the entry block branch to this.
740 auto *SwitchBB =
741 cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
742 Builder.CreateBr(SwitchBB);
743 break;
744 }
745 case coro::ABI::Async:
746 case coro::ABI::Retcon:
747 case coro::ABI::RetconOnce: {
748 // In continuation ABIs, we want to branch to immediately after the
749 // active suspend point. Earlier phases will have put the suspend in its
750 // own basic block, so just thread our jump directly to its successor.
751 assert((Shape.ABI == coro::ABI::Async &&
752 isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
753 ((Shape.ABI == coro::ABI::Retcon ||
754 Shape.ABI == coro::ABI::RetconOnce) &&
755 isa<CoroSuspendRetconInst>(ActiveSuspend)));
756 auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
757 auto Branch = cast<BranchInst>(MappedCS->getNextNode());
758 assert(Branch->isUnconditional());
759 Builder.CreateBr(Branch->getSuccessor(0));
760 break;
761 }
762 }
763
764 // Any static alloca that's still being used but not reachable from the new
765 // entry needs to be moved to the new entry.
766 Function *F = OldEntry->getParent();
767 DominatorTree DT{*F};
768 for (Instruction &I : llvm::make_early_inc_range(instructions(F))) {
769 auto *Alloca = dyn_cast<AllocaInst>(&I);
770 if (!Alloca || I.use_empty())
771 continue;
772 if (DT.isReachableFromEntry(I.getParent()) ||
773 !isa<ConstantInt>(Alloca->getArraySize()))
774 continue;
775 I.moveBefore(*Entry, Entry->getFirstInsertionPt());
776 }
777 }
778
779 /// Derive the value of the new frame pointer.
780 Value *CoroCloner::deriveNewFramePointer() {
781 // Builder should be inserting to the front of the new entry block.
782
783 switch (Shape.ABI) {
784 // In switch-lowering, the argument is the frame pointer.
785 case coro::ABI::Switch:
786 return &*NewF->arg_begin();
787 // In async-lowering, one of the arguments is an async context as determined
788 // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
789 // the resume function from the async context projection function associated
790 // with the active suspend. The frame is located as a tail to the async
791 // context header.
792 case coro::ABI::Async: {
793 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
794 auto ContextIdx = ActiveAsyncSuspend->getStorageArgumentIndex() & 0xff;
795 auto *CalleeContext = NewF->getArg(ContextIdx);
796 auto *FramePtrTy = Shape.FrameTy->getPointerTo();
797 auto *ProjectionFunc =
798 ActiveAsyncSuspend->getAsyncContextProjectionFunction();
799 auto DbgLoc =
800 cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
801 // Calling i8* (i8*)
802 auto *CallerContext = Builder.CreateCall(ProjectionFunc->getFunctionType(),
803 ProjectionFunc, CalleeContext);
804 CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
805 CallerContext->setDebugLoc(DbgLoc);
806 // The frame is located after the async_context header.
807 auto &Context = Builder.getContext();
808 auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
809 Type::getInt8Ty(Context), CallerContext,
810 Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
811 // Inline the projection function.
812 InlineFunctionInfo InlineInfo;
813 auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
814 assert(InlineRes.isSuccess());
815 (void)InlineRes;
816 return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
817 }
818 // In continuation-lowering, the argument is the opaque storage.
819 case coro::ABI::Retcon:
820 case coro::ABI::RetconOnce: {
821 Argument *NewStorage = &*NewF->arg_begin();
822 auto FramePtrTy = Shape.FrameTy->getPointerTo();
823
824 // If the storage is inline, just bitcast the storage to the frame type.
825 if (Shape.RetconLowering.IsFrameInlineInStorage)
826 return Builder.CreateBitCast(NewStorage, FramePtrTy);
827
828 // Otherwise, load the real frame from the opaque storage.
829 auto FramePtrPtr =
830 Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
831 return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
832 }
833 }
834 llvm_unreachable("bad ABI");
835 }
836
837 static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
838 unsigned ParamIndex,
839 uint64_t Size, Align Alignment) {
840 AttrBuilder ParamAttrs(Context);
841 ParamAttrs.addAttribute(Attribute::NonNull);
842 ParamAttrs.addAttribute(Attribute::NoAlias);
843 ParamAttrs.addAlignmentAttr(Alignment);
844 ParamAttrs.addDereferenceableAttr(Size);
845 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
846 }
847
848 static void addAsyncContextAttrs(AttributeList &Attrs, LLVMContext &Context,
849 unsigned ParamIndex) {
850 AttrBuilder ParamAttrs(Context);
851 ParamAttrs.addAttribute(Attribute::SwiftAsync);
852 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
853 }
854
855 static void addSwiftSelfAttrs(AttributeList &Attrs, LLVMContext &Context,
856 unsigned ParamIndex) {
857 AttrBuilder ParamAttrs(Context);
858 ParamAttrs.addAttribute(Attribute::SwiftSelf);
859 Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
860 }
861
862 /// Clone the body of the original function into a resume function of
863 /// some sort.
864 void CoroCloner::create() {
865 // Create the new function if we don't already have one.
866 if (!NewF) {
867 NewF = createCloneDeclaration(OrigF, Shape, Suffix,
868 OrigF.getParent()->end(), ActiveSuspend);
869 }
870
871 // Replace all args with dummy instructions. If an argument is the old frame
872 // pointer, the dummy will be replaced by the new frame pointer once it is
873 // computed below. Uses of all other arguments should have already been
874 // rewritten by buildCoroutineFrame() to use loads/stores on the coroutine
875 // frame.
876 SmallVector<Instruction *> DummyArgs;
877 for (Argument &A : OrigF.args()) {
878 DummyArgs.push_back(new FreezeInst(UndefValue::get(A.getType())));
879 VMap[&A] = DummyArgs.back();
880 }
881
882 SmallVector<ReturnInst *, 4> Returns;
883
884 // Ignore attempts to change certain attributes of the function.
885 // TODO: maybe there should be a way to suppress this during cloning?
886 auto savedVisibility = NewF->getVisibility();
887 auto savedUnnamedAddr = NewF->getUnnamedAddr();
888 auto savedDLLStorageClass = NewF->getDLLStorageClass();
889
890 // NewF's linkage (which CloneFunctionInto does *not* change) might not
891 // be compatible with the visibility of OrigF (which it *does* change),
892 // so protect against that.
893 auto savedLinkage = NewF->getLinkage();
894 NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
895
896 CloneFunctionInto(NewF, &OrigF, VMap,
897 CloneFunctionChangeType::LocalChangesOnly, Returns);
898
899 auto &Context = NewF->getContext();
900
901 // For async functions / continuations, adjust the scope line of the
902 // clone to the line number of the suspend point. However, only
903 // adjust the scope line when the files are the same. This ensures
904 // line number and file name belong together. The scope line is
905 // associated with all pre-prologue instructions. This avoids a jump
906 // in the linetable from the function declaration to the suspend point.
907 if (DISubprogram *SP = NewF->getSubprogram()) {
908 assert(SP != OrigF.getSubprogram() && SP->isDistinct());
909 if (ActiveSuspend)
910 if (auto DL = ActiveSuspend->getDebugLoc())
911 if (SP->getFile() == DL->getFile())
912 SP->setScopeLine(DL->getLine());
913 // Update the linkage name to reflect the modified symbol name. It
914 // is necessary to update the linkage name in Swift, since the
915 // mangling changes for resume functions. It might also be the
916 // right thing to do in C++, but due to a limitation in LLVM's
917 // AsmPrinter we can only do this if the function doesn't have an
918 // abstract specification, since the DWARF backend expects the
919 // abstract specification to contain the linkage name and asserts
920 // that they are identical.
921 if (!SP->getDeclaration() && SP->getUnit() &&
922 SP->getUnit()->getSourceLanguage() == dwarf::DW_LANG_Swift)
923 SP->replaceLinkageName(MDString::get(Context, NewF->getName()));
924 }
925
926 NewF->setLinkage(savedLinkage);
927 NewF->setVisibility(savedVisibility);
928 NewF->setUnnamedAddr(savedUnnamedAddr);
929 NewF->setDLLStorageClass(savedDLLStorageClass);
930 // The function sanitizer metadata needs to match the signature of the
931 // function it is being attached to. However this does not hold for split
932 // functions here. Thus remove the metadata for split functions.
933 if (Shape.ABI == coro::ABI::Switch &&
934 NewF->hasMetadata(LLVMContext::MD_func_sanitize))
935 NewF->eraseMetadata(LLVMContext::MD_func_sanitize);
936
937 // Replace the attributes of the new function:
938 auto OrigAttrs = NewF->getAttributes();
939 auto NewAttrs = AttributeList();
940
941 switch (Shape.ABI) {
942 case coro::ABI::Switch:
943 // Bootstrap attributes by copying function attributes from the
944 // original function. This should include optimization settings and so on.
945 NewAttrs = NewAttrs.addFnAttributes(
946 Context, AttrBuilder(Context, OrigAttrs.getFnAttrs()));
947
948 addFramePointerAttrs(NewAttrs, Context, 0,
949 Shape.FrameSize, Shape.FrameAlign);
950 break;
951 case coro::ABI::Async: {
952 auto *ActiveAsyncSuspend = cast<CoroSuspendAsyncInst>(ActiveSuspend);
953 if (OrigF.hasParamAttribute(Shape.AsyncLowering.ContextArgNo,
954 Attribute::SwiftAsync)) {
955 uint32_t ArgAttributeIndices =
956 ActiveAsyncSuspend->getStorageArgumentIndex();
957 auto ContextArgIndex = ArgAttributeIndices & 0xff;
958 addAsyncContextAttrs(NewAttrs, Context, ContextArgIndex);
959
960 // `swiftasync` must precede `swiftself`, so 0 is not a valid index for
961 // `swiftself`.
962 auto SwiftSelfIndex = ArgAttributeIndices >> 8;
963 if (SwiftSelfIndex)
964 addSwiftSelfAttrs(NewAttrs, Context, SwiftSelfIndex);
965 }
966
967 // Transfer the original function's attributes.
968 auto FnAttrs = OrigF.getAttributes().getFnAttrs();
969 NewAttrs = NewAttrs.addFnAttributes(Context, AttrBuilder(Context, FnAttrs));
970 break;
971 }
972 case coro::ABI::Retcon:
973 case coro::ABI::RetconOnce:
974 // If we have a continuation prototype, just use its attributes,
975 // full-stop.
976 NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
977
978 addFramePointerAttrs(NewAttrs, Context, 0,
979 Shape.getRetconCoroId()->getStorageSize(),
980 Shape.getRetconCoroId()->getStorageAlignment());
981 break;
982 }
983
984 switch (Shape.ABI) {
985 // In these ABIs, the cloned functions always return 'void', and the
986 // existing return sites are meaningless. Note that for unique
987 // continuations, this includes the returns associated with suspends;
988 // this is fine because we can't suspend twice.
989 case coro::ABI::Switch:
990 case coro::ABI::RetconOnce:
991 // Remove old returns.
992 for (ReturnInst *Return : Returns)
993 changeToUnreachable(Return);
994 break;
995
996 // With multi-suspend continuations, we'll already have eliminated the
997 // original returns and inserted returns before all the suspend points,
998 // so we want to leave any returns in place.
999 case coro::ABI::Retcon:
1000 break;
1001 // Async lowering will insert musttail call functions at all suspend points
1002 // followed by a return.
1003 // Don't change returns to unreachable because that will trip up the verifier.
1004 // These returns should be unreachable from the clone.
1005 case coro::ABI::Async:
1006 break;
1007 }
1008
1009 NewF->setAttributes(NewAttrs);
1010 NewF->setCallingConv(Shape.getResumeFunctionCC());
1011
1012 // Set up the new entry block.
1013 replaceEntryBlock();
1014
1015 Builder.SetInsertPoint(&NewF->getEntryBlock().front());
1016 NewFramePtr = deriveNewFramePointer();
1017
1018 // Remap frame pointer.
1019 Value *OldFramePtr = VMap[Shape.FramePtr];
1020 NewFramePtr->takeName(OldFramePtr);
1021 OldFramePtr->replaceAllUsesWith(NewFramePtr);
1022
1023 // Remap vFrame pointer.
1024 auto *NewVFrame = Builder.CreateBitCast(
1025 NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
1026 Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
1027 if (OldVFrame != NewVFrame)
1028 OldVFrame->replaceAllUsesWith(NewVFrame);
1029
1030 // All uses of the arguments should have been resolved by this point,
1031 // so we can safely remove the dummy values.
1032 for (Instruction *DummyArg : DummyArgs) {
1033 DummyArg->replaceAllUsesWith(UndefValue::get(DummyArg->getType()));
1034 DummyArg->deleteValue();
1035 }
1036
1037 switch (Shape.ABI) {
1038 case coro::ABI::Switch:
1039 // Rewrite final suspend handling as it is not done via switch (this allows
1040 // us to remove the final case from the switch, since it is undefined behavior
1041 // to resume a coroutine suspended at the final suspend point).
1042 if (Shape.SwitchLowering.HasFinalSuspend)
1043 handleFinalSuspend();
1044 break;
1045 case coro::ABI::Async:
1046 case coro::ABI::Retcon:
1047 case coro::ABI::RetconOnce:
1048 // Replace uses of the active suspend with the corresponding
1049 // continuation-function arguments.
1050 assert(ActiveSuspend != nullptr &&
1051 "no active suspend when lowering a continuation-style coroutine");
1052 replaceRetconOrAsyncSuspendUses();
1053 break;
1054 }
1055
1056 // Handle suspends.
1057 replaceCoroSuspends();
1058
1059 // Handle swifterror.
1060 replaceSwiftErrorOps();
1061
1062 // Remove coro.end intrinsics.
1063 replaceCoroEnds();
1064
1065 // Salvage debug info that points into the coroutine frame.
1066 salvageDebugInfo();
1067
1068 // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
1069 // to suppress deallocation code.
1070 if (Shape.ABI == coro::ABI::Switch)
1071 coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
1072 /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
1073 }
1074
1075 // Create a resume clone by cloning the body of the original function, setting
1076 // a new entry block and replacing coro.suspend with an appropriate value to
1077 // force resume or cleanup at every suspend point.
1078 static Function *createClone(Function &F, const Twine &Suffix,
1079 coro::Shape &Shape, CoroCloner::Kind FKind) {
1080 CoroCloner Cloner(F, Suffix, Shape, FKind);
1081 Cloner.create();
1082 return Cloner.getFunction();
1083 }
1084
1085 static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
1086 assert(Shape.ABI == coro::ABI::Async);
1087
1088 auto *FuncPtrStruct = cast<ConstantStruct>(
1089 Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
1090 auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
1091 auto *OrigContextSize = FuncPtrStruct->getOperand(1);
1092 auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
1093 Shape.AsyncLowering.ContextSize);
1094 auto *NewFuncPtrStruct = ConstantStruct::get(
1095 FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
1096
1097 Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
1098 }
1099
1100 static void replaceFrameSizeAndAlignment(coro::Shape &Shape) {
1101 if (Shape.ABI == coro::ABI::Async)
1102 updateAsyncFuncPointerContextSize(Shape);
1103
1104 for (CoroAlignInst *CA : Shape.CoroAligns) {
1105 CA->replaceAllUsesWith(
1106 ConstantInt::get(CA->getType(), Shape.FrameAlign.value()));
1107 CA->eraseFromParent();
1108 }
1109
1110 if (Shape.CoroSizes.empty())
1111 return;
1112
1113 // In the same function all coro.sizes should have the same result type.
1114 auto *SizeIntrin = Shape.CoroSizes.back();
1115 Module *M = SizeIntrin->getModule();
1116 const DataLayout &DL = M->getDataLayout();
1117 auto Size = DL.getTypeAllocSize(Shape.FrameTy);
1118 auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
1119
1120 for (CoroSizeInst *CS : Shape.CoroSizes) {
1121 CS->replaceAllUsesWith(SizeConstant);
1122 CS->eraseFromParent();
1123 }
1124 }
1125
1126 // Create a global constant array containing pointers to the functions provided
1127 // and set the Info parameter of CoroBegin to point at this constant. Example:
1128 //
1129 // @f.resumers = internal constant [2 x void(%f.frame*)*]
1130 // [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
1131 // define void @f() {
1132 // ...
1133 // call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
1134 // i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
1135 //
1136 // Assumes that all the functions have the same signature.
1137 static void setCoroInfo(Function &F, coro::Shape &Shape,
1138 ArrayRef<Function *> Fns) {
1139 // This only works under the switch-lowering ABI because coro elision
1140 // only works on the switch-lowering ABI.
1141 assert(Shape.ABI == coro::ABI::Switch);
1142
1143 SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
1144 assert(!Args.empty());
1145 Function *Part = *Fns.begin();
1146 Module *M = Part->getParent();
1147 auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
1148
1149 auto *ConstVal = ConstantArray::get(ArrTy, Args);
1150 auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
1151 GlobalVariable::PrivateLinkage, ConstVal,
1152 F.getName() + Twine(".resumers"));
1153
1154 // Update coro.begin instruction to refer to this constant.
1155 LLVMContext &C = F.getContext();
1156 auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
1157 Shape.getSwitchCoroId()->setInfo(BC);
1158 }
1159
1160 // Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
1161 static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
1162 Function *DestroyFn, Function *CleanupFn) {
1163 assert(Shape.ABI == coro::ABI::Switch);
1164
1165 IRBuilder<> Builder(Shape.getInsertPtAfterFramePtr());
1166
1167 auto *ResumeAddr = Builder.CreateStructGEP(
1168 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
1169 "resume.addr");
1170 Builder.CreateStore(ResumeFn, ResumeAddr);
1171
1172 Value *DestroyOrCleanupFn = DestroyFn;
1173
1174 CoroIdInst *CoroId = Shape.getSwitchCoroId();
1175 if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
1176 // If there is a CoroAlloc and it returns false (meaning we elide the
1177 // allocation), use CleanupFn instead of DestroyFn.
1178 DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
1179 }
1180
1181 auto *DestroyAddr = Builder.CreateStructGEP(
1182 Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
1183 "destroy.addr");
1184 Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
1185 }
1186
1187 static void postSplitCleanup(Function &F) {
1188 removeUnreachableBlocks(F);
1189
1190 #ifndef NDEBUG
1191 // For now, we do a mandatory verification step because we don't
1192 // entirely trust this pass. Note that we don't want to add a verifier
1193 // pass to FPM below because it will also verify all the global data.
1194 if (verifyFunction(F, &errs()))
1195 report_fatal_error("Broken function");
1196 #endif
1197 }
1198
1199 // Assuming we arrived at the block NewBlock from the Prev instruction, store
1200 // the PHIs' incoming values in the ResolvedValues map.
1201 static void
1202 scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
1203 DenseMap<Value *, Value *> &ResolvedValues) {
1204 auto *PrevBB = Prev->getParent();
1205 for (PHINode &PN : NewBlock->phis()) {
1206 auto V = PN.getIncomingValueForBlock(PrevBB);
1207 // See if we already resolved it.
1208 auto VI = ResolvedValues.find(V);
1209 if (VI != ResolvedValues.end())
1210 V = VI->second;
1211 // Remember the value.
1212 ResolvedValues[&PN] = V;
1213 }
1214 }
1215
1216 // Replace a sequence of branches leading to a ret with a clone of that ret
1217 // instruction. A suspend instruction is represented by a switch; track the
1218 // PHI values and select the correct case successor when possible.
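//
// For example (a sketch; names are illustrative), starting from the
// instruction following a resume call in block %entry:
//
//   br label %merge
// merge:
//   %idx = phi i8 [ 0, %entry ], [ %v, %other ]
//   switch i8 %idx, label %trap [ i8 0, label %exit ]
// exit:
//   ret void
//
// coming from %entry, the PHI resolves %idx to 0, the switch selects %exit,
// and the whole chain can be replaced with a clone of the `ret void`.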
1219 static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
1220 DenseMap<Value *, Value *> ResolvedValues;
1221 BasicBlock *UnconditionalSucc = nullptr;
1222 assert(InitialInst->getModule());
1223 const DataLayout &DL = InitialInst->getModule()->getDataLayout();
1224
1225 auto GetFirstValidInstruction = [](Instruction *I) {
1226 while (I) {
1227 // A BitCastInst doesn't generate actual code, so we can skip it.
1228 if (isa<BitCastInst>(I) || I->isDebugOrPseudoInst() ||
1229 I->isLifetimeStartOrEnd())
1230 I = I->getNextNode();
1231 else if (isInstructionTriviallyDead(I))
1232 // Since we are in the middle of the transformation, we need to erase
1233 // the dead instruction manually.
1234 I = &*I->eraseFromParent();
1235 else
1236 break;
1237 }
1238 return I;
1239 };
1240
1241 auto TryResolveConstant = [&ResolvedValues](Value *V) {
1242 auto It = ResolvedValues.find(V);
1243 if (It != ResolvedValues.end())
1244 V = It->second;
1245 return dyn_cast<ConstantInt>(V);
1246 };
1247
1248 Instruction *I = InitialInst;
1249 while (I->isTerminator() || isa<CmpInst>(I)) {
1250 if (isa<ReturnInst>(I)) {
1251 if (I != InitialInst) {
1252 // If InitialInst is an unconditional branch,
1253 // remove PHI values that come from the basic block of InitialInst.
1254 if (UnconditionalSucc)
1255 UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
1256 ReplaceInstWithInst(InitialInst, I->clone());
1257 }
1258 return true;
1259 }
1260 if (auto *BR = dyn_cast<BranchInst>(I)) {
1261 if (BR->isUnconditional()) {
1262 BasicBlock *Succ = BR->getSuccessor(0);
1263 if (I == InitialInst)
1264 UnconditionalSucc = Succ;
1265 scanPHIsAndUpdateValueMap(I, Succ, ResolvedValues);
1266 I = GetFirstValidInstruction(Succ->getFirstNonPHIOrDbgOrLifetime());
1267 continue;
1268 }
1269
1270 BasicBlock *BB = BR->getParent();
1271 // Handle the case where the condition of the conditional branch is constant.
1272 // e.g.,
1273 //
1274 // br i1 false, label %cleanup, label %CoroEnd
1275 //
1276 // This is possible during the transformation. We can continue
1277 // simplifying in this case.
1278 if (ConstantFoldTerminator(BB, /*DeleteDeadConditions=*/true)) {
1279 // Handle this branch in next iteration.
1280 I = BB->getTerminator();
1281 continue;
1282 }
1283 } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
1284 // If the number of cases in the suspend switch instruction is reduced to
1285 // 1, then it is simplified to a CmpInst by llvm::ConstantFoldTerminator.
1286 auto *BR = dyn_cast<BranchInst>(
1287 GetFirstValidInstruction(CondCmp->getNextNode()));
1288 if (!BR || !BR->isConditional() || CondCmp != BR->getCondition())
1289 return false;
1290
1291 // And the comparison looks like: %cond = icmp eq i8 %V, constant.
1292 // So we try to resolve the constant for the first operand only, since the
1293 // second operand should be a literal constant by design.
1294 ConstantInt *Cond0 = TryResolveConstant(CondCmp->getOperand(0));
1295 auto *Cond1 = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
1296 if (!Cond0 || !Cond1)
1297 return false;
1298
1299 // Both operands of the CmpInst are constant, so we can evaluate
1300 // it immediately to get the destination.
1301 auto *ConstResult =
1302 dyn_cast_or_null<ConstantInt>(ConstantFoldCompareInstOperands(
1303 CondCmp->getPredicate(), Cond0, Cond1, DL));
1304 if (!ConstResult)
1305 return false;
1306
1307 CondCmp->replaceAllUsesWith(ConstResult);
1308 CondCmp->eraseFromParent();
1309
1310 // Handle this branch in next iteration.
1311 I = BR;
1312 continue;
1313 } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
1314 ConstantInt *Cond = TryResolveConstant(SI->getCondition());
1315 if (!Cond)
1316 return false;
1317
1318 BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
1319 scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
1320 I = GetFirstValidInstruction(BB->getFirstNonPHIOrDbgOrLifetime());
1321 continue;
1322 }
1323
1324 return false;
1325 }
1326 return false;
1327 }
1328
1329 // Check whether CI obeys the rules of the musttail attribute.
1330 static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
1331 if (CI.isInlineAsm())
1332 return false;
1333
1334 // Match prototypes and calling conventions of resume function.
1335 FunctionType *CalleeTy = CI.getFunctionType();
1336 if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
1337 return false;
1338
1339 Type *CalleeParmTy = CalleeTy->getParamType(0);
1340 if (!CalleeParmTy->isPointerTy() ||
1341 (CalleeParmTy->getPointerAddressSpace() != 0))
1342 return false;
1343
1344 if (CI.getCallingConv() != F.getCallingConv())
1345 return false;
1346
1347 // CI should not have any ABI-impacting function attributes.
1348 static const Attribute::AttrKind ABIAttrs[] = {
1349 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
1350 Attribute::Preallocated, Attribute::InReg, Attribute::Returned,
1351 Attribute::SwiftSelf, Attribute::SwiftError};
1352 AttributeList Attrs = CI.getAttributes();
1353 for (auto AK : ABIAttrs)
1354 if (Attrs.hasParamAttr(0, AK))
1355 return false;
1356
1357 return true;
1358 }
1359
1360 // Add musttail to any resume instructions that are immediately followed by a
1361 // suspend (i.e. ret). We do this even in -O0 to support guaranteed tail calls
1362 // for symmetric coroutine control transfer (C++ Coroutines TS extension).
1363 // This transformation is done only in the resume part of the coroutine, which
1364 // has a signature and calling convention identical to those of the coro.resume call.
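//
// For instance (an illustrative sketch; value names are hypothetical), a
// resume call that reaches a return through trivial branches such as
//
//   call fastcc void %ResumeFn(i8* %hdl)
//   ret void
//
// becomes
//
//   musttail call fastcc void %ResumeFn(i8* %hdl)
//   ret void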
1365 static void addMustTailToCoroResumes(Function &F) {
1366 bool changed = false;
1367
1368 // Collect potential resume instructions.
1369 SmallVector<CallInst *, 4> Resumes;
1370 for (auto &I : instructions(F))
1371 if (auto *Call = dyn_cast<CallInst>(&I))
1372 if (shouldBeMustTail(*Call, F))
1373 Resumes.push_back(Call);
1374
1375 // Set musttail on those that are followed by a ret instruction.
1376 for (CallInst *Call : Resumes)
1377 if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
1378 Call->setTailCallKind(CallInst::TCK_MustTail);
1379 changed = true;
1380 }
1381
1382 if (changed)
1383 removeUnreachableBlocks(F);
1384 }
1385
1386 // Coroutine has no suspend points. Remove heap allocation for the coroutine
1387 // frame if possible.
1388 static void handleNoSuspendCoroutine(coro::Shape &Shape) {
1389 auto *CoroBegin = Shape.CoroBegin;
1390 auto *CoroId = CoroBegin->getId();
1391 auto *AllocInst = CoroId->getCoroAlloc();
1392 switch (Shape.ABI) {
1393 case coro::ABI::Switch: {
1394 auto SwitchId = cast<CoroIdInst>(CoroId);
1395 coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
1396 if (AllocInst) {
1397 IRBuilder<> Builder(AllocInst);
1398 auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
1399 Frame->setAlignment(Shape.FrameAlign);
1400 auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
1401 AllocInst->replaceAllUsesWith(Builder.getFalse());
1402 AllocInst->eraseFromParent();
1403 CoroBegin->replaceAllUsesWith(VFrame);
1404 } else {
1405 CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
1406 }
1407
1408 break;
1409 }
1410 case coro::ABI::Async:
1411 case coro::ABI::Retcon:
1412 case coro::ABI::RetconOnce:
1413 CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
1414 break;
1415 }
1416
1417 CoroBegin->eraseFromParent();
1418 }
1419
1420 // SimplifySuspendPoint needs to check that there are no calls between
1421 // coro_save and coro_suspend, since any of the calls may potentially resume
1422 // the coroutine, and if that is the case we cannot eliminate the suspend point.
1423 static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
1424 for (Instruction *I = From; I != To; I = I->getNextNode()) {
1425 // Assume that no intrinsic can resume the coroutine.
1426 if (isa<IntrinsicInst>(I))
1427 continue;
1428
1429 if (isa<CallBase>(I))
1430 return true;
1431 }
1432 return false;
1433 }
1434
static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
  SmallPtrSet<BasicBlock *, 8> Set;
  SmallVector<BasicBlock *, 8> Worklist;

  Set.insert(SaveBB);
  Worklist.push_back(ResDesBB);

  // Accumulate all blocks between SaveBB and ResDesBB. Because the coro.save
  // intrinsic returns a token consumed by the suspend instruction, all blocks
  // in between will have to eventually hit SaveBB when going backwards from
  // ResDesBB.
  while (!Worklist.empty()) {
    auto *BB = Worklist.pop_back_val();
    Set.insert(BB);
    for (auto *Pred : predecessors(BB))
      if (!Set.contains(Pred))
        Worklist.push_back(Pred);
  }

  // SaveBB and ResDesBB are checked separately in hasCallsBetween.
  Set.erase(SaveBB);
  Set.erase(ResDesBB);

  for (auto *BB : Set)
    if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
      return true;

  return false;
}

static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
  auto *SaveBB = Save->getParent();
  auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();

  if (SaveBB == ResumeOrDestroyBB)
    return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);

  // Any calls from Save to the end of the block?
  if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
    return true;

  // Any calls from the beginning of the block up to ResumeOrDestroy?
  if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
                             ResumeOrDestroy))
    return true;

  // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
  if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
    return true;

  return false;
}

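// Example of the hazard these helpers guard against (purely hypothetical): if
// the code between the llvm.coro.save and the resume/destroy call contains
// something like
//
//   call void @enqueue_for_later_resumption(i8* %hdl)
//
// the callee might already have resumed (or destroyed) the coroutine by the
// time the suspend point is reached, so folding the suspend away would be
// unsafe. The intrinsic-only exception above relies on intrinsics never
// resuming a coroutine.
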
// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
// suspend point and replace it with normal control flow.
static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
                                 CoroBeginInst *CoroBegin) {
  Instruction *Prev = Suspend->getPrevNode();
  if (!Prev) {
    auto *Pred = Suspend->getParent()->getSinglePredecessor();
    if (!Pred)
      return false;
    Prev = Pred->getTerminator();
  }

  CallBase *CB = dyn_cast<CallBase>(Prev);
  if (!CB)
    return false;

  auto *Callee = CB->getCalledOperand()->stripPointerCasts();

  // See if the callsite is for resumption or destruction of the coroutine.
  auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
  if (!SubFn)
    return false;

  // Does not refer to the current coroutine, we cannot do anything with it.
  if (SubFn->getFrame() != CoroBegin)
    return false;

  // See if the transformation is safe. Specifically, see if there are any
  // calls in between Save and CallInstr. They can potentially resume the
  // coroutine, rendering this optimization unsafe.
  auto *Save = Suspend->getCoroSave();
  if (hasCallsBetween(Save, CB))
    return false;

  // Replace llvm.coro.suspend with the value that results in resumption over
  // the resume or cleanup path.
  Suspend->replaceAllUsesWith(SubFn->getRawIndex());
  Suspend->eraseFromParent();
  Save->eraseFromParent();

  // No longer need a call to coro.resume or coro.destroy.
  if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
    BranchInst::Create(Invoke->getNormalDest(), Invoke);
  }

  // Grab the CalledValue from CB before erasing the CallInstr.
  auto *CalledValue = CB->getCalledOperand();
  CB->eraseFromParent();

  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
  if (CalledValue != SubFn && CalledValue->user_empty())
    if (auto *I = dyn_cast<Instruction>(CalledValue))
      I->eraseFromParent();

  // Now we are good to remove SubFn.
  if (SubFn->user_empty())
    SubFn->eraseFromParent();

  return true;
}

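// Illustrative sketch of the simplification (hypothetical IR): a coroutine
// that immediately resumes or destroys itself,
//
//   %save = call token @llvm.coro.save(i8* %hdl)
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)   ; 0 = resume
//   %fn   = bitcast i8* %addr to void (i8*)*
//   call fastcc void %fn(i8* %hdl)
//   %sp   = call i8 @llvm.coro.suspend(token %save, i1 false)
//
// collapses to straight-line control flow: %sp is replaced by the raw
// sub-function index (0 steers down the resume path, 1 down the cleanup
// path), and the save, the suspend, and the resume call are all erased.
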
// Remove suspend points that are simplified.
static void simplifySuspendPoints(coro::Shape &Shape) {
  // Currently, the only simplification we do is switch-lowering-specific.
  if (Shape.ABI != coro::ABI::Switch)
    return;

  auto &S = Shape.CoroSuspends;
  size_t I = 0, N = S.size();
  if (N == 0)
    return;
  while (true) {
    auto SI = cast<CoroSuspendInst>(S[I]);
    // Leave final.suspend to handleFinalSuspend since it is undefined behavior
    // to resume a coroutine suspended at the final suspend point.
    if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
      if (--N == I)
        break;
      std::swap(S[I], S[N]);
      continue;
    }
    if (++I == N)
      break;
  }
  S.resize(N);
}

static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones,
                                 TargetTransformInfo &TTI) {
  assert(Shape.ABI == coro::ABI::Switch);

  createResumeEntryBlock(F, Shape);
  auto ResumeClone = createClone(F, ".resume", Shape,
                                 CoroCloner::Kind::SwitchResume);
  auto DestroyClone = createClone(F, ".destroy", Shape,
                                  CoroCloner::Kind::SwitchUnwind);
  auto CleanupClone = createClone(F, ".cleanup", Shape,
                                  CoroCloner::Kind::SwitchCleanup);

  postSplitCleanup(*ResumeClone);
  postSplitCleanup(*DestroyClone);
  postSplitCleanup(*CleanupClone);

  // Add a musttail call to support symmetric transfer.
  // Skip targets which don't support tail calls.
  //
  // FIXME: Could we support symmetric transfer effectively without musttail
  // call?
  if (TTI.supportsTailCalls())
    addMustTailToCoroResumes(*ResumeClone);

  // Store addresses of the resume/destroy/cleanup functions in the coroutine
  // frame.
  updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);

  assert(Clones.empty());
  Clones.push_back(ResumeClone);
  Clones.push_back(DestroyClone);
  Clones.push_back(CleanupClone);

  // Create a constant array referring to the resume/destroy/cleanup functions
  // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
  // pass can determine the correct function to call.
  setCoroInfo(F, Shape, Clones);
}

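// After this step the original ramp function @f (hypothetical name) is
// accompanied by three clones, roughly:
//
//   @f.resume  - continues from the active suspend point
//   @f.destroy - the unwind/destroy path
//   @f.cleanup - variant used when the frame allocation has been elided
//
// updateCoroFrame stores the resume pointer (and the appropriate
// destroy/cleanup pointer) into the coroutine frame, and setCoroInfo records
// all three clones in the @llvm.coro.info constant so that CoroElide can find
// them later.
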
static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
                                       Value *Continuation) {
  auto *ResumeIntrinsic = Suspend->getResumeFunction();
  auto &Context = Suspend->getParent()->getParent()->getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  IRBuilder<> Builder(ResumeIntrinsic);
  auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
  ResumeIntrinsic->replaceAllUsesWith(Val);
  ResumeIntrinsic->eraseFromParent();
  Suspend->setOperand(CoroSuspendAsyncInst::ResumeFunctionArg,
                      UndefValue::get(Int8PtrTy));
}

/// Coerce the arguments in \p FnArgs according to \p FnTy, appending the
/// results to \p CallArgs.
static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
                            ArrayRef<Value *> FnArgs,
                            SmallVectorImpl<Value *> &CallArgs) {
  size_t ArgIdx = 0;
  for (auto paramTy : FnTy->params()) {
    assert(ArgIdx < FnArgs.size());
    if (paramTy != FnArgs[ArgIdx]->getType())
      CallArgs.push_back(
          Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
    else
      CallArgs.push_back(FnArgs[ArgIdx]);
    ++ArgIdx;
  }
}

CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
                                   ArrayRef<Value *> Arguments,
                                   IRBuilder<> &Builder) {
  auto *FnTy = MustTailCallFn->getFunctionType();
  // Coerce the arguments; LLVM optimizations seem to ignore the types in
  // vararg functions and throw away casts in optimized mode.
  SmallVector<Value *, 8> CallArgs;
  coerceArguments(Builder, FnTy, Arguments, CallArgs);

  auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
  TailCall->setTailCallKind(CallInst::TCK_MustTail);
  TailCall->setDebugLoc(Loc);
  TailCall->setCallingConv(MustTailCallFn->getCallingConv());
  return TailCall;
}

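// Minimal usage sketch (hypothetical names; see splitAsyncCoroutine below for
// the real caller): the builder is expected to sit in a block that does not
// yet have a terminator, and the musttail call must be followed directly by a
// return.
//
//   IRBuilder<> Builder(ReturnBB);            // block with no terminator yet
//   CallInst *TC =
//       coro::createMustTailCall(Loc, MustTailCallFn, Args, Builder);
//   Builder.CreateRetVoid();                  // musttail requires a ret next
//
// coerceArguments bitcasts each argument to the callee's parameter type
// first, per the comment above, so the resulting call is well-formed.
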
static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
                                SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Async);
  assert(Clones.empty());
  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  auto &Context = F.getContext();
  auto *Int8PtrTy = Type::getInt8PtrTy(Context);

  auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
  IRBuilder<> Builder(Id);

  auto *FramePtr = Id->getStorage();
  FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
  FramePtr = Builder.CreateConstInBoundsGEP1_32(
      Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
      "async.ctx.frameptr");

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(FramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);

    // Create the clone declaration.
    auto ResumeNameSuffix = ".resume.";
    auto ProjectionFunctionName =
        Suspend->getAsyncContextProjectionFunction()->getName();
    bool UseSwiftMangling = false;
    if (ProjectionFunctionName.equals("__swift_async_resume_project_context")) {
      ResumeNameSuffix = "TQ";
      UseSwiftMangling = true;
    } else if (ProjectionFunctionName.equals(
                   "__swift_async_resume_get_context")) {
      ResumeNameSuffix = "TY";
      UseSwiftMangling = true;
    }
    auto *Continuation = createCloneDeclaration(
        F, Shape,
        UseSwiftMangling ? ResumeNameSuffix + Twine(Idx) + "_"
                         : ResumeNameSuffix + Twine(Idx),
        NextF, Suspend);
    Clones.push_back(Continuation);

    // Insert a branch to a new return block immediately before the suspend
    // point.
    auto *SuspendBB = Suspend->getParent();
    auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Place it before the first suspend.
    auto *ReturnBB =
        BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
    Branch->setSuccessor(0, ReturnBB);

    IRBuilder<> Builder(ReturnBB);

    // Insert the call to the tail call function and inline it.
    auto *Fn = Suspend->getMustTailCallFunction();
    SmallVector<Value *, 8> Args(Suspend->args());
    auto FnArgs = ArrayRef<Value *>(Args).drop_front(
        CoroSuspendAsyncInst::MustTailCallFuncArg + 1);
    auto *TailCall =
        coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
    Builder.CreateRetVoid();
    InlineFunctionInfo FnInfo;
    auto InlineRes = InlineFunction(*TailCall, FnInfo);
    assert(InlineRes.isSuccess() && "Expected inlining to succeed");
    (void)InlineRes;

    // Replace the llvm.coro.async.resume intrinsic call.
    replaceAsyncResumeFunction(Suspend, Continuation);
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
    auto *Suspend = Shape.CoroSuspends[Idx];
    auto *Clone = Clones[Idx];

    CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
  }
}

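// Net effect of the loop above, sketched (hypothetical): for every
// @llvm.coro.suspend.async the ramp function gains a block of the form
//
//   coro.return:
//     musttail call void @dispatch(...)   ; the suspend's must-tail callee,
//     ret void                            ; immediately inlined afterwards
//
// while everything after the suspend point is split off into a continuation
// function (one CoroCloner-generated clone per suspend), and the
// llvm.coro.async.resume intrinsic is replaced with the address of that
// continuation.
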
static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
                                 SmallVectorImpl<Function *> &Clones) {
  assert(Shape.ABI == coro::ABI::Retcon ||
         Shape.ABI == coro::ABI::RetconOnce);
  assert(Clones.empty());

  // Reset various things that the optimizer might have decided it
  // "knows" about the coroutine function due to not seeing a return.
  F.removeFnAttr(Attribute::NoReturn);
  F.removeRetAttr(Attribute::NoAlias);
  F.removeRetAttr(Attribute::NonNull);

  // Allocate the frame.
  auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
  Value *RawFramePtr;
  if (Shape.RetconLowering.IsFrameInlineInStorage) {
    RawFramePtr = Id->getStorage();
  } else {
    IRBuilder<> Builder(Id);

    // Determine the size of the frame.
    const DataLayout &DL = F.getParent()->getDataLayout();
    auto Size = DL.getTypeAllocSize(Shape.FrameTy);

    // Allocate. We don't need to update the call graph node because we're
    // going to recompute it from scratch after splitting.
    // FIXME: pass the required alignment
    RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
    RawFramePtr =
        Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());

    // Stash the allocated frame pointer in the continuation storage.
    auto Dest = Builder.CreateBitCast(Id->getStorage(),
                                      RawFramePtr->getType()->getPointerTo());
    Builder.CreateStore(RawFramePtr, Dest);
  }

  // Map all uses of llvm.coro.begin to the allocated frame pointer.
  {
    // Make sure we don't invalidate Shape.FramePtr.
    TrackingVH<Value> Handle(Shape.FramePtr);
    Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
    Shape.FramePtr = Handle.getValPtr();
  }

  // Create a unique return block.
  BasicBlock *ReturnBB = nullptr;
  SmallVector<PHINode *, 4> ReturnPHIs;

  // Create all the functions in order after the main function.
  auto NextF = std::next(F.getIterator());

  // Create a continuation function for each of the suspend points.
  Clones.reserve(Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);

    // Create the clone declaration.
    auto Continuation =
        createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF, nullptr);
    Clones.push_back(Continuation);

    // Insert a branch to the unified return block immediately before
    // the suspend point.
    auto SuspendBB = Suspend->getParent();
    auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
    auto Branch = cast<BranchInst>(SuspendBB->getTerminator());

    // Create the unified return block.
    if (!ReturnBB) {
      // Place it before the first suspend.
      ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
                                    NewSuspendBB);
      Shape.RetconLowering.ReturnBlock = ReturnBB;

      IRBuilder<> Builder(ReturnBB);

      // Create PHIs for all the return values.
      assert(ReturnPHIs.empty());

      // First, the continuation.
      ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
                                             Shape.CoroSuspends.size()));

      // Next, all the directly-yielded values.
      for (auto ResultTy : Shape.getRetconResultTypes())
        ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
                                               Shape.CoroSuspends.size()));

      // Build the return value.
      auto RetTy = F.getReturnType();

      // Cast the continuation value if necessary.
      // We can't rely on the types matching up because that type would
      // have to be infinite.
      auto CastedContinuationTy =
          (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
      auto *CastedContinuation =
          Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);

      Value *RetV;
      if (ReturnPHIs.size() == 1) {
        RetV = CastedContinuation;
      } else {
        RetV = UndefValue::get(RetTy);
        RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
        for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
          RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
      }

      Builder.CreateRet(RetV);
    }

    // Branch to the return block.
    Branch->setSuccessor(0, ReturnBB);
    ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
    size_t NextPHIIndex = 1;
    for (auto &VUse : Suspend->value_operands())
      ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
    assert(NextPHIIndex == ReturnPHIs.size());
  }

  assert(Clones.size() == Shape.CoroSuspends.size());
  for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
    auto Suspend = Shape.CoroSuspends[i];
    auto Clone = Clones[i];

    CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
  }
}

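// Rough shape of the unified return block built above (hypothetical IR for a
// retcon coroutine whose result type is { i8*, i32 }):
//
//   coro.return:
//     %cont = phi void (i8*, i1)* [ @f.resume.0, %bb0 ], [ @f.resume.1, %bb1 ]
//     %val  = phi i32 [ %x, %bb0 ], [ %y, %bb1 ]
//     %cast = bitcast void (i8*, i1)* %cont to i8*
//     %r0   = insertvalue { i8*, i32 } undef, i8* %cast, 0
//     %r1   = insertvalue { i8*, i32 } %r0, i32 %val, 1
//     ret { i8*, i32 } %r1
//
// i.e. one PHI for the continuation pointer, one PHI per directly-yielded
// value, and the aggregate return value assembled with insertvalue.
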
namespace {
class PrettyStackTraceFunction : public PrettyStackTraceEntry {
  Function &F;
public:
  PrettyStackTraceFunction(Function &F) : F(F) {}
  void print(raw_ostream &OS) const override {
    OS << "While splitting coroutine ";
    F.printAsOperand(OS, /*print type*/ false, F.getParent());
    OS << "\n";
  }
};
} // namespace

static coro::Shape splitCoroutine(Function &F,
                                  SmallVectorImpl<Function *> &Clones,
                                  TargetTransformInfo &TTI,
                                  bool OptimizeFrame) {
  PrettyStackTraceFunction prettyStackTrace(F);

  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
  // up by uses in unreachable blocks, so remove them as a first pass.
  removeUnreachableBlocks(F);

  coro::Shape Shape(F, OptimizeFrame);
  if (!Shape.CoroBegin)
    return Shape;

  simplifySuspendPoints(Shape);
  buildCoroutineFrame(F, Shape);
  replaceFrameSizeAndAlignment(Shape);

  // If there are no suspend points, no split is required; just remove
  // the allocation and deallocation blocks, they are not needed.
  if (Shape.CoroSuspends.empty()) {
    handleNoSuspendCoroutine(Shape);
  } else {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      splitSwitchCoroutine(F, Shape, Clones, TTI);
      break;
    case coro::ABI::Async:
      splitAsyncCoroutine(F, Shape, Clones);
      break;
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      splitRetconCoroutine(F, Shape, Clones);
      break;
    }
  }

  // Replace all the swifterror operations in the original function.
  // This invalidates SwiftErrorOps in the Shape.
  replaceSwiftErrorOps(F, Shape, nullptr);

  // Finally, salvage the llvm.dbg.{declare,addr} in our original function that
  // point into the coroutine frame. We only do this for the current function
  // since the Cloner salvaged debug info for us in the new coroutine funclets.
  SmallVector<DbgVariableIntrinsic *, 8> Worklist;
  SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
  for (auto &BB : F) {
    for (auto &I : BB) {
      if (auto *DDI = dyn_cast<DbgDeclareInst>(&I)) {
        Worklist.push_back(DDI);
        continue;
      }
      if (auto *DDI = dyn_cast<DbgAddrIntrinsic>(&I)) {
        Worklist.push_back(DDI);
        continue;
      }
    }
  }
  for (auto *DDI : Worklist)
    coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, Shape.OptimizeFrame);

  return Shape;
}

/// Remove calls to llvm.coro.end in the original function.
static void removeCoroEnds(const coro::Shape &Shape) {
  for (auto End : Shape.CoroEnds) {
    replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, nullptr);
  }
}

static void updateCallGraphAfterCoroutineSplit(
    LazyCallGraph::Node &N, const coro::Shape &Shape,
    const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
    LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
    FunctionAnalysisManager &FAM) {
  if (!Shape.CoroBegin)
    return;

  if (Shape.ABI != coro::ABI::Switch)
    removeCoroEnds(Shape);
  else {
    for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
      auto &Context = End->getContext();
      End->replaceAllUsesWith(ConstantInt::getFalse(Context));
      End->eraseFromParent();
    }
  }

  if (!Clones.empty()) {
    switch (Shape.ABI) {
    case coro::ABI::Switch:
      // Each clone in the Switch lowering is independent of the other clones.
      // Let the LazyCallGraph know about each one separately.
      for (Function *Clone : Clones)
        CG.addSplitFunction(N.getFunction(), *Clone);
      break;
    case coro::ABI::Async:
    case coro::ABI::Retcon:
    case coro::ABI::RetconOnce:
      // Each clone in the Async/Retcon lowering references the other clones.
      // Let the LazyCallGraph know about all of them at once.
      if (!Clones.empty())
        CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
      break;
    }

    // Let the CGSCC infra handle the changes to the original function.
    updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
  }

  // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
  // to the split functions.
  postSplitCleanup(N.getFunction());
  updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
}

/// Replace a call to llvm.coro.prepare.retcon.
static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
                           LazyCallGraph::SCC &C) {
  auto CastFn = Prepare->getArgOperand(0); // as an i8*
  auto Fn = CastFn->stripPointerCasts();   // as its original type

  // Attempt to peephole this pattern:
  //    %0 = bitcast [[TYPE]] @some_function to i8*
  //    %1 = call @llvm.coro.prepare.retcon(i8* %0)
  //    %2 = bitcast %1 to [[TYPE]]
  // ==>
  //    %2 = @some_function
  for (Use &U : llvm::make_early_inc_range(Prepare->uses())) {
    // Look for bitcasts back to the original function type.
    auto *Cast = dyn_cast<BitCastInst>(U.getUser());
    if (!Cast || Cast->getType() != Fn->getType())
      continue;

    // Replace and remove the cast.
    Cast->replaceAllUsesWith(Fn);
    Cast->eraseFromParent();
  }

  // Replace any remaining uses with the function as an i8*.
  // This can never directly be a callee, so we don't need to update CG.
  Prepare->replaceAllUsesWith(CastFn);
  Prepare->eraseFromParent();

  // Kill dead bitcasts.
  while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
    if (!Cast->use_empty())
      break;
    CastFn = Cast->getOperand(0);
    Cast->eraseFromParent();
  }
}

static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
                               LazyCallGraph::SCC &C) {
  bool Changed = false;
  for (Use &P : llvm::make_early_inc_range(PrepareFn->uses())) {
    // Intrinsics can only be used in calls.
    auto *Prepare = cast<CallInst>(P.getUser());
    replacePrepare(Prepare, CG, C);
    Changed = true;
  }

  return Changed;
}

static void addPrepareFunction(const Module &M,
                               SmallVectorImpl<Function *> &Fns,
                               StringRef Name) {
  auto *PrepareFn = M.getFunction(Name);
  if (PrepareFn && !PrepareFn->use_empty())
    Fns.push_back(PrepareFn);
}

PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
                                     CGSCCAnalysisManager &AM,
                                     LazyCallGraph &CG, CGSCCUpdateResult &UR) {
  // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
  // non-zero number of nodes, so we assume that here and grab the first
  // node's function's module.
  Module &M = *C.begin()->getFunction().getParent();
  auto &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // Check for uses of llvm.coro.prepare.retcon/async.
  SmallVector<Function *, 2> PrepareFns;
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
  addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");

  // Find coroutines for processing.
  SmallVector<LazyCallGraph::Node *> Coroutines;
  for (LazyCallGraph::Node &N : C)
    if (N.getFunction().isPresplitCoroutine())
      Coroutines.push_back(&N);

  if (Coroutines.empty() && PrepareFns.empty())
    return PreservedAnalyses::all();

  if (Coroutines.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  // Split all the coroutines.
  for (LazyCallGraph::Node *N : Coroutines) {
    Function &F = N->getFunction();
    LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
                      << "'\n");
    F.setSplittedCoroutine();

    SmallVector<Function *, 4> Clones;
    const coro::Shape Shape = splitCoroutine(
        F, Clones, FAM.getResult<TargetIRAnalysis>(F), OptimizeFrame);
    updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);

    if (!Shape.CoroSuspends.empty()) {
      // Run the CGSCC pipeline on the original and newly split functions.
      UR.CWorklist.insert(&C);
      for (Function *Clone : Clones)
        UR.CWorklist.insert(CG.lookupSCC(CG.get(*Clone)));
    }
  }

  if (!PrepareFns.empty()) {
    for (auto *PrepareFn : PrepareFns) {
      replaceAllPrepares(PrepareFn, CG, C);
    }
  }

  return PreservedAnalyses::none();
}