//===- CoroElide.cpp - Coroutine Frame Allocation Elision Pass -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Coroutines/CoroElide.h"
#include "CoroInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"

using namespace llvm;

#define DEBUG_TYPE "coro-elide"

STATISTIC(NumOfCoroElided, "The # of coroutines elided.");

#ifndef NDEBUG
static cl::opt<std::string> CoroElideInfoOutputFilename(
    "coro-elide-info-output-file", cl::value_desc("filename"),
    cl::desc("File to record the coroutines that were elided"), cl::Hidden);
#endif

namespace {
// Created on demand if the coro-elide pass has work to do.
struct Lowerer : coro::LowererBase {
  SmallVector<CoroIdInst *, 4> CoroIds;
  SmallVector<CoroBeginInst *, 1> CoroBegins;
  SmallVector<CoroAllocInst *, 1> CoroAllocs;
  SmallVector<CoroSubFnInst *, 4> ResumeAddr;
  DenseMap<CoroBeginInst *, SmallVector<CoroSubFnInst *, 4>> DestroyAddr;
  SmallPtrSet<const SwitchInst *, 4> CoroSuspendSwitches;

  Lowerer(Module &M) : LowererBase(M) {}

  void elideHeapAllocations(Function *F, uint64_t FrameSize, Align FrameAlign,
                            AAResults &AA);
  bool shouldElide(Function *F, DominatorTree &DT) const;
  void collectPostSplitCoroIds(Function *F);
  bool processCoroId(CoroIdInst *, AAResults &AA, DominatorTree &DT);
  bool hasEscapePath(const CoroBeginInst *,
                     const SmallPtrSetImpl<BasicBlock *> &) const;
};
} // end anonymous namespace

// Go through the list of coro.subfn.addr intrinsics and replace them with the
// provided constant.
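// For instance (illustrative pseudo-IR), when the resume function is known to
// be @f.resume,
//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)
// is replaced with the constant @f.resume (bitcast to the intrinsic's type as
// needed), and dependent calls simplify into direct calls to @f.resume.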
static void replaceWithConstant(Constant *Value,
                                SmallVectorImpl<CoroSubFnInst *> &Users) {
  if (Users.empty())
    return;

  // See if we need to bitcast the constant to match the type of the intrinsic
  // being replaced. Note: All coro.subfn.addr intrinsics return the same type,
  // so we only need to examine the type of the first one in the list.
  Type *IntrTy = Users.front()->getType();
  Type *ValueTy = Value->getType();
  if (ValueTy != IntrTy) {
    // May need to tweak the function type to match the type expected at the
    // use site.
    assert(ValueTy->isPointerTy() && IntrTy->isPointerTy());
    Value = ConstantExpr::getBitCast(Value, IntrTy);
  }

  // Now the value type matches the type of the intrinsic. Replace them all!
  for (CoroSubFnInst *I : Users)
    replaceAndRecursivelySimplify(I, Value);
}

// See if any operand of the call instruction references the coroutine frame.
static bool operandReferences(CallInst *CI, AllocaInst *Frame, AAResults &AA) {
  for (Value *Op : CI->operand_values())
    if (!AA.isNoAlias(Op, Frame))
      return true;
  return false;
}

// Look for any tail calls referencing the coroutine frame and remove the tail
// attribute from them, since the coroutine frame now resides on the stack and
// a tail call implies that the function does not reference anything on the
// stack. However, if it's a musttail call, we cannot remove the tail-call
// attribute. It's safe to keep it there, as the musttail call is for symmetric
// transfer, and by that point the frame should have been destroyed and hence
// is not interfering with operands.
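// For example (illustrative), a call such as
//   %r = tail call i32 @use(i8* %frame.ptr)
// where %frame.ptr may alias the frame alloca loses its 'tail' marker here.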
static void removeTailCallAttribute(AllocaInst *Frame, AAResults &AA) {
  Function &F = *Frame->getFunction();
  for (Instruction &I : instructions(F))
    if (auto *Call = dyn_cast<CallInst>(&I))
      if (Call->isTailCall() && operandReferences(Call, Frame, AA) &&
          !Call->isMustTailCall())
        Call->setTailCall(false);
}

// Given a resume function @f.resume(%f.frame* %frame), returns the size
// and expected alignment of %f.frame type.
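// For example (illustrative), given a resume function declared as
//   define void @f.resume(%f.frame* align 8 dereferenceable(64))
// this returns a frame size of 64 bytes and an alignment of 8.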
static Optional<std::pair<uint64_t, Align>> getFrameLayout(Function *Resume) {
  // Pull information from the function attributes.
  auto Size = Resume->getParamDereferenceableBytes(0);
  if (!Size)
    return None;
  return std::make_pair(Size, Resume->getParamAlign(0).valueOrOne());
}

// Finds the first non-alloca instruction in the entry block of a function.
static Instruction *getFirstNonAllocaInTheEntryBlock(Function *F) {
  for (Instruction &I : F->getEntryBlock())
    if (!isa<AllocaInst>(&I))
      return &I;
  llvm_unreachable("no terminator in the entry block");
}

#ifndef NDEBUG
static std::unique_ptr<raw_fd_ostream> getOrCreateLogFile() {
  assert(!CoroElideInfoOutputFilename.empty() &&
         "coro-elide-info-output-file shouldn't be empty");
  std::error_code EC;
  auto Result = std::make_unique<raw_fd_ostream>(CoroElideInfoOutputFilename,
                                                 EC, sys::fs::OF_Append);
  if (!EC)
    return Result;
  llvm::errs() << "Error opening coro-elide-info-output-file '"
               << CoroElideInfoOutputFilename << "' for appending!\n";
  return std::make_unique<raw_fd_ostream>(2, false); // stderr.
}
#endif

// To elide heap allocations we need to suppress code blocks guarded by
// llvm.coro.alloc and llvm.coro.free instructions.
void Lowerer::elideHeapAllocations(Function *F, uint64_t FrameSize,
                                   Align FrameAlign, AAResults &AA) {
  LLVMContext &C = F->getContext();
  auto *InsertPt =
      getFirstNonAllocaInTheEntryBlock(CoroIds.front()->getFunction());

  // Replacing llvm.coro.alloc with false will suppress the dynamic
  // allocation, since the frontend is expected to generate code that looks
  // like:
  //   id = coro.id(...)
  //   mem = coro.alloc(id) ? malloc(coro.size()) : 0;
  //   coro.begin(id, mem)
  auto *False = ConstantInt::getFalse(C);
  for (auto *CA : CoroAllocs) {
    CA->replaceAllUsesWith(False);
    CA->eraseFromParent();
  }

  // FIXME: Design how to transmit alignment information for every alloca that
  // is spilled into the coroutine frame and recreate the alignment information
  // here. Possibly we will need to do a mini SROA here and break the coroutine
  // frame into individual AllocaInst recreating the original alignment.
  const DataLayout &DL = F->getParent()->getDataLayout();
  auto FrameTy = ArrayType::get(Type::getInt8Ty(C), FrameSize);
  auto *Frame = new AllocaInst(FrameTy, DL.getAllocaAddrSpace(), "", InsertPt);
  Frame->setAlignment(FrameAlign);
  auto *FrameVoidPtr =
      new BitCastInst(Frame, Type::getInt8PtrTy(C), "vFrame", InsertPt);

  for (auto *CB : CoroBegins) {
    CB->replaceAllUsesWith(FrameVoidPtr);
    CB->eraseFromParent();
  }

  // Since the coroutine frame now lives on the stack, any tail call
  // referencing it must be made a non-tail call.
  removeTailCallAttribute(Frame, AA);
}

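// Returns true if the given coro.begin may reach one of the given terminator
// blocks along a path that does not pass through a coro.destroy of that
// coro.begin (conservatively returning true once the block-visit budget is
// exhausted).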
bool Lowerer::hasEscapePath(const CoroBeginInst *CB,
                            const SmallPtrSetImpl<BasicBlock *> &TIs) const {
  const auto &It = DestroyAddr.find(CB);
  assert(It != DestroyAddr.end());

  // Limit the number of blocks we visit.
  unsigned Limit = 32 * (1 + It->second.size());

  SmallVector<const BasicBlock *, 32> Worklist;
  Worklist.push_back(CB->getParent());

  SmallPtrSet<const BasicBlock *, 32> Visited;
  // Consider the basic blocks of the coro.destroys as already visited, so
  // that we skip any path that passes through a coro.destroy.
  for (auto *DA : It->second)
    Visited.insert(DA->getParent());

  do {
    const auto *BB = Worklist.pop_back_val();
    if (!Visited.insert(BB).second)
      continue;
    if (TIs.count(BB))
      return true;

    // Conservatively say that there is potentially a path.
    if (!--Limit)
      return true;

    auto TI = BB->getTerminator();
    // Although the default destination of a coro.suspend switch is the
    // suspend block, which would mean an escape path to a normal terminator,
    // it is reasonable to skip it since the coroutine frame doesn't change
    // outside the coroutine body.
    if (isa<SwitchInst>(TI) &&
        CoroSuspendSwitches.count(cast<SwitchInst>(TI))) {
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(1));
      Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(2));
    } else
      Worklist.append(succ_begin(BB), succ_end(BB));

  } while (!Worklist.empty());

  // We have exhausted all possible paths and are certain that coro.begin
  // cannot reach any of the terminators.
  return false;
}

bool Lowerer::shouldElide(Function *F, DominatorTree &DT) const {
  // If there are no CoroAllocs, we cannot suppress allocation, so elision is
  // not possible.
  if (CoroAllocs.empty())
    return false;

  // Check that for every coro.begin there is at least one coro.destroy
  // directly referencing the SSA value of that coro.begin along each
  // non-exceptional path.
  // If the value escaped, then coro.destroy would have been referencing a
  // memory location storing that value and not the virtual register.
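  //
  // A simple positive case (illustrative pseudo-IR):
  //   %hdl = call i8* @llvm.coro.begin(...)
  //   ...
  //   %d = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 1) ; DestroyIndex
  //   call void %d(i8* %hdl)
  //   ret void
  // The destroy dominates the only terminator, so %hdl cannot outlive the
  // caller and the frame allocation can be elided.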

  SmallPtrSet<BasicBlock *, 8> Terminators;
  // First gather all of the non-exceptional terminators for the function.
  // Consider the final coro.suspend as the real terminator when the current
  // function is a coroutine.
  for (BasicBlock &B : *F) {
    auto *TI = B.getTerminator();
    if (TI->getNumSuccessors() == 0 && !TI->isExceptionalTerminator() &&
        !isa<UnreachableInst>(TI))
      Terminators.insert(&B);
  }

  // Filter out the coro.destroys that lie along exceptional paths.
  SmallPtrSet<CoroBeginInst *, 8> ReferencedCoroBegins;
  for (auto &It : DestroyAddr) {
    // If any coro.destroy dominates all of the terminators for the
    // coro.begin, we know the corresponding coro.begin cannot escape.
    for (Instruction *DA : It.second) {
      if (llvm::all_of(Terminators, [&](auto *TI) {
            return DT.dominates(DA, TI->getTerminator());
          })) {
        ReferencedCoroBegins.insert(It.first);
        break;
      }
    }

    // Check whether there is any path from the coro.begin to the terminators
    // that does not pass through any of the coro.destroys.
    //
    // hasEscapePath is relatively slow, so we avoid running it as much as
    // possible.
    if (!ReferencedCoroBegins.count(It.first) &&
        !hasEscapePath(It.first, Terminators))
      ReferencedCoroBegins.insert(It.first);
  }

  // If the size of the set is the same as the total number of coro.begins,
  // that means we found a coro.free or coro.destroy referencing each
  // coro.begin, so we can perform heap elision.
  return ReferencedCoroBegins.size() == CoroBegins.size();
}

void Lowerer::collectPostSplitCoroIds(Function *F) {
  CoroIds.clear();
  CoroSuspendSwitches.clear();
  for (auto &I : instructions(F)) {
    if (auto *CII = dyn_cast<CoroIdInst>(&I))
      if (CII->getInfo().isPostSplit())
        // If it is the coroutine itself, don't touch it.
        if (CII->getCoroutine() != CII->getFunction())
          CoroIds.push_back(CII);

    // Consider a case like:
    //   %0 = call i8 @llvm.coro.suspend(...)
    //   switch i8 %0, label %suspend [i8 0, label %resume
    //                                 i8 1, label %cleanup]
    // and collect the SwitchInsts which are used by escape analysis later.
    if (auto *CSI = dyn_cast<CoroSuspendInst>(&I))
      if (CSI->hasOneUse() && isa<SwitchInst>(CSI->use_begin()->getUser())) {
        SwitchInst *SWI = cast<SwitchInst>(CSI->use_begin()->getUser());
        if (SWI->getNumCases() == 2)
          CoroSuspendSwitches.insert(SWI);
      }
  }
}

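// Rewrites the coro.subfn.addr users of the given coro.id's coro.begins to
// the concrete subfunctions and, when it is safe to do so, elides the frame
// allocation.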
bool Lowerer::processCoroId(CoroIdInst *CoroId, AAResults &AA,
                            DominatorTree &DT) {
  CoroBegins.clear();
  CoroAllocs.clear();
  ResumeAddr.clear();
  DestroyAddr.clear();

  // Collect all coro.begins and coro.allocs associated with this coro.id.
  for (User *U : CoroId->users()) {
    if (auto *CB = dyn_cast<CoroBeginInst>(U))
      CoroBegins.push_back(CB);
    else if (auto *CA = dyn_cast<CoroAllocInst>(U))
      CoroAllocs.push_back(CA);
  }

  // Collect all coro.subfn.addrs associated with coro.begin.
  // Note, we only devirtualize the calls if their coro.subfn.addr refers to
  // coro.begin directly. If we run into cases where this check is too
  // conservative, we can consider relaxing the check.
  for (CoroBeginInst *CB : CoroBegins) {
    for (User *U : CB->users())
      if (auto *II = dyn_cast<CoroSubFnInst>(U))
        switch (II->getIndex()) {
        case CoroSubFnInst::ResumeIndex:
          ResumeAddr.push_back(II);
          break;
        case CoroSubFnInst::DestroyIndex:
          DestroyAddr[CB].push_back(II);
          break;
        default:
          llvm_unreachable("unexpected coro.subfn.addr constant");
        }
  }

  // A post-split coro.id refers to an array of subfunctions in its Info
  // argument.
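  // For a split coroutine @f, the Info argument typically points at
  // (illustrative):
  //   @f.resumers = [@f.resume, @f.destroy, @f.cleanup]
  // indexed by ResumeIndex, DestroyIndex, and CleanupIndex respectively.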
  ConstantArray *Resumers = CoroId->getInfo().Resumers;
  assert(Resumers && "PostSplit coro.id Info argument must refer to an array "
                     "of coroutine subfunctions");
  auto *ResumeAddrConstant =
      Resumers->getAggregateElement(CoroSubFnInst::ResumeIndex);

  replaceWithConstant(ResumeAddrConstant, ResumeAddr);

  bool ShouldElide = shouldElide(CoroId->getFunction(), DT);

  auto *DestroyAddrConstant = Resumers->getAggregateElement(
      ShouldElide ? CoroSubFnInst::CleanupIndex : CoroSubFnInst::DestroyIndex);

  for (auto &It : DestroyAddr)
    replaceWithConstant(DestroyAddrConstant, It.second);

  if (ShouldElide) {
    if (auto FrameSizeAndAlign =
            getFrameLayout(cast<Function>(ResumeAddrConstant))) {
      elideHeapAllocations(CoroId->getFunction(), FrameSizeAndAlign->first,
                           FrameSizeAndAlign->second, AA);
      coro::replaceCoroFree(CoroId, /*Elide=*/true);
      NumOfCoroElided++;
#ifndef NDEBUG
      if (!CoroElideInfoOutputFilename.empty())
        *getOrCreateLogFile()
            << "Elide " << CoroId->getCoroutine()->getName() << " in "
            << CoroId->getFunction()->getName() << "\n";
#endif
    }
  }

  return true;
}

static bool declaresCoroElideIntrinsics(Module &M) {
  return coro::declaresIntrinsics(M, {"llvm.coro.id", "llvm.coro.id.async"});
}

PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
  auto &M = *F.getParent();
  if (!declaresCoroElideIntrinsics(M))
    return PreservedAnalyses::all();

  Lowerer L(M);
  L.CoroIds.clear();
  L.collectPostSplitCoroIds(&F);
  // If we did not find any coro.id, there is nothing to do.
  if (L.CoroIds.empty())
    return PreservedAnalyses::all();

  AAResults &AA = AM.getResult<AAManager>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);

  bool Changed = false;
  for (auto *CII : L.CoroIds)
    Changed |= L.processCoroId(CII, AA, DT);

  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}