//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop; an IR sketch of this rewrite follows the list below. This pass also
// implements the following extensions to the basic algorithm:
//
//  1. Trivial instructions between the call and return do not prevent the
//     transformation from taking place, though currently the analysis cannot
//     support moving any really useful instructions (only dead ones).
//  2. This pass transforms functions that are prevented from being tail
//     recursive by an associative and commutative expression to use an
//     accumulator variable, thus compiling the typical naive factorial or
//     'fib' implementation into efficient code.
//  3. TRE is performed if the function returns void, if the return
//     returns the result returned by the call, or if the function returns a
//     run-time constant on all exits from the function. It is possible, though
//     unlikely, that the return returns something else (like constant 0), and
//     can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
//     the function return the exact same value.
//  4. If it can prove that callees do not access their caller stack frame,
//     they are marked as eligible for tail call elimination (by the code
//     generator).
//
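// As an illustrative sketch (IR hand-written for this comment, not taken from
// a test case), the basic rewrite turns
//
//   define i32 @count(i32 %n) {
//   entry:
//     ...
//     %rec = tail call i32 @count(i32 %n.next)
//     ret i32 %rec
//   }
//
// into a loop that branches back to the renamed entry block, with a PHI node
// merging the original and recursive arguments:
//
//   define i32 @count(i32 %n) {
//   entry:
//     br label %tailrecurse
//   tailrecurse:
//     %n.tr = phi i32 [ %n, %entry ], [ %n.next, %tailrecurse ]
//     ...
//     br label %tailrecurse
//   }
//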
// There are several improvements that could be made:
//
//  1. If the function has any alloca instructions, these instructions will be
//     moved out of the entry block of the function, causing them to be
//     evaluated each time through the tail recursion. Safely keeping allocas
//     in the entry block requires analysis to prove that the tail-called
//     function does not read or write the stack object.
//  2. Tail recursion is only performed if the call immediately precedes the
//     return instruction. It's possible that there could be a jump between
//     the call and the return.
//  3. There can be intervening operations between the call and the return that
//     prevent the TRE from occurring. For example, there could be GEP's and
//     stores to memory that will not be read or written by the call. This
//     requires some substantial analysis (such as with DSA) to prove safe to
//     move ahead of the call, but doing so could allow many more TREs to be
//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
//  4. The algorithm we use to detect if callees access their caller stack
//     frames is very primitive.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

#define DEBUG_TYPE "tailcallelim"

STATISTIC(NumEliminated, "Number of tail calls removed");
STATISTIC(NumRetDuped, "Number of returns duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");

/// Scan the specified function for alloca instructions.
/// If it contains any dynamic allocas, returns false.
static bool canTRE(Function &F) {
  // TODO: We don't do TRE if dynamic allocas are used.
  // Dynamic allocas allocate stack space which should be
  // deallocated before a new iteration starts. That is
  // currently not implemented.
  return llvm::all_of(instructions(F), [](Instruction &I) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    return !AI || AI->isStaticAlloca();
  });
}
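
// For reference, hand-written IR for this comment: an alloca is static when it
// has a constant size and sits in the entry block, e.g.
//   %buf = alloca [16 x i8]
// whereas a variable-sized alloca such as
//   %vla = alloca i8, i64 %n
// is dynamic, so canTRE() rejects the enclosing function.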

namespace {
struct AllocaDerivedValueTracker {
  // Start at a root value and walk its use-def chain to mark calls that use the
  // value or a derived value in AllocaUsers, and places where it may escape in
  // EscapePoints.
  void walk(Value *Root) {
    SmallVector<Use *, 32> Worklist;
    SmallPtrSet<Use *, 32> Visited;

    auto AddUsesToWorklist = [&](Value *V) {
      for (auto &U : V->uses()) {
        if (!Visited.insert(&U).second)
          continue;
        Worklist.push_back(&U);
      }
    };

    AddUsesToWorklist(Root);

    while (!Worklist.empty()) {
      Use *U = Worklist.pop_back_val();
      Instruction *I = cast<Instruction>(U->getUser());

      switch (I->getOpcode()) {
      case Instruction::Call:
      case Instruction::Invoke: {
        auto &CB = cast<CallBase>(*I);
        // If the alloca-derived argument is passed byval it is not an escape
        // point, or a use of an alloca. Calling with byval copies the contents
        // of the alloca into argument registers or stack slots, which exist
        // beyond the lifetime of the current frame.
        if (CB.isArgOperand(U) && CB.isByValArgument(CB.getArgOperandNo(U)))
          continue;
        bool IsNocapture =
            CB.isDataOperand(U) && CB.doesNotCapture(CB.getDataOperandNo(U));
        callUsesLocalStack(CB, IsNocapture);
        if (IsNocapture) {
          // If the alloca-derived argument is passed in as nocapture, then it
          // can't propagate to the call's return. That would be capturing.
          continue;
        }
        break;
      }
      case Instruction::Load: {
        // The result of a load is not alloca-derived (unless an alloca has
        // otherwise escaped, but this is a local analysis).
        continue;
      }
      case Instruction::Store: {
        if (U->getOperandNo() == 0)
          EscapePoints.insert(I);
        continue; // Stores have no users to analyze.
      }
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::PHI:
      case Instruction::Select:
      case Instruction::AddrSpaceCast:
        break;
      default:
        EscapePoints.insert(I);
        break;
      }

      AddUsesToWorklist(I);
    }
  }

  void callUsesLocalStack(CallBase &CB, bool IsNocapture) {
    // Add it to the list of alloca users.
    AllocaUsers.insert(&CB);

    // If it's nocapture then it can't capture this alloca.
    if (IsNocapture)
      return;

    // If it can write to memory, it can leak the alloca value.
    if (!CB.onlyReadsMemory())
      EscapePoints.insert(&CB);
  }

  SmallPtrSet<Instruction *, 32> AllocaUsers;
  SmallPtrSet<Instruction *, 32> EscapePoints;
};
}
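
// A small example of what the tracker records (IR invented for this comment):
// storing an alloca-derived pointer itself escapes, while storing through it
// does not:
//   store ptr %a, ptr @g    ; %a is operand 0 => escape point
//   store i32 0, ptr %a     ; only writes through %a => not an escape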

static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
  if (F.callsFunctionThatReturnsTwice())
    return false;

  // The local stack holds all alloca instructions and all byval arguments.
  AllocaDerivedValueTracker Tracker;
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr())
      Tracker.walk(&Arg);
  }
  for (auto &BB : F) {
    for (auto &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        Tracker.walk(AI);
  }

  bool Modified = false;

  // Track whether a block is reachable after an alloca has escaped. Blocks that
  // contain the escaping instruction will be marked as being visited without an
  // escaped alloca, since that is how the block began.
  enum VisitType {
    UNVISITED,
    UNESCAPED,
    ESCAPED
  };
  DenseMap<BasicBlock *, VisitType> Visited;

  // We propagate the fact that an alloca has escaped from block to successor.
  // Visit the blocks that are propagating the escapedness first. To do this, we
  // maintain two worklists.
  SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;

  // We may enter a block and visit it thinking that no alloca has escaped yet,
  // then see an escape point and go back around a loop edge and come back to
  // the same block twice. Because of this, we defer setting tail on calls when
  // we first encounter them in a block. Every entry in this list does not
  // statically use an alloca via use-def chain analysis, but may find an alloca
  // through other means if the block turns out to be reachable after an escape
  // point.
  SmallVector<CallInst *, 32> DeferredTails;

  BasicBlock *BB = &F.getEntryBlock();
  VisitType Escaped = UNESCAPED;
  do {
    for (auto &I : *BB) {
      if (Tracker.EscapePoints.count(&I))
        Escaped = ESCAPED;

      CallInst *CI = dyn_cast<CallInst>(&I);
      // A PseudoProbeInst has the IntrInaccessibleMemOnly tag, hence it is
      // considered to access memory and would be marked as a tail call if we
      // didn't bail out here.
      if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I) ||
          isa<PseudoProbeInst>(&I))
        continue;

      // Special-case operand bundles "clang.arc.attachedcall" and "ptrauth".
      bool IsNoTail =
          CI->isNoTailCall() ||
          CI->hasOperandBundlesOtherThan(
              {LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_ptrauth});

      if (!IsNoTail && CI->doesNotAccessMemory()) {
        // A call to a readnone function whose arguments are all things computed
        // outside this function can be marked tail. Even if you stored the
        // alloca address into a global, a readnone function can't load the
        // global anyhow.
        //
        // Note that this runs whether we know an alloca has escaped or not. If
        // it has, then we can't trust Tracker.AllocaUsers to be accurate.
        bool SafeToTail = true;
        for (auto &Arg : CI->args()) {
          if (isa<Constant>(Arg.getUser()))
            continue;
          if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
            if (!A->hasByValAttr())
              continue;
          SafeToTail = false;
          break;
        }
        if (SafeToTail) {
          using namespace ore;
          ORE->emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
                   << "marked as tail call candidate (readnone)";
          });
          CI->setTailCall();
          Modified = true;
          continue;
        }
      }

      if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI))
        DeferredTails.push_back(CI);
    }

    for (auto *SuccBB : successors(BB)) {
      auto &State = Visited[SuccBB];
      if (State < Escaped) {
        State = Escaped;
        if (State == ESCAPED)
          WorklistEscaped.push_back(SuccBB);
        else
          WorklistUnescaped.push_back(SuccBB);
      }
    }

    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.pop_back_val();
      Escaped = ESCAPED;
    } else {
      BB = nullptr;
      while (!WorklistUnescaped.empty()) {
        auto *NextBB = WorklistUnescaped.pop_back_val();
        if (Visited[NextBB] == UNESCAPED) {
          BB = NextBB;
          Escaped = UNESCAPED;
          break;
        }
      }
    }
  } while (BB);

  for (CallInst *CI : DeferredTails) {
    if (Visited[CI->getParent()] != ESCAPED) {
      // If the escape point was part way through the block, calls after the
      // escape point wouldn't have been put into DeferredTails.
      LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
      CI->setTailCall();
      Modified = true;
    }
  }

  return Modified;
}
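
// After markTails() runs, calls proven safe carry the 'tail' marker, e.g.
// (illustrative IR):
//   %r = tail call i32 @f(i32 %x)
// findTRECandidate() below only considers calls that carry this marker.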

/// Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
  if (isa<DbgInfoIntrinsic>(I))
    return true;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_end &&
        llvm::findAllocaForValue(II->getArgOperand(1)))
      return true;

  // FIXME: We can move load/store/call/free instructions above the call if the
  // call does not mod/ref the memory location being processed.
  if (I->mayHaveSideEffects()) // This also handles volatile loads.
    return false;

  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    // Loads may always be moved above calls without side effects.
    if (CI->mayHaveSideEffects()) {
      // Non-volatile loads may be moved above a call with side effects if it
      // does not write to memory and the load provably won't trap.
      // Writes to memory only matter if they may alias the pointer
      // being loaded from.
      const DataLayout &DL = L->getModule()->getDataLayout();
      if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
          !isSafeToLoadUnconditionally(L->getPointerOperand(), L->getType(),
                                       L->getAlign(), DL, L))
        return false;
    }
  }

  // Otherwise, if this is a side-effect free instruction, check to make sure
  // that it does not use the return value of the call. If it doesn't use the
  // return value of the call, it must only use things that are defined before
  // the call, or movable instructions between the call and the instruction
  // itself.
  return !is_contained(I->operands(), CI);
}

static bool canTransformAccumulatorRecursion(Instruction *I, CallInst *CI) {
  if (!I->isAssociative() || !I->isCommutative())
    return false;

  assert(I->getNumOperands() == 2 &&
         "Associative/commutative operations should have 2 args!");

  // Exactly one operand should be the result of the call instruction.
  if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
      (I->getOperand(0) != CI && I->getOperand(1) != CI))
    return false;

  // The only user of this instruction we allow is a single return instruction.
  if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
    return false;

  return true;
}
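
// The shape this recognizes, as hand-written IR for a naive factorial:
//   %rec = call i64 @fact(i64 %n1)
//   %res = mul i64 %n, %rec   ; associative + commutative, single use in ret
//   ret i64 %res
// eliminateCall() then threads the multiplication through an accumulator PHI
// instead of performing it after the recursive call returns.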

static Instruction *firstNonDbg(BasicBlock::iterator I) {
  while (isa<DbgInfoIntrinsic>(I))
    ++I;
  return &*I;
}

namespace {
class TailRecursionEliminator {
  Function &F;
  const TargetTransformInfo *TTI;
  AliasAnalysis *AA;
  OptimizationRemarkEmitter *ORE;
  DomTreeUpdater &DTU;

  // The below are shared state we want to have available when eliminating any
  // calls in the function. These values should be populated by
  // createTailRecurseLoopHeader the first time we find a call we can eliminate.
  BasicBlock *HeaderBB = nullptr;
  SmallVector<PHINode *, 8> ArgumentPHIs;

  // PHI node to store our return value.
  PHINode *RetPN = nullptr;

  // i1 PHI node to track if we have a valid return value stored in RetPN.
  PHINode *RetKnownPN = nullptr;

  // Vector of select instructions we inserted. These selects use RetKnownPN
  // to either propagate RetPN or select a new return value.
  SmallVector<SelectInst *, 8> RetSelects;

  // The below are shared state needed when performing accumulator recursion.
  // These values should be populated by insertAccumulator the first time we
  // find an elimination that requires an accumulator.

  // PHI node to store our current accumulated value.
  PHINode *AccPN = nullptr;

  // The instruction doing the accumulating.
  Instruction *AccumulatorRecursionInstr = nullptr;

  TailRecursionEliminator(Function &F, const TargetTransformInfo *TTI,
                          AliasAnalysis *AA, OptimizationRemarkEmitter *ORE,
                          DomTreeUpdater &DTU)
      : F(F), TTI(TTI), AA(AA), ORE(ORE), DTU(DTU) {}

  CallInst *findTRECandidate(BasicBlock *BB);

  void createTailRecurseLoopHeader(CallInst *CI);

  void insertAccumulator(Instruction *AccRecInstr);

  bool eliminateCall(CallInst *CI);

  void cleanupAndFinalize();

  bool processBlock(BasicBlock &BB);

  void copyByValueOperandIntoLocalTemp(CallInst *CI, int OpndIdx);

  void copyLocalTempOfByValueOperandIntoArguments(CallInst *CI, int OpndIdx);

public:
  static bool eliminate(Function &F, const TargetTransformInfo *TTI,
                        AliasAnalysis *AA, OptimizationRemarkEmitter *ORE,
                        DomTreeUpdater &DTU);
};
} // namespace

CallInst *TailRecursionEliminator::findTRECandidate(BasicBlock *BB) {
  Instruction *TI = BB->getTerminator();

  if (&BB->front() == TI) // Make sure there is something before the terminator.
    return nullptr;

  // Scan backwards from the return, checking to see if there is a tail call in
  // this block. If so, set CI to it.
  CallInst *CI = nullptr;
  BasicBlock::iterator BBI(TI);
  while (true) {
    CI = dyn_cast<CallInst>(BBI);
    if (CI && CI->getCalledFunction() == &F)
      break;

    if (BBI == BB->begin())
      return nullptr; // Didn't find a potential tail call.
    --BBI;
  }

  assert((!CI->isTailCall() || !CI->isNoTailCall()) &&
         "Incompatible call site attributes (Tail, NoTail)");
  if (!CI->isTailCall())
    return nullptr;

  // As a special case, detect code like this:
  //   double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
  // and disable this xform in this case, because the code generator will
  // lower the call to fabs into inline code.
  if (BB == &F.getEntryBlock() &&
      firstNonDbg(BB->front().getIterator()) == CI &&
      firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
      !TTI->isLoweredToCall(CI->getCalledFunction())) {
    // A single-block function with just a call and a return. Check that
    // the arguments match.
    auto I = CI->arg_begin(), E = CI->arg_end();
    Function::arg_iterator FI = F.arg_begin(), FE = F.arg_end();
    for (; I != E && FI != FE; ++I, ++FI)
      if (*I != &*FI) break;
    if (I == E && FI == FE)
      return nullptr;
  }

  return CI;
}

void TailRecursionEliminator::createTailRecurseLoopHeader(CallInst *CI) {
  HeaderBB = &F.getEntryBlock();
  BasicBlock *NewEntry = BasicBlock::Create(F.getContext(), "", &F, HeaderBB);
  NewEntry->takeName(HeaderBB);
  HeaderBB->setName("tailrecurse");
  BranchInst *BI = BranchInst::Create(HeaderBB, NewEntry);
  BI->setDebugLoc(CI->getDebugLoc());

  // Move all fixed sized allocas from HeaderBB to NewEntry.
  for (BasicBlock::iterator OEBI = HeaderBB->begin(), E = HeaderBB->end(),
                            NEBI = NewEntry->begin();
       OEBI != E;)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
      if (isa<ConstantInt>(AI->getArraySize()))
        AI->moveBefore(&*NEBI);

  // Now that we have created a new block, which jumps to the entry
  // block, insert a PHI node for each argument of the function.
  // For now, we initialize each PHI to only have the real arguments
  // which are passed in.
  Instruction *InsertPos = &HeaderBB->front();
  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I) {
    PHINode *PN =
        PHINode::Create(I->getType(), 2, I->getName() + ".tr", InsertPos);
    I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
    PN->addIncoming(&*I, NewEntry);
    ArgumentPHIs.push_back(PN);
  }

  // If the function doesn't return void, create the RetPN and RetKnownPN PHI
  // nodes to track our return value. We initialize RetPN with poison and
  // RetKnownPN with false since we can't know our return value at function
  // entry.
  Type *RetType = F.getReturnType();
  if (!RetType->isVoidTy()) {
    Type *BoolType = Type::getInt1Ty(F.getContext());
    RetPN = PHINode::Create(RetType, 2, "ret.tr", InsertPos);
    RetKnownPN = PHINode::Create(BoolType, 2, "ret.known.tr", InsertPos);

    RetPN->addIncoming(PoisonValue::get(RetType), NewEntry);
    RetKnownPN->addIncoming(ConstantInt::getFalse(BoolType), NewEntry);
  }

  // The entry block was changed from HeaderBB to NewEntry.
  // The forward DominatorTree needs to be recalculated when the EntryBB is
  // changed. In this corner-case we recalculate the entire tree.
  DTU.recalculate(*NewEntry->getParent());
}
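
// For an i32-returning function, the header built above looks roughly like
// this (IR hand-written for this comment; the PHI incomings for eliminated
// calls are added later by eliminateCall):
//   entry:
//     br label %tailrecurse
//   tailrecurse:
//     %x.tr = phi i32 [ %x, %entry ], ...
//     %ret.tr = phi i32 [ poison, %entry ], ...
//     %ret.known.tr = phi i1 [ false, %entry ], ...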

void TailRecursionEliminator::insertAccumulator(Instruction *AccRecInstr) {
  assert(!AccPN && "Trying to insert multiple accumulators");

  AccumulatorRecursionInstr = AccRecInstr;

  // Start by inserting a new PHI node for the accumulator.
  pred_iterator PB = pred_begin(HeaderBB), PE = pred_end(HeaderBB);
  AccPN = PHINode::Create(F.getReturnType(), std::distance(PB, PE) + 1,
                          "accumulator.tr", &HeaderBB->front());

  // Loop over all of the predecessors of the tail recursion block. For the
  // real entry into the function we seed the PHI with the identity constant for
  // the accumulation operation. For any other existing branches to this block
  // (due to other tail recursions eliminated) the accumulator is not modified.
  // Because we haven't added the branch in the current block to HeaderBB yet,
  // it will not show up as a predecessor.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    if (P == &F.getEntryBlock()) {
      Constant *Identity = ConstantExpr::getBinOpIdentity(
          AccRecInstr->getOpcode(), AccRecInstr->getType());
      AccPN->addIncoming(Identity, P);
    } else {
      AccPN->addIncoming(AccPN, P);
    }
  }

  ++NumAccumAdded;
}

// Creates a copy of the contents of the ByVal operand of the specified
// call instruction into a newly created temporary variable.
void TailRecursionEliminator::copyByValueOperandIntoLocalTemp(CallInst *CI,
                                                              int OpndIdx) {
  Type *AggTy = CI->getParamByValType(OpndIdx);
  assert(AggTy);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Get alignment of byVal operand.
  Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());

  // Create an alloca for the temporary byval operand.
  // Put the alloca into the entry block.
  Value *NewAlloca = new AllocaInst(
      AggTy, DL.getAllocaAddrSpace(), nullptr, Alignment,
      CI->getArgOperand(OpndIdx)->getName(), &*F.getEntryBlock().begin());

  IRBuilder<> Builder(CI);
  Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));

  // Copy data from the byval operand into the temporary variable.
  Builder.CreateMemCpy(NewAlloca, /*DstAlign*/ Alignment,
                       CI->getArgOperand(OpndIdx),
                       /*SrcAlign*/ Alignment, Size);
  CI->setArgOperand(OpndIdx, NewAlloca);
}

// Copies the temporary variable (holding the value of the ByVal argument)
// back into the corresponding function argument location.
void TailRecursionEliminator::copyLocalTempOfByValueOperandIntoArguments(
    CallInst *CI, int OpndIdx) {
  Type *AggTy = CI->getParamByValType(OpndIdx);
  assert(AggTy);
  const DataLayout &DL = F.getParent()->getDataLayout();

  // Get alignment of byVal operand.
  Align Alignment(CI->getParamAlign(OpndIdx).valueOrOne());

  IRBuilder<> Builder(CI);
  Value *Size = Builder.getInt64(DL.getTypeAllocSize(AggTy));

  // Copy data from the temporary variable into the corresponding
  // function argument location.
  Builder.CreateMemCpy(F.getArg(OpndIdx), /*DstAlign*/ Alignment,
                       CI->getArgOperand(OpndIdx),
                       /*SrcAlign*/ Alignment, Size);
}
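
// Taken together, the two helpers above rewrite each byval operand of the
// recursive call as a two-step copy (a sketch, not verbatim pass output):
//   %tmp = alloca %struct.S        ; placed in the entry block
//   memcpy(%tmp, %operand)         ; snapshot the operand first
//   memcpy(%caller_arg, %tmp)      ; then refresh the argument slot
// so the next loop iteration sees the value the call would have passed, even
// if the operand itself aliases the argument slot being overwritten.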

bool TailRecursionEliminator::eliminateCall(CallInst *CI) {
  ReturnInst *Ret = cast<ReturnInst>(CI->getParent()->getTerminator());

  // Ok, we found a potential tail call. We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  Instruction *AccRecInstr = nullptr;
  BasicBlock::iterator BBI(CI);
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (canMoveAboveCall(&*BBI, CI, AA))
      continue;

    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination. Check to see if this is the
    // case, and if so, remember which instruction accumulates for later.
    if (AccPN || !canTransformAccumulatorRecursion(&*BBI, CI))
      return false; // We cannot eliminate the tail recursion!

    // Yes, this is accumulator recursion. Remember which instruction
    // accumulates.
    AccRecInstr = &*BBI;
  }

  BasicBlock *BB = Ret->getParent();

  using namespace ore;
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
           << "transforming tail recursion into loop";
  });

  // OK! We can transform this tail call. If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (!HeaderBB)
    createTailRecurseLoopHeader(CI);

  // Copy values of ByVal operands into local temporary variables.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I))
      copyByValueOperandIntoLocalTemp(CI, I);
  }

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned I = 0, E = CI->arg_size(); I != E; ++I) {
    if (CI->isByValArgument(I)) {
      copyLocalTempOfByValueOperandIntoArguments(CI, I);
      ArgumentPHIs[I]->addIncoming(F.getArg(I), BB);
    } else
      ArgumentPHIs[I]->addIncoming(CI->getArgOperand(I), BB);
  }

  if (AccRecInstr) {
    insertAccumulator(AccRecInstr);

    // Rewrite the accumulator recursion instruction so that it does not use
    // the result of the call anymore, instead, use the PHI node we just
    // inserted.
    AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
  }

  // Update our return value tracking.
  if (RetPN) {
    if (Ret->getReturnValue() == CI || AccRecInstr) {
      // Defer selecting a return value.
      RetPN->addIncoming(RetPN, BB);
      RetKnownPN->addIncoming(RetKnownPN, BB);
    } else {
      // We found a return value we want to use, insert a select instruction to
      // select it if we don't already know what our return value will be and
      // store the result in our return value PHI node.
      SelectInst *SI = SelectInst::Create(
          RetKnownPN, RetPN, Ret->getReturnValue(), "current.ret.tr", Ret);
      RetSelects.push_back(SI);

      RetPN->addIncoming(SI, BB);
      RetKnownPN->addIncoming(ConstantInt::getTrue(RetKnownPN->getType()), BB);
    }

    if (AccPN)
      AccPN->addIncoming(AccRecInstr ? AccRecInstr : AccPN, BB);
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(HeaderBB, Ret);
  NewBI->setDebugLoc(CI->getDebugLoc());

  BB->getInstList().erase(Ret); // Remove return.
  BB->getInstList().erase(CI);  // Remove call.
  DTU.applyUpdates({{DominatorTree::Insert, BB, HeaderBB}});
  ++NumEliminated;
  return true;
}

void TailRecursionEliminator::cleanupAndFinalize() {
  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves. Check to see if we did and clean up our mess if so. This
  // occurs when a function passes an argument straight through to its tail
  // call.
  for (PHINode *PN : ArgumentPHIs) {
    // If the PHI Node is a dynamic constant, replace it with the value it is.
    if (Value *PNV = simplifyInstruction(PN, F.getParent()->getDataLayout())) {
      PN->replaceAllUsesWith(PNV);
      PN->eraseFromParent();
    }
  }

  if (RetPN) {
    if (RetSelects.empty()) {
      // If we didn't insert any select instructions, then we know we didn't
      // store a return value and we can remove the PHI nodes we inserted.
      RetPN->dropAllReferences();
      RetPN->eraseFromParent();

      RetKnownPN->dropAllReferences();
      RetKnownPN->eraseFromParent();

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // return in the function, and return its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (BasicBlock &BB : F) {
          ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
          if (!RI)
            continue;

          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     RI->getOperand(0));
          AccRecInstrNew->insertBefore(RI);
          RI->setOperand(0, AccRecInstrNew);
        }
      }
    } else {
      // We need to insert a select instruction before any return left in the
      // function to select our stored return value if we have one.
      for (BasicBlock &BB : F) {
        ReturnInst *RI = dyn_cast<ReturnInst>(BB.getTerminator());
        if (!RI)
          continue;

        SelectInst *SI = SelectInst::Create(
            RetKnownPN, RetPN, RI->getOperand(0), "current.ret.tr", RI);
        RetSelects.push_back(SI);
        RI->setOperand(0, SI);
      }

      if (AccPN) {
        // We need to insert a copy of our accumulator instruction before any
        // of the selects we inserted, and select its result instead.
        Instruction *AccRecInstr = AccumulatorRecursionInstr;
        for (SelectInst *SI : RetSelects) {
          Instruction *AccRecInstrNew = AccRecInstr->clone();
          AccRecInstrNew->setName("accumulator.ret.tr");
          AccRecInstrNew->setOperand(AccRecInstr->getOperand(0) == AccPN,
                                     SI->getFalseValue());
          AccRecInstrNew->insertBefore(SI);
          SI->setFalseValue(AccRecInstrNew);
        }
      }
    }
  }
}

bool TailRecursionEliminator::processBlock(BasicBlock &BB) {
  Instruction *TI = BB.getTerminator();

  if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
    if (BI->isConditional())
      return false;

    BasicBlock *Succ = BI->getSuccessor(0);
    ReturnInst *Ret = dyn_cast<ReturnInst>(Succ->getFirstNonPHIOrDbg(true));

    if (!Ret)
      return false;

    CallInst *CI = findTRECandidate(&BB);

    if (!CI)
      return false;

    LLVM_DEBUG(dbgs() << "FOLDING: " << *Succ
                      << "INTO UNCOND BRANCH PRED: " << BB);
    FoldReturnIntoUncondBranch(Ret, Succ, &BB, &DTU);
    ++NumRetDuped;

    // If all predecessors of Succ have been eliminated by
    // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
    // because the ret instruction in there is still using a value which
    // eliminateCall will attempt to remove. This block can only contain
    // instructions that can't have uses, therefore it is safe to remove.
    if (pred_empty(Succ))
      DTU.deleteBB(Succ);

    eliminateCall(CI);
    return true;
  } else if (isa<ReturnInst>(TI)) {
    CallInst *CI = findTRECandidate(&BB);

    if (CI)
      return eliminateCall(CI);
  }

  return false;
}

bool TailRecursionEliminator::eliminate(Function &F,
                                        const TargetTransformInfo *TTI,
                                        AliasAnalysis *AA,
                                        OptimizationRemarkEmitter *ORE,
                                        DomTreeUpdater &DTU) {
  if (F.getFnAttribute("disable-tail-calls").getValueAsBool())
    return false;

  bool MadeChange = false;
  MadeChange |= markTails(F, ORE);

  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg())
    return MadeChange;

  if (!canTRE(F))
    return MadeChange;

  // Change any tail recursive calls to loops.
  TailRecursionEliminator TRE(F, TTI, AA, ORE, DTU);

  for (BasicBlock &BB : F)
    MadeChange |= TRE.processBlock(BB);

  TRE.cleanupAndFinalize();

  return MadeChange;
}

namespace {
struct TailCallElim : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  TailCallElim() : FunctionPass(ID) {
    initializeTailCallElimPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<PostDominatorTreeWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    // There is no noticeable performance difference here between Lazy and
    // Eager UpdateStrategy based on some test results. It is feasible to
    // switch the UpdateStrategy to Lazy if we find it profitable later.
    DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);

    return TailRecursionEliminator::eliminate(
        F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
        &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU);
  }
};
}

char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(TailCallElim, "tailcallelim", "Tail Call Elimination",
                    false, false)

// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
  return new TailCallElim();
}

PreservedAnalyses TailCallElimPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {

  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  auto *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(F);
  // There is no noticeable performance difference here between Lazy and Eager
  // UpdateStrategy based on some test results. It is feasible to switch the
  // UpdateStrategy to Lazy if we find it profitable later.
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
  bool Changed = TailRecursionEliminator::eliminate(F, &TTI, &AA, &ORE, DTU);

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<PostDominatorTreeAnalysis>();
  return PA;
}
940