//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop.  This pass also implements the following extensions to the basic
// algorithm:
//
//  1. Trivial instructions between the call and return do not prevent the
//     transformation from taking place, though currently the analysis cannot
//     support moving any really useful instructions (only dead ones).
//  2. This pass transforms functions that are prevented from being tail
//     recursive by an associative expression to use an accumulator variable,
//     thus compiling the typical naive factorial or 'fib' implementation into
//     efficient code.
//  3. TRE is performed if the function returns void, if the return
//     returns the result returned by the call, or if the function returns a
//     run-time constant on all exits from the function.  It is possible, though
//     unlikely, that the return returns something else (like constant 0), and
//     can still be TRE'd.  It can be TRE'd if ALL OTHER return instructions in
//     the function return the exact same value.
//  4. If it can prove that callees do not access their caller stack frame,
//     they are marked as eligible for tail call elimination (by the code
//     generator).
//
// There are several improvements that could be made:
//
//  1. If the function has any alloca instructions, these instructions will be
//     moved out of the entry block of the function, causing them to be
//     evaluated each time through the tail recursion.  Safely keeping allocas
//     in the entry block requires analysis to prove that the tail-called
//     function does not read or write the stack object.
//  2. Tail recursion is only performed if the call immediately precedes the
//     return instruction.  It's possible that there could be a jump between
//     the call and the return.
//  3. There can be intervening operations between the call and the return that
//     prevent the TRE from occurring.  For example, there could be GEP's and
//     stores to memory that will not be read or written by the call.  This
//     requires some substantial analysis (such as with DSA) to prove safe to
//     move ahead of the call, but doing so could allow many more TREs to be
//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
//  4. The algorithm we use to detect if callees access their caller stack
//     frames is very primitive.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Support/CFG.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

namespace {
  // -stats counters: number of recursive calls turned into branches, and
  // number of accumulator PHI nodes introduced to make that possible.
  Statistic<> NumEliminated("tailcallelim", "Number of tail calls removed");
  Statistic<> NumAccumAdded("tailcallelim","Number of accumulators introduced");

  /// TailCallElim - Function pass that rewrites self-recursive tail calls
  /// into branches back to the function entry, optionally introducing an
  /// accumulator variable so that trailing associative operations (e.g. the
  /// multiply in a naive factorial) do not block the transformation.
  struct TailCallElim : public FunctionPass {
    virtual bool runOnFunction(Function &F);

  private:
    // Try to eliminate the tail call (if any) feeding the return RI.  On the
    // first successful elimination this creates the new pseudo-entry block
    // and one PHI per function argument, recorded in ArgumentPHIs.
    bool ProcessReturningBlock(ReturnInst *RI, BasicBlock *&OldEntry,
                               std::vector<PHINode*> &ArgumentPHIs);
    // True if instruction I can safely be hoisted above the recursive call CI.
    bool CanMoveAboveCall(Instruction *I, CallInst *CI);
    // If I is an associative operation usable for accumulator-recursion
    // elimination, return the accumulator's initial value; otherwise null.
    Value *CanTransformAccumulatorRecursion(Instruction *I, CallInst *CI);
  };
  RegisterOpt<TailCallElim> X("tailcallelim", "Tail Call Elimination");
}

// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
  return new TailCallElim();
}


/// AllocaMightEscapeToCalls - Return true if this alloca may be accessed by
/// callees of this function.  We only do very simple analysis right now, this
/// could be expanded in the future to use mod/ref information for particular
/// call sites if desired.
static bool AllocaMightEscapeToCalls(AllocaInst *AI) {
  // FIXME: do simple 'address taken' analysis.  Conservatively answering true
  // for every alloca means any function containing an alloca never gets its
  // calls marked 'tail' (see runOnFunction below).
  return true;
}

/// CheckForEscapingAllocas - Scan the specified basic block for alloca
/// instructions.  If it contains any that might be accessed by calls, return
/// true.
static bool CheckForEscapingAllocas(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (AllocaMightEscapeToCalls(AI))
        return true;
  return false;
}

/// runOnFunction - Drive the transformation: eliminate self-recursive tail
/// calls in every returning block, clean up trivial argument PHIs created in
/// the process, and finally mark calls 'tail' when no stack memory can be
/// observed by callees.  Returns true if the function was modified.
bool TailCallElim::runOnFunction(Function &F) {
  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg()) return false;

  // OldEntry is created lazily by ProcessReturningBlock on the first
  // successful elimination; ArgumentPHIs holds one PHI per formal argument.
  BasicBlock *OldEntry = 0;
  std::vector<PHINode*> ArgumentPHIs;
  bool MadeChange = false;

  bool FunctionContainsEscapingAllocas = false;

  // Loop over the function, looking for any returning blocks, and keeping track
  // of whether this function has any non-trivially used allocas.
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    if (!FunctionContainsEscapingAllocas)
      FunctionContainsEscapingAllocas = CheckForEscapingAllocas(BB);

    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator()))
      MadeChange |= ProcessReturningBlock(Ret, OldEntry, ArgumentPHIs);
  }

  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves.  Check to see if we did and clean up our mess if so.  This
  // occurs when a function passes an argument straight through to its tail
  // call.
  if (!ArgumentPHIs.empty()) {
    unsigned NumIncoming = ArgumentPHIs[0]->getNumIncomingValues();
    for (unsigned i = 0, e = ArgumentPHIs.size(); i != e; ++i) {
      PHINode *PN = ArgumentPHIs[i];
      // Look for a unique incoming value other than the PHI itself; V stays
      // null (and PN is kept) if two distinct values flow in.
      Value *V = 0;
      for (unsigned op = 0, e = NumIncoming; op != e; ++op) {
        Value *Op = PN->getIncomingValue(op);
        if (Op != PN) {
          if (V == 0) {
            V = Op;     // First value seen?
          } else if (V != Op) {
            V = 0;      // Saw a second, different value: PN is a real merge.
            break;
          }
        }
      }

      // If the PHI Node is a dynamic constant, replace it with the value it is.
      if (V) {
        PN->replaceAllUsesWith(V);
        PN->getParent()->getInstList().erase(PN);
      }
    }
  }

  // Finally, if this function contains no non-escaping allocas, mark all calls
  // in the function as eligible for tail calls (there is no stack memory for
  // them to access).
  if (!FunctionContainsEscapingAllocas)
    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I))
          CI->setTailCall();

  return MadeChange;
}


/// CanMoveAboveCall - Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
bool TailCallElim::CanMoveAboveCall(Instruction *I, CallInst *CI) {
  // FIXME: We can move load/store/call/free instructions above the call if the
  // call does not mod/ref the memory location being processed.
  if (I->mayWriteToMemory() || isa<LoadInst>(I))
    return false;

  // Otherwise, if this is a side-effect free instruction, check to make sure
  // that it does not use the return value of the call.  If it doesn't use the
  // return value of the call, it must only use things that are defined before
  // the call, or movable instructions between the call and the instruction
  // itself.
  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
    if (I->getOperand(i) == CI)
      return false;
  return true;
}

// isDynamicConstant - Return true if the specified value is the same when the
// return would exit as it was when the initial iteration of the recursive
// function was executed.
//
// We currently handle static constants and arguments that are not modified as
// part of the recursion.
//
static bool isDynamicConstant(Value *V, CallInst *CI) {
  if (isa<Constant>(V)) return true; // Static constants are always dyn consts

  // Check to see if this is an immutable argument, if so, the value
  // will be available to initialize the accumulator.
  if (Argument *Arg = dyn_cast<Argument>(V)) {
    // Figure out which argument number this is...
    unsigned ArgNo = 0;
    Function *F = CI->getParent()->getParent();
    for (Function::arg_iterator AI = F->arg_begin(); &*AI != Arg; ++AI)
      ++ArgNo;

    // If we are passing this argument into call as the corresponding
    // argument operand, then the argument is dynamically constant.
    // Otherwise, we cannot transform this function safely.
    // (Call operand 0 is the callee, so argument operands are 1-based.)
    if (CI->getOperand(ArgNo+1) == Arg)
      return true;
  }
  // Not a constant or immutable argument, we can't safely transform.
  return false;
}

// getCommonReturnValue - Check to see if the function containing the specified
// return instruction and tail call consistently returns the same
// runtime-constant value at all exit points.  If so, return the returned value.
//
static Value *getCommonReturnValue(ReturnInst *TheRI, CallInst *CI) {
  Function *F = TheRI->getParent()->getParent();
  Value *ReturnedValue = 0;

  // Examine every return other than TheRI: each must return the same value,
  // and that value must be computable at function entry (see
  // isDynamicConstant above).
  for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI)
    if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator()))
      if (RI != TheRI) {
        Value *RetOp = RI->getOperand(0);

        // We can only perform this transformation if the value returned is
        // evaluatable at the start of the initial invocation of the function,
        // instead of at the end of the evaluation.
        //
        if (!isDynamicConstant(RetOp, CI))
          return 0;

        if (ReturnedValue && RetOp != ReturnedValue)
          return 0; // Cannot transform if differing values are returned.
        ReturnedValue = RetOp;
      }
  return ReturnedValue;
}

/// CanTransformAccumulatorRecursion - If the specified instruction can be
/// transformed using accumulator recursion elimination, return the constant
/// which is the start of the accumulator value.  Otherwise return null.
///
Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
                                                      CallInst *CI) {
  // NOTE(review): the accumulator transform reorders the accumulation, which
  // is only sound if the operation is commutative as well as associative --
  // confirm that isAssociative() alone gives the required guarantee here
  // (later versions of this pass check isCommutative() too).
  if (!I->isAssociative()) return 0;
  assert(I->getNumOperands() == 2 &&
         "Associative operations should have 2 args!");

  // Exactly one operand should be the result of the call instruction...
  // (&& binds tighter than ||, so this reads: both are CI, or neither is.)
  if (I->getOperand(0) == CI && I->getOperand(1) == CI ||
      I->getOperand(0) != CI && I->getOperand(1) != CI)
    return 0;

  // The only user of this instruction we allow is a single return instruction.
  if (!I->hasOneUse() || !isa<ReturnInst>(I->use_back()))
    return 0;

  // Ok, now we have to check all of the other return instructions in this
  // function.  If they return non-constants or differing values, then we cannot
  // transform the function safely.
  return getCommonReturnValue(cast<ReturnInst>(I->use_back()), CI);
}

/// ProcessReturningBlock - Look for a self-recursive tail call feeding the
/// return Ret and, if the call/return pair is eligible, replace both with an
/// unconditional branch back to the (old) entry block.  Lazily creates the
/// new pseudo-entry block and the per-argument PHI nodes on the first
/// successful elimination.  Returns true if the block was transformed.
bool TailCallElim::ProcessReturningBlock(ReturnInst *Ret, BasicBlock *&OldEntry,
                                         std::vector<PHINode*> &ArgumentPHIs) {
  BasicBlock *BB = Ret->getParent();
  Function *F = BB->getParent();

  if (&BB->front() == Ret) // Make sure there is something before the ret...
    return false;

  // Scan backwards from the return, checking to see if there is a tail call in
  // this block.  If so, set CI to it.
  CallInst *CI;
  BasicBlock::iterator BBI = Ret;
  while (1) {
    CI = dyn_cast<CallInst>(BBI);
    if (CI && CI->getCalledFunction() == F)
      break;                        // Found a self-recursive call.

    if (BBI == BB->begin())
      return false;                 // Didn't find a potential tail call.
    --BBI;
  }

  // If we are introducing accumulator recursion to eliminate associative
  // operations after the call instruction, this variable contains the initial
  // value for the accumulator.  If this value is set, we actually perform
  // accumulator recursion elimination instead of simple tail recursion
  // elimination.
  Value *AccumulatorRecursionEliminationInitVal = 0;
  Instruction *AccumulatorRecursionInstr = 0;

  // Ok, we found a potential tail call.  We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  for (BBI = CI, ++BBI; &*BBI != Ret; ++BBI)
    if (!CanMoveAboveCall(BBI, CI)) {
      // If we can't move the instruction above the call, it might be because it
      // is an associative operation that could be transformed using accumulator
      // recursion elimination.  Check to see if this is the case, and if so,
      // remember the initial accumulator value for later.
      if ((AccumulatorRecursionEliminationInitVal =
                     CanTransformAccumulatorRecursion(BBI, CI))) {
        // Yes, this is accumulator recursion.  Remember which instruction
        // accumulates.
        AccumulatorRecursionInstr = BBI;
      } else {
        return false;   // Otherwise, we cannot eliminate the tail recursion!
      }
    }

  // We can only transform call/return pairs that either ignore the return value
  // of the call and return void, ignore the value of the call and return a
  // constant, return the value returned by the tail call, or that are being
  // accumulator recursion variable eliminated.
  if (Ret->getNumOperands() != 0 && Ret->getReturnValue() != CI &&
      AccumulatorRecursionEliminationInitVal == 0 &&
      !getCommonReturnValue(Ret, CI))
    return false;

  // OK! We can transform this tail call.  If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (OldEntry == 0) {
    OldEntry = &F->getEntryBlock();
    std::string OldName = OldEntry->getName(); OldEntry->setName("tailrecurse");
    BasicBlock *NewEntry = new BasicBlock(OldName, F, OldEntry);
    new BranchInst(OldEntry, NewEntry);

    // Now that we have created a new block, which jumps to the entry
    // block, insert a PHI node for each argument of the function.
    // For now, we initialize each PHI to only have the real arguments
    // which are passed in.
    Instruction *InsertPos = OldEntry->begin();
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I) {
      PHINode *PN = new PHINode(I->getType(), I->getName()+".tr", InsertPos);
      I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
      PN->addIncoming(I, NewEntry);
      ArgumentPHIs.push_back(PN);
    }
  }

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned i = 0, e = CI->getNumOperands()-1; i != e; ++i)
    ArgumentPHIs[i]->addIncoming(CI->getOperand(i+1), BB);

  // If we are introducing an accumulator variable to eliminate the recursion,
  // do so now.  Note that we _know_ that no subsequent tail recursion
  // eliminations will happen on this function because of the way the
  // accumulator recursion predicate is set up.
  //
  if (AccumulatorRecursionEliminationInitVal) {
    Instruction *AccRecInstr = AccumulatorRecursionInstr;
    // Start by inserting a new PHI node for the accumulator.
    PHINode *AccPN = new PHINode(AccRecInstr->getType(), "accumulator.tr",
                                 OldEntry->begin());

    // Loop over all of the predecessors of the tail recursion block.  For the
    // real entry into the function we seed the PHI with the initial value,
    // computed earlier.  For any other existing branches to this block (due to
    // other tail recursions eliminated) the accumulator is not modified.
    // Because we haven't added the branch in the current block to OldEntry yet,
    // it will not show up as a predecessor.
    for (pred_iterator PI = pred_begin(OldEntry), PE = pred_end(OldEntry);
         PI != PE; ++PI) {
      if (*PI == &F->getEntryBlock())
        AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, *PI);
      else
        AccPN->addIncoming(AccPN, *PI);
    }

    // Add an incoming argument for the current block, which is computed by our
    // associative accumulator instruction.
    AccPN->addIncoming(AccRecInstr, BB);

    // Next, rewrite the accumulator recursion instruction so that it does not
    // use the result of the call anymore, instead, use the PHI node we just
    // inserted.
    // (The boolean picks operand index 1 when operand 0 is not the call,
    // i.e. it overwrites whichever operand WAS the call result.)
    AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);

    // Finally, rewrite any return instructions in the program to return the PHI
    // node instead of the "initval" that they do currently.  This loop will
    // actually rewrite the return value we are destroying, but that's ok.
    for (Function::iterator BBI = F->begin(), E = F->end(); BBI != E; ++BBI)
      if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI->getTerminator()))
        RI->setOperand(0, AccPN);
    ++NumAccumAdded;
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  new BranchInst(OldEntry, Ret);
  BB->getInstList().erase(Ret);  // Remove return.
  BB->getInstList().erase(CI);   // Remove call.
  ++NumEliminated;
  return true;
}