//===- TailRecursionElimination.cpp - Eliminate Tail Calls ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file transforms calls of the current function (self recursion) followed
// by a return instruction with a branch to the entry of the function, creating
// a loop. This pass also implements the following extensions to the basic
// algorithm:
//
//  1. Trivial instructions between the call and return do not prevent the
//     transformation from taking place, though currently the analysis cannot
//     support moving any really useful instructions (only dead ones).
//  2. This pass transforms functions that are prevented from being tail
//     recursive by an associative and commutative expression to use an
//     accumulator variable, thus compiling the typical naive factorial or
//     'fib' implementation into efficient code.
//  3. TRE is performed if the function returns void, if the return
//     returns the result returned by the call, or if the function returns a
//     run-time constant on all exits from the function. It is possible, though
//     unlikely, that the return returns something else (like constant 0), and
//     can still be TRE'd. It can be TRE'd if ALL OTHER return instructions in
//     the function return the exact same value.
//  4. If it can prove that callees do not access their caller stack frame,
//     they are marked as eligible for tail call elimination (by the code
//     generator).
//
// There are several improvements that could be made:
//
//  1. If the function has any alloca instructions, these instructions will be
//     moved out of the entry block of the function, causing them to be
//     evaluated each time through the tail recursion. Safely keeping allocas
//     in the entry block requires analysis to prove that the tail-called
//     function does not read or write the stack object.
//  2. Tail recursion is only performed if the call immediately precedes the
//     return instruction. It's possible that there could be a jump between
//     the call and the return.
//  3. There can be intervening operations between the call and the return that
//     prevent the TRE from occurring. For example, there could be GEP's and
//     stores to memory that will not be read or written by the call. This
//     requires some substantial analysis (such as with DSA) to prove safe to
//     move ahead of the call, but doing so could allow many more TREs to be
//     performed, for example in TreeAdd/TreeAlloc from the treeadd benchmark.
//  4. The algorithm we use to detect if callees access their caller stack
//     frames is very primitive.
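//
// As a purely illustrative sketch (not code from this pass), the accumulator
// extension described in point 2 above turns a naive factorial such as
//
//   unsigned fact(unsigned n) {
//     if (n <= 1) return 1;
//     return n * fact(n - 1); // not a tail call: the multiply happens after
//   }
//
// into the moral equivalent of
//
//   unsigned fact(unsigned n) {
//     unsigned Accum = 1;           // becomes the "accumulator.tr" PHI node
//     for (; n > 1; --n)
//       Accum *= n;                 // '*' is associative and commutative
//     return Accum;
//   }
//
// where the loop corresponds to the branch back to the "tailrecurse" block
// created by eliminateRecursiveTailCall() below.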
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DomTreeUpdater.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
using namespace llvm;

#define DEBUG_TYPE "tailcallelim"

STATISTIC(NumEliminated, "Number of tail calls removed");
STATISTIC(NumRetDuped,   "Number of returns duplicated");
STATISTIC(NumAccumAdded, "Number of accumulators introduced");

/// Scan the specified function for alloca instructions.
/// If it contains any dynamic allocas, returns false.
static bool canTRE(Function &F) {
  // Because of PR962, we don't TRE dynamic allocas.
  return llvm::all_of(instructions(F), [](Instruction &I) {
    auto *AI = dyn_cast<AllocaInst>(&I);
    return !AI || AI->isStaticAlloca();
  });
}

namespace {
struct AllocaDerivedValueTracker {
  // Start at a root value and walk its use-def chain to mark calls that use
  // the value or a derived value in AllocaUsers, and places where it may
  // escape in EscapePoints.
  void walk(Value *Root) {
    SmallVector<Use *, 32> Worklist;
    SmallPtrSet<Use *, 32> Visited;

    auto AddUsesToWorklist = [&](Value *V) {
      for (auto &U : V->uses()) {
        if (!Visited.insert(&U).second)
          continue;
        Worklist.push_back(&U);
      }
    };

    AddUsesToWorklist(Root);

    while (!Worklist.empty()) {
      Use *U = Worklist.pop_back_val();
      Instruction *I = cast<Instruction>(U->getUser());

      switch (I->getOpcode()) {
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(I);
        bool IsNocapture =
            CS.isDataOperand(U) && CS.doesNotCapture(CS.getDataOperandNo(U));
        callUsesLocalStack(CS, IsNocapture);
        if (IsNocapture) {
          // If the alloca-derived argument is passed in as nocapture, then it
          // can't propagate to the call's return. That would be capturing.
          continue;
        }
        break;
      }
      case Instruction::Load: {
        // The result of a load is not alloca-derived (unless an alloca has
        // otherwise escaped, but this is a local analysis).
        continue;
      }
      case Instruction::Store: {
        if (U->getOperandNo() == 0)
          EscapePoints.insert(I);
        continue; // Stores have no users to analyze.
      }
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::PHI:
      case Instruction::Select:
      case Instruction::AddrSpaceCast:
        break;
      default:
        EscapePoints.insert(I);
        break;
      }

      AddUsesToWorklist(I);
    }
  }

  void callUsesLocalStack(CallSite CS, bool IsNocapture) {
    // Add it to the list of alloca users.
    AllocaUsers.insert(CS.getInstruction());

    // If it's nocapture then it can't capture this alloca.
    if (IsNocapture)
      return;

    // If it can write to memory, it can leak the alloca value.
    if (!CS.onlyReadsMemory())
      EscapePoints.insert(CS.getInstruction());
  }

  SmallPtrSet<Instruction *, 32> AllocaUsers;
  SmallPtrSet<Instruction *, 32> EscapePoints;
};
}

static bool markTails(Function &F, bool &AllCallsAreTailCalls,
                      OptimizationRemarkEmitter *ORE) {
  if (F.callsFunctionThatReturnsTwice())
    return false;
  AllCallsAreTailCalls = true;

  // The local stack holds all alloca instructions and all byval arguments.
  AllocaDerivedValueTracker Tracker;
  for (Argument &Arg : F.args()) {
    if (Arg.hasByValAttr())
      Tracker.walk(&Arg);
  }
  for (auto &BB : F) {
    for (auto &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        Tracker.walk(AI);
  }

  bool Modified = false;

  // Track whether a block is reachable after an alloca has escaped. Blocks
  // that contain the escaping instruction will be marked as being visited
  // without an escaped alloca, since that is how the block began.
  enum VisitType {
    UNVISITED,
    UNESCAPED,
    ESCAPED
  };
  DenseMap<BasicBlock *, VisitType> Visited;

  // We propagate the fact that an alloca has escaped from block to successor.
  // Visit the blocks that are propagating the escapedness first. To do this,
  // we maintain two worklists.
  SmallVector<BasicBlock *, 32> WorklistUnescaped, WorklistEscaped;

  // We may enter a block and visit it thinking that no alloca has escaped yet,
  // then see an escape point and go back around a loop edge and come back to
  // the same block twice. Because of this, we defer setting tail on calls when
  // we first encounter them in a block. Every entry in this list does not
  // statically use an alloca via use-def chain analysis, but may find an
  // alloca through other means if the block turns out to be reachable after an
  // escape point.
  SmallVector<CallInst *, 32> DeferredTails;

  BasicBlock *BB = &F.getEntryBlock();
  VisitType Escaped = UNESCAPED;
  do {
    for (auto &I : *BB) {
      if (Tracker.EscapePoints.count(&I))
        Escaped = ESCAPED;

      CallInst *CI = dyn_cast<CallInst>(&I);
      if (!CI || CI->isTailCall() || isa<DbgInfoIntrinsic>(&I))
        continue;

      bool IsNoTail = CI->isNoTailCall() || CI->hasOperandBundles();

      if (!IsNoTail && CI->doesNotAccessMemory()) {
        // A call to a readnone function whose arguments are all things
        // computed outside this function can be marked tail. Even if you
        // stored the alloca address into a global, a readnone function can't
        // load the global anyhow.
        //
        // Note that this runs whether we know an alloca has escaped or not.
        // If it has, then we can't trust Tracker.AllocaUsers to be accurate.
        bool SafeToTail = true;
        for (auto &Arg : CI->arg_operands()) {
          if (isa<Constant>(Arg.getUser()))
            continue;
          if (Argument *A = dyn_cast<Argument>(Arg.getUser()))
            if (!A->hasByValAttr())
              continue;
          SafeToTail = false;
          break;
        }
        if (SafeToTail) {
          using namespace ore;
          ORE->emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "tailcall-readnone", CI)
                   << "marked as tail call candidate (readnone)";
          });
          CI->setTailCall();
          Modified = true;
          continue;
        }
      }

      if (!IsNoTail && Escaped == UNESCAPED && !Tracker.AllocaUsers.count(CI)) {
        DeferredTails.push_back(CI);
      } else {
        AllCallsAreTailCalls = false;
      }
    }

    for (auto *SuccBB : make_range(succ_begin(BB), succ_end(BB))) {
      auto &State = Visited[SuccBB];
      if (State < Escaped) {
        State = Escaped;
        if (State == ESCAPED)
          WorklistEscaped.push_back(SuccBB);
        else
          WorklistUnescaped.push_back(SuccBB);
      }
    }

    if (!WorklistEscaped.empty()) {
      BB = WorklistEscaped.pop_back_val();
      Escaped = ESCAPED;
    } else {
      BB = nullptr;
      while (!WorklistUnescaped.empty()) {
        auto *NextBB = WorklistUnescaped.pop_back_val();
        if (Visited[NextBB] == UNESCAPED) {
          BB = NextBB;
          Escaped = UNESCAPED;
          break;
        }
      }
    }
  } while (BB);

  for (CallInst *CI : DeferredTails) {
    if (Visited[CI->getParent()] != ESCAPED) {
      // If the escape point was part way through the block, calls after the
      // escape point wouldn't have been put into DeferredTails.
      LLVM_DEBUG(dbgs() << "Marked as tail call candidate: " << *CI << "\n");
      CI->setTailCall();
      Modified = true;
    } else {
      AllCallsAreTailCalls = false;
    }
  }

  return Modified;
}

/// Return true if it is safe to move the specified
/// instruction from after the call to before the call, assuming that all
/// instructions between the call and this instruction are movable.
///
static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
  // FIXME: We can move load/store/call/free instructions above the call if the
  // call does not mod/ref the memory location being processed.
  if (I->mayHaveSideEffects()) // This also handles volatile loads.
    return false;

  if (LoadInst *L = dyn_cast<LoadInst>(I)) {
    // Loads may always be moved above calls without side effects.
    if (CI->mayHaveSideEffects()) {
      // Non-volatile loads may be moved above a call with side effects if it
      // does not write to memory and the load provably won't trap.
      // Writes to memory only matter if they may alias the pointer
      // being loaded from.
      const DataLayout &DL = L->getModule()->getDataLayout();
      if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
          !isSafeToLoadUnconditionally(L->getPointerOperand(),
                                       L->getAlignment(), DL, L))
        return false;
    }
  }

  // Otherwise, if this is a side-effect free instruction, check to make sure
  // that it does not use the return value of the call. If it doesn't use the
  // return value of the call, it must only use things that are defined before
  // the call, or movable instructions between the call and the instruction
  // itself.
  return !is_contained(I->operands(), CI);
}

/// Return true if the specified value is the same when the return would exit
/// as it was when the initial iteration of the recursive function was
/// executed.
///
/// We currently handle static constants and arguments that are not modified as
/// part of the recursion.
static bool isDynamicConstant(Value *V, CallInst *CI, ReturnInst *RI) {
  if (isa<Constant>(V)) return true; // Static constants are always dyn consts

  // Check to see if this is an immutable argument; if so, the value
  // will be available to initialize the accumulator.
  if (Argument *Arg = dyn_cast<Argument>(V)) {
    // Figure out which argument number this is...
    unsigned ArgNo = 0;
    Function *F = CI->getParent()->getParent();
    for (Function::arg_iterator AI = F->arg_begin(); &*AI != Arg; ++AI)
      ++ArgNo;

    // If we are passing this argument into call as the corresponding
    // argument operand, then the argument is dynamically constant.
    // Otherwise, we cannot transform this function safely.
    if (CI->getArgOperand(ArgNo) == Arg)
      return true;
  }

  // Switch cases are always constant integers. If the value is being switched
  // on and the return is only reachable from one of its cases, it's
  // effectively constant.
  if (BasicBlock *UniquePred = RI->getParent()->getUniquePredecessor())
    if (SwitchInst *SI = dyn_cast<SwitchInst>(UniquePred->getTerminator()))
      if (SI->getCondition() == V)
        return SI->getDefaultDest() != RI->getParent();

  // Not a constant or immutable argument, we can't safely transform.
  return false;
}

/// Check to see if the function containing the specified tail call
/// consistently returns the same runtime-constant value at all exit points
/// except for IgnoreRI. If so, return the returned value.
static Value *getCommonReturnValue(ReturnInst *IgnoreRI, CallInst *CI) {
  Function *F = CI->getParent()->getParent();
  Value *ReturnedValue = nullptr;

  for (BasicBlock &BBI : *F) {
    ReturnInst *RI = dyn_cast<ReturnInst>(BBI.getTerminator());
    if (RI == nullptr || RI == IgnoreRI) continue;

    // We can only perform this transformation if the value returned is
    // evaluatable at the start of the initial invocation of the function,
    // instead of at the end of the evaluation.
    //
    Value *RetOp = RI->getOperand(0);
    if (!isDynamicConstant(RetOp, CI, RI))
      return nullptr;

    if (ReturnedValue && RetOp != ReturnedValue)
      return nullptr; // Cannot transform if differing values are returned.
    ReturnedValue = RetOp;
  }
  return ReturnedValue;
}

/// If the specified instruction can be transformed using accumulator recursion
/// elimination, return the constant which is the start of the accumulator
/// value. Otherwise return null.
static Value *canTransformAccumulatorRecursion(Instruction *I, CallInst *CI) {
  if (!I->isAssociative() || !I->isCommutative()) return nullptr;
  assert(I->getNumOperands() == 2 &&
         "Associative/commutative operations should have 2 args!");

  // Exactly one operand should be the result of the call instruction.
  if ((I->getOperand(0) == CI && I->getOperand(1) == CI) ||
      (I->getOperand(0) != CI && I->getOperand(1) != CI))
    return nullptr;

  // The only user of this instruction we allow is a single return instruction.
  if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
    return nullptr;

  // Ok, now we have to check all of the other return instructions in this
  // function. If they return non-constants or differing values, then we
  // cannot transform the function safely.
  return getCommonReturnValue(cast<ReturnInst>(I->user_back()), CI);
}

static Instruction *firstNonDbg(BasicBlock::iterator I) {
  while (isa<DbgInfoIntrinsic>(I))
    ++I;
  return &*I;
}

static CallInst *findTRECandidate(Instruction *TI,
                                  bool CannotTailCallElimCallsMarkedTail,
                                  const TargetTransformInfo *TTI) {
  BasicBlock *BB = TI->getParent();
  Function *F = BB->getParent();

  if (&BB->front() == TI) // Make sure there is something before the terminator.
    return nullptr;

  // Scan backwards from the return, checking to see if there is a tail call in
  // this block. If so, set CI to it.
  CallInst *CI = nullptr;
  BasicBlock::iterator BBI(TI);
  while (true) {
    CI = dyn_cast<CallInst>(BBI);
    if (CI && CI->getCalledFunction() == F)
      break;

    if (BBI == BB->begin())
      return nullptr; // Didn't find a potential tail call.
    --BBI;
  }

  // If this call is marked as a tail call, and if there are dynamic allocas in
  // the function, we cannot perform this optimization.
  if (CI->isTailCall() && CannotTailCallElimCallsMarkedTail)
    return nullptr;

  // As a special case, detect code like this:
  //   double fabs(double f) { return __builtin_fabs(f); } // a 'fabs' call
  // and disable this xform in this case, because the code generator will
  // lower the call to fabs into inline code.
  if (BB == &F->getEntryBlock() &&
      firstNonDbg(BB->front().getIterator()) == CI &&
      firstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
      !TTI->isLoweredToCall(CI->getCalledFunction())) {
    // A single-block function with just a call and a return. Check that
    // the arguments match.
    CallSite::arg_iterator I = CallSite(CI).arg_begin(),
                           E = CallSite(CI).arg_end();
    Function::arg_iterator FI = F->arg_begin(),
                           FE = F->arg_end();
    for (; I != E && FI != FE; ++I, ++FI)
      if (*I != &*FI) break;
    if (I == E && FI == FE)
      return nullptr;
  }

  return CI;
}

static bool eliminateRecursiveTailCall(
    CallInst *CI, ReturnInst *Ret, BasicBlock *&OldEntry,
    bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs,
    AliasAnalysis *AA, OptimizationRemarkEmitter *ORE, DomTreeUpdater &DTU) {
  // If we are introducing accumulator recursion to eliminate operations after
  // the call instruction that are both associative and commutative, the
  // initial value for the accumulator is placed in this variable. If this
  // value is set then we actually perform accumulator recursion elimination
  // instead of simple tail recursion elimination. If the operation is an LLVM
  // instruction (eg: "add") then it is recorded in AccumulatorRecursionInstr.
  // If not, then we are handling the case when the return instruction returns
  // a constant C which is different from the constant returned by other return
  // instructions (which is recorded in AccumulatorRecursionEliminationInitVal).
  // This is a special case of accumulator recursion, the operation being
  // "return C".
  Value *AccumulatorRecursionEliminationInitVal = nullptr;
  Instruction *AccumulatorRecursionInstr = nullptr;

  // Ok, we found a potential tail call. We can currently only transform the
  // tail call if all of the instructions between the call and the return are
  // movable to above the call itself, leaving the call next to the return.
  // Check that this is the case now.
  BasicBlock::iterator BBI(CI);
  for (++BBI; &*BBI != Ret; ++BBI) {
    if (canMoveAboveCall(&*BBI, CI, AA))
      continue;

    // If we can't move the instruction above the call, it might be because it
    // is an associative and commutative operation that could be transformed
    // using accumulator recursion elimination. Check to see if this is the
    // case, and if so, remember the initial accumulator value for later.
    if ((AccumulatorRecursionEliminationInitVal =
             canTransformAccumulatorRecursion(&*BBI, CI))) {
      // Yes, this is accumulator recursion. Remember which instruction
      // accumulates.
      AccumulatorRecursionInstr = &*BBI;
    } else {
      return false; // Otherwise, we cannot eliminate the tail recursion!
    }
  }

  // We can only transform call/return pairs that either ignore the return
  // value of the call and return void, ignore the value of the call and return
  // a constant, return the value returned by the tail call, or that are being
  // eliminated via accumulator recursion.
  if (Ret->getNumOperands() == 1 && Ret->getReturnValue() != CI &&
      !isa<UndefValue>(Ret->getReturnValue()) &&
      AccumulatorRecursionEliminationInitVal == nullptr &&
      !getCommonReturnValue(nullptr, CI)) {
    // One case remains that we are able to handle: the current return
    // instruction returns a constant, and all other return instructions
    // return a different constant.
    if (!isDynamicConstant(Ret->getReturnValue(), CI, Ret))
      return false; // Current return instruction does not return a constant.
    // Check that all other return instructions return a common constant. If
    // so, record it in AccumulatorRecursionEliminationInitVal.
    AccumulatorRecursionEliminationInitVal = getCommonReturnValue(Ret, CI);
    if (!AccumulatorRecursionEliminationInitVal)
      return false;
  }

  BasicBlock *BB = Ret->getParent();
  Function *F = BB->getParent();

  using namespace ore;
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "tailcall-recursion", CI)
           << "transforming tail recursion into loop";
  });

  // OK! We can transform this tail call. If this is the first one found,
  // create the new entry block, allowing us to branch back to the old entry.
  if (!OldEntry) {
    OldEntry = &F->getEntryBlock();
    BasicBlock *NewEntry = BasicBlock::Create(F->getContext(), "", F, OldEntry);
    NewEntry->takeName(OldEntry);
    OldEntry->setName("tailrecurse");
    BranchInst *BI = BranchInst::Create(OldEntry, NewEntry);
    BI->setDebugLoc(CI->getDebugLoc());

    // If this tail call is marked 'tail' and if there are any allocas in the
    // entry block, move them up to the new entry block.
    TailCallsAreMarkedTail = CI->isTailCall();
    if (TailCallsAreMarkedTail)
      // Move all fixed sized allocas from OldEntry to NewEntry.
      for (BasicBlock::iterator OEBI = OldEntry->begin(), E = OldEntry->end(),
                                NEBI = NewEntry->begin(); OEBI != E; )
        if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
          if (isa<ConstantInt>(AI->getArraySize()))
            AI->moveBefore(&*NEBI);

    // Now that we have created a new block, which jumps to the entry
    // block, insert a PHI node for each argument of the function.
    // For now, we initialize each PHI to only have the real arguments
    // which are passed in.
    Instruction *InsertPos = &OldEntry->front();
    for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end();
         I != E; ++I) {
      PHINode *PN = PHINode::Create(I->getType(), 2,
                                    I->getName() + ".tr", InsertPos);
      I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
      PN->addIncoming(&*I, NewEntry);
      ArgumentPHIs.push_back(PN);
    }
    // The entry block was changed from OldEntry to NewEntry.
    // The forward DominatorTree needs to be recalculated when the EntryBB is
    // changed. In this corner-case we recalculate the entire tree.
    DTU.recalculate(*NewEntry->getParent());
  }

  // If this function has self recursive calls in the tail position where some
  // are marked tail and some are not, only transform one flavor or another.
  // We have to choose whether we move allocas in the entry block to the new
  // entry block or not, so we can't make a good choice for both. NOTE: We
  // could do slightly better here in the case that the function has no entry
  // block allocas.
  if (TailCallsAreMarkedTail && !CI->isTailCall())
    return false;

  // Ok, now that we know we have a pseudo-entry block WITH all of the
  // required PHI nodes, add entries into the PHI node for the actual
  // parameters passed into the tail-recursive call.
  for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i)
    ArgumentPHIs[i]->addIncoming(CI->getArgOperand(i), BB);

  // If we are introducing an accumulator variable to eliminate the recursion,
  // do so now. Note that we _know_ that no subsequent tail recursion
  // eliminations will happen on this function because of the way the
  // accumulator recursion predicate is set up.
  //
  if (AccumulatorRecursionEliminationInitVal) {
    Instruction *AccRecInstr = AccumulatorRecursionInstr;
    // Start by inserting a new PHI node for the accumulator.
    pred_iterator PB = pred_begin(OldEntry), PE = pred_end(OldEntry);
    PHINode *AccPN = PHINode::Create(
        AccumulatorRecursionEliminationInitVal->getType(),
        std::distance(PB, PE) + 1, "accumulator.tr", &OldEntry->front());

    // Loop over all of the predecessors of the tail recursion block. For the
    // real entry into the function we seed the PHI with the initial value,
    // computed earlier. For any other existing branches to this block (due to
    // other tail recursions eliminated) the accumulator is not modified.
    // Because we haven't added the branch in the current block to OldEntry
    // yet, it will not show up as a predecessor.
    for (pred_iterator PI = PB; PI != PE; ++PI) {
      BasicBlock *P = *PI;
      if (P == &F->getEntryBlock())
        AccPN->addIncoming(AccumulatorRecursionEliminationInitVal, P);
      else
        AccPN->addIncoming(AccPN, P);
    }

    if (AccRecInstr) {
      // Add an incoming argument for the current block, which is computed by
      // our associative and commutative accumulator instruction.
      AccPN->addIncoming(AccRecInstr, BB);

      // Next, rewrite the accumulator recursion instruction so that it does
      // not use the result of the call anymore, instead, use the PHI node we
      // just inserted.
      AccRecInstr->setOperand(AccRecInstr->getOperand(0) != CI, AccPN);
    } else {
      // Add an incoming argument for the current block, which is just the
      // constant returned by the current return instruction.
      AccPN->addIncoming(Ret->getReturnValue(), BB);
    }

    // Finally, rewrite any return instructions in the program to return the
    // PHI node instead of the "initval" that they do currently. This loop
    // will actually rewrite the return value we are destroying, but that's ok.
    for (BasicBlock &BBI : *F)
      if (ReturnInst *RI = dyn_cast<ReturnInst>(BBI.getTerminator()))
        RI->setOperand(0, AccPN);
    ++NumAccumAdded;
  }

  // Now that all of the PHI nodes are in place, remove the call and
  // ret instructions, replacing them with an unconditional branch.
  BranchInst *NewBI = BranchInst::Create(OldEntry, Ret);
  NewBI->setDebugLoc(CI->getDebugLoc());

  BB->getInstList().erase(Ret);  // Remove return.
  BB->getInstList().erase(CI);   // Remove call.
  DTU.insertEdge(BB, OldEntry);
  ++NumEliminated;
  return true;
}

static bool foldReturnAndProcessPred(
    BasicBlock *BB, ReturnInst *Ret, BasicBlock *&OldEntry,
    bool &TailCallsAreMarkedTail, SmallVectorImpl<PHINode *> &ArgumentPHIs,
    bool CannotTailCallElimCallsMarkedTail, const TargetTransformInfo *TTI,
    AliasAnalysis *AA, OptimizationRemarkEmitter *ORE, DomTreeUpdater &DTU) {
  bool Change = false;

  // Make sure this block is a trivial return block.
  assert(BB->getFirstNonPHIOrDbg() == Ret &&
         "Trying to fold non-trivial return block");

  // If the return block contains nothing but the return and PHI's,
  // there might be an opportunity to duplicate the return in its
  // predecessors and perform TRE there. Look for predecessors that end
  // in unconditional branch and recursive call(s).
  SmallVector<BranchInst*, 8> UncondBranchPreds;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    TerminatorInst *PTI = Pred->getTerminator();
    if (BranchInst *BI = dyn_cast<BranchInst>(PTI))
      if (BI->isUnconditional())
        UncondBranchPreds.push_back(BI);
  }

  while (!UncondBranchPreds.empty()) {
    BranchInst *BI = UncondBranchPreds.pop_back_val();
    BasicBlock *Pred = BI->getParent();
    if (CallInst *CI =
            findTRECandidate(BI, CannotTailCallElimCallsMarkedTail, TTI)) {
      LLVM_DEBUG(dbgs() << "FOLDING: " << *BB
                        << "INTO UNCOND BRANCH PRED: " << *Pred);
      ReturnInst *RI = FoldReturnIntoUncondBranch(Ret, BB, Pred, &DTU);

      // Cleanup: if all predecessors of BB have been eliminated by
      // FoldReturnIntoUncondBranch, delete it. It is important to empty it,
      // because the ret instruction in there is still using a value which
      // eliminateRecursiveTailCall will attempt to remove.
      if (!BB->hasAddressTaken() && pred_begin(BB) == pred_end(BB))
        DTU.deleteBB(BB);

      eliminateRecursiveTailCall(CI, RI, OldEntry, TailCallsAreMarkedTail,
                                 ArgumentPHIs, AA, ORE, DTU);
      ++NumRetDuped;
      Change = true;
    }
  }

  return Change;
}

static bool processReturningBlock(
    ReturnInst *Ret, BasicBlock *&OldEntry, bool &TailCallsAreMarkedTail,
    SmallVectorImpl<PHINode *> &ArgumentPHIs,
    bool CannotTailCallElimCallsMarkedTail, const TargetTransformInfo *TTI,
    AliasAnalysis *AA, OptimizationRemarkEmitter *ORE, DomTreeUpdater &DTU) {
  CallInst *CI = findTRECandidate(Ret, CannotTailCallElimCallsMarkedTail, TTI);
  if (!CI)
    return false;

  return eliminateRecursiveTailCall(CI, Ret, OldEntry, TailCallsAreMarkedTail,
                                    ArgumentPHIs, AA, ORE, DTU);
}

static bool eliminateTailRecursion(Function &F, const TargetTransformInfo *TTI,
                                   AliasAnalysis *AA,
                                   OptimizationRemarkEmitter *ORE,
                                   DomTreeUpdater &DTU) {
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  bool MadeChange = false;
  bool AllCallsAreTailCalls = false;
  MadeChange |= markTails(F, AllCallsAreTailCalls, ORE);
  if (!AllCallsAreTailCalls)
    return MadeChange;

  // If this function is a varargs function, we won't be able to PHI the args
  // right, so don't even try to convert it...
  if (F.getFunctionType()->isVarArg())
    return false;

  BasicBlock *OldEntry = nullptr;
  bool TailCallsAreMarkedTail = false;
  SmallVector<PHINode*, 8> ArgumentPHIs;

  // If false, we cannot perform TRE on tail calls marked with the 'tail'
  // attribute, because doing so would cause the stack size to increase (real
  // TRE would deallocate variable sized allocas, TRE doesn't).
  bool CanTRETailMarkedCall = canTRE(F);

  // Change any tail recursive calls to loops.
  //
  // FIXME: The code generator produces really bad code when an 'escaping
  // alloca' is changed from being a static alloca to being a dynamic alloca.
  // Until this is resolved, disable this transformation if that would ever
  // happen. This bug is PR962.
  for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; /*in loop*/) {
    BasicBlock *BB = &*BBI++; // foldReturnAndProcessPred may delete BB.
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
      bool Change = processReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
                                          ArgumentPHIs, !CanTRETailMarkedCall,
                                          TTI, AA, ORE, DTU);
      if (!Change && BB->getFirstNonPHIOrDbg() == Ret)
        Change = foldReturnAndProcessPred(
            BB, Ret, OldEntry, TailCallsAreMarkedTail, ArgumentPHIs,
            !CanTRETailMarkedCall, TTI, AA, ORE, DTU);
      MadeChange |= Change;
    }
  }

  // If we eliminated any tail recursions, it's possible that we inserted some
  // silly PHI nodes which just merge an initial value (the incoming operand)
  // with themselves. Check to see if we did and clean up our mess if so. This
  // occurs when a function passes an argument straight through to its tail
  // call.
  for (PHINode *PN : ArgumentPHIs) {
    // If the PHI Node is a dynamic constant, replace it with the value it is.
    if (Value *PNV = SimplifyInstruction(PN, F.getParent()->getDataLayout())) {
      PN->replaceAllUsesWith(PNV);
      PN->eraseFromParent();
    }
  }

  return MadeChange;
}

namespace {
struct TailCallElim : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  TailCallElim() : FunctionPass(ID) {
    initializeTailCallElimPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<PostDominatorTreeWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *PDTWP = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>();
    auto *PDT = PDTWP ? &PDTWP->getPostDomTree() : nullptr;
    // There is no noticeable performance difference here between Lazy and
    // Eager UpdateStrategy based on some test results. It is feasible to
    // switch the UpdateStrategy to Lazy if we find it profitable later.
    DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);

    return eliminateTailRecursion(
        F, &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
        &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(), DTU);
  }
};
}

char TailCallElim::ID = 0;
INITIALIZE_PASS_BEGIN(TailCallElim, "tailcallelim", "Tail Call Elimination",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(TailCallElim, "tailcallelim", "Tail Call Elimination",
                    false, false)

// Public interface to the TailCallElimination pass
FunctionPass *llvm::createTailCallEliminationPass() {
  return new TailCallElim();
}

PreservedAnalyses TailCallElimPass::run(Function &F,
                                        FunctionAnalysisManager &AM) {

  TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  auto *DT = AM.getCachedResult<DominatorTreeAnalysis>(F);
  auto *PDT = AM.getCachedResult<PostDominatorTreeAnalysis>(F);
  // There is no noticeable performance difference here between Lazy and Eager
  // UpdateStrategy based on some test results. It is feasible to switch the
  // UpdateStrategy to Lazy if we find it profitable later.
  DomTreeUpdater DTU(DT, PDT, DomTreeUpdater::UpdateStrategy::Eager);
  bool Changed = eliminateTailRecursion(F, &TTI, &AA, &ORE, DTU);

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<PostDominatorTreeAnalysis>();
  return PA;
}
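
// Usage note (illustrative, and an assumption about the surrounding opt tool
// rather than anything defined in this file): with the pass name
// "tailcallelim" registered above, the transformation can typically be
// exercised in isolation with
//   opt -tailcallelim -S in.ll          (legacy pass manager)
//   opt -passes=tailcallelim -S in.ll   (new pass manager)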