//===- JumpThreading.cpp - Thread control through conditional blocks ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Jump Threading pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/JumpThreading.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <memory>
using namespace llvm;
using namespace jumpthreading;

#define DEBUG_TYPE "jump-threading"

STATISTIC(NumThreads, "Number of jumps threaded");
STATISTIC(NumFolds,   "Number of terminators folded");
STATISTIC(NumDupes,   "Number of branch blocks duplicated to eliminate phi");

static cl::opt<unsigned>
BBDuplicateThreshold("jump-threading-threshold",
          cl::desc("Max block size to duplicate for jump threading"),
          cl::init(6), cl::Hidden);

static cl::opt<unsigned>
ImplicationSearchThreshold(
  "jump-threading-implication-search-threshold",
  cl::desc("The number of predecessors to search for a stronger "
           "condition to use to thread over a weaker condition"),
  cl::init(3), cl::Hidden);

namespace {
  /// This pass performs 'jump threading', which looks at blocks that have
  /// multiple predecessors and multiple successors.  If one or more of the
  /// predecessors of the block can be proven to always jump to one of the
  /// successors, we forward the edge from the predecessor to the successor by
  /// duplicating the contents of this block.
  ///
  /// An example of when this can occur is code like this:
  ///
  ///   if () { ...
  ///     X = 4;
  ///   }
  ///   if (X < 3) {
  ///
  /// In this case, the unconditional branch at the end of the first if can be
  /// revectored to the false side of the second if.
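  ///
  /// An illustrative IR sketch of the effect (hypothetical block names, not
  /// taken from an actual test): given
  ///
  ///   then:                            ; X == 4 on this path
  ///     br label %join
  ///   join:
  ///     %x = phi i32 [ 4, %then ], [ %x0, %entry ]
  ///     %c = icmp slt i32 %x, 3
  ///     br i1 %c, label %if2, label %end
  ///
  /// threading rewrites the branch in %then to target %end directly, because
  /// "icmp slt 4, 3" folds to false along that edge.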
  ///
  class JumpThreading : public FunctionPass {
    JumpThreadingPass Impl;

  public:
    static char ID; // Pass identification
    JumpThreading(int T = -1) : FunctionPass(ID), Impl(T) {
      initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<LazyValueInfoWrapperPass>();
      AU.addPreserved<LazyValueInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
    }

    void releaseMemory() override { Impl.releaseMemory(); }
  };
}

char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
                "Jump Threading", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(JumpThreading, "jump-threading",
                "Jump Threading", false, false)

// Public interface to the Jump Threading pass
FunctionPass *llvm::createJumpThreadingPass(int Threshold) {
  return new JumpThreading(Threshold);
}

JumpThreadingPass::JumpThreadingPass(int T) {
  BBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T);
}

/// runOnFunction - Top level algorithm.
///
bool JumpThreading::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  bool HasProfileData = F.getEntryCount().hasValue();
  if (HasProfileData) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }
  return Impl.runImpl(F, TLI, LVI, HasProfileData, std::move(BFI),
                      std::move(BPI));
}

PreservedAnalyses JumpThreadingPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {

  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &LVI = AM.getResult<LazyValueAnalysis>(F);
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  bool HasProfileData = F.getEntryCount().hasValue();
  if (HasProfileData) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }
  bool Changed =
      runImpl(F, &TLI, &LVI, HasProfileData, std::move(BFI), std::move(BPI));

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  return PA;
}

bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
                                LazyValueInfo *LVI_, bool HasProfileData_,
                                std::unique_ptr<BlockFrequencyInfo> BFI_,
                                std::unique_ptr<BranchProbabilityInfo> BPI_) {

  DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
  TLI = TLI_;
  LVI = LVI_;
  BFI.reset();
  BPI.reset();
  // When profile data is available, we need to update edge weights after
  // successful jump threading, which requires both BPI and BFI being
  // available.
  HasProfileData = HasProfileData_;
  if (HasProfileData) {
    BPI = std::move(BPI_);
    BFI = std::move(BFI_);
  }

  // Remove unreachable blocks from the function, as they may result in an
  // infinite loop. We do threading as long as we find something profitable.
  // Jump threading a branch can create other opportunities. If these
  // opportunities form a cycle, i.e., if any jump threading is undoing
  // previous threading in the path, then we will loop forever. We take care
  // of this issue by not jump threading across back edges. This works for
  // normal cases but not for unreachable blocks, as they may have a cycle
  // with no back edge.
  bool EverChanged = false;
  EverChanged |= removeUnreachableBlocks(F, LVI);

  FindLoopHeaders(F);

  bool Changed;
  do {
    Changed = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E;) {
      BasicBlock *BB = &*I;
      // Thread all of the branches we can over this block.
      while (ProcessBlock(BB))
        Changed = true;

      ++I;

      // If the block is trivially dead, zap it.  This eliminates the successor
      // edges which simplifies the CFG.
      if (pred_empty(BB) &&
          BB != &BB->getParent()->getEntryBlock()) {
        DEBUG(dbgs() << "  JT: Deleting dead block '" << BB->getName()
              << "' with terminator: " << *BB->getTerminator() << '\n');
        LoopHeaders.erase(BB);
        LVI->eraseBlock(BB);
        DeleteDeadBlock(BB);
        Changed = true;
        continue;
      }

      BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());

      // Can't thread an unconditional jump, but if the block is "almost
      // empty", we can replace uses of it with uses of the successor and make
      // this dead.
      // We should not eliminate the loop header either, because eliminating
      // a loop header might later prevent LoopSimplify from transforming
      // nested loops into simplified form.
      if (BI && BI->isUnconditional() &&
          BB != &BB->getParent()->getEntryBlock() &&
          // If the terminator is the only non-phi instruction, try to nuke it.
          BB->getFirstNonPHIOrDbg()->isTerminator() && !LoopHeaders.count(BB)) {
        // Since TryToSimplifyUncondBranchFromEmptyBlock may delete the
        // block, we have to make sure it isn't in the LoopHeaders set.  We
        // reinsert afterward if needed.
        bool ErasedFromLoopHeaders = LoopHeaders.erase(BB);
        BasicBlock *Succ = BI->getSuccessor(0);

        // FIXME: It is always conservatively correct to drop the info
        // for a block even if it doesn't get erased.  This isn't totally
        // awesome, but it allows us to use AssertingVH to prevent nasty
        // dangling pointer issues within LazyValueInfo.
        LVI->eraseBlock(BB);
        if (TryToSimplifyUncondBranchFromEmptyBlock(BB)) {
          Changed = true;
          // If we deleted BB and BB was the header of a loop, then the
          // successor is now the header of the loop.
          BB = Succ;
        }

        if (ErasedFromLoopHeaders)
          LoopHeaders.insert(BB);
      }
    }
    EverChanged |= Changed;
  } while (Changed);

  LoopHeaders.clear();
  return EverChanged;
}

/// getJumpThreadDuplicationCost - Return the cost of duplicating this block to
/// thread across it.  Stop scanning the block when passing the threshold.
static unsigned getJumpThreadDuplicationCost(const BasicBlock *BB,
                                             unsigned Threshold) {
  // Ignore PHI nodes; these will be flattened when duplication happens.
  BasicBlock::const_iterator I(BB->getFirstNonPHI());

  // FIXME: THREADING will delete values that are just used to compute the
  // branch, so they shouldn't count against the duplication cost.

  unsigned Bonus = 0;
  const TerminatorInst *BBTerm = BB->getTerminator();
  // Threading through a switch statement is particularly profitable.
  // If this block ends in a switch, decrease its cost to make it more likely
  // to happen.
  if (isa<SwitchInst>(BBTerm))
    Bonus = 6;

  // The same holds for indirect branches, but slightly more so.
  if (isa<IndirectBrInst>(BBTerm))
    Bonus = 8;

  // Bump the threshold up so the early exit from the loop doesn't skip the
  // terminator-based Size adjustment at the end.
  Threshold += Bonus;

  // Sum up the cost of each instruction until we get to the terminator.  Don't
  // include the terminator because the copy won't include it.
  unsigned Size = 0;
  for (; !isa<TerminatorInst>(I); ++I) {

    // Stop scanning the block if we've reached the threshold.
    if (Size > Threshold)
      return Size;

    // Debugger intrinsics don't incur code size.
    if (isa<DbgInfoIntrinsic>(I)) continue;

    // If this is a pointer->pointer bitcast, it is free.
    if (isa<BitCastInst>(I) && I->getType()->isPointerTy())
      continue;

    // Bail out if this instruction gives back a token type; it is not possible
    // to duplicate it if it is used outside this BB.
    if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
      return ~0U;

    // All other instructions count for at least one unit.
    ++Size;

    // Calls are more expensive.  If they are non-intrinsic calls, we model
    // them as having cost of 4.  If they are a non-vector intrinsic, we model
    // them as having cost of 2 total, and if they are a vector intrinsic, we
    // model them as having cost of 1 total.
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      if (CI->cannotDuplicate() || CI->isConvergent())
        // Blocks with NoDuplicate or convergent calls are modelled as having
        // infinite cost, so they are never duplicated.
        return ~0U;
      else if (!isa<IntrinsicInst>(CI))
        Size += 3;
      else if (!CI->getType()->isVectorTy())
        Size += 1;
    }
  }

  return Size > Bonus ? Size - Bonus : 0;
}

/// FindLoopHeaders - We do not want jump threading to turn proper loop
/// structures into irreducible loops.  Doing this breaks up the loop nesting
/// hierarchy and pessimizes later transformations.  To prevent this from
/// happening, we first have to find the loop headers.  Here we approximate
/// this by finding targets of backedges in the CFG.
///
/// Note that there definitely are cases when we want to allow threading of
/// edges across a loop header.  For example, threading a jump from outside the
/// loop (the preheader) to an exit block of the loop is definitely profitable.
/// It is also almost always profitable to thread backedges from within the
/// loop to exit blocks, and is often profitable to thread backedges to other
/// blocks within the loop (forming a nested loop).  This simple analysis is
/// not rich enough to track all of these properties and keep it up-to-date as
/// the CFG mutates, so we don't allow any of these transformations.
///
void JumpThreadingPass::FindLoopHeaders(Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  for (const auto &Edge : Edges)
    LoopHeaders.insert(Edge.second);
}

/// getKnownConstant - Helper method to determine if we can thread over a
/// terminator with the given value as its condition, and if so what value to
/// use for that.  What kind of value this is depends on whether we want an
/// integer or a block address, but an undef is always accepted.
/// Returns null if Val is null or not an appropriate constant.
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
  if (!Val)
    return nullptr;

  // Undef is "known" enough.
  if (UndefValue *U = dyn_cast<UndefValue>(Val))
    return U;

  if (Preference == WantBlockAddress)
    return dyn_cast<BlockAddress>(Val->stripPointerCasts());

  return dyn_cast<ConstantInt>(Val);
}

/// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
/// in any of our predecessors.  If so, return the known list of value and pred
/// BB in the result vector.
///
/// This returns true if there were any known values.
///
bool JumpThreadingPass::ComputeValueKnownInPredecessors(
    Value *V, BasicBlock *BB, PredValueInfo &Result,
    ConstantPreference Preference, Instruction *CxtI) {
  // This method walks up use-def chains recursively.  Because of this, we
  // could get into an infinite loop going around loops in the use-def chain.
  // To prevent this, keep track of what (value, block) pairs we've already
  // visited and terminate the search if we loop back to them.
  if (!RecursionSet.insert(std::make_pair(V, BB)).second)
    return false;

  // An RAII helper to remove this pair from the recursion set once the
  // recursion stack pops back out again.
  RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB));

  // If V is a constant, then it is known in all predecessors.
  if (Constant *KC = getKnownConstant(V, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.push_back(std::make_pair(KC, Pred));

    return !Result.empty();
  }

  // If V is a non-instruction value, or an instruction in a different block,
  // then it can't be derived from a PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || I->getParent() != BB) {

    // Okay, if this is a live-in value, see if it has a known value at the end
    // of any of our predecessors.
    //
    // FIXME: This should be an edge property, not a block end property.
    // TODO: Per PR2563, we could infer value range information about a
    // predecessor based on its terminator.
    //
    // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
    // "I" is a non-local compare-with-a-constant instruction.  This would be
    // able to handle value inequalities better, for example if the compare is
    // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
    // Perhaps getConstantOnEdge should be smart enough to do this?

    for (BasicBlock *P : predecessors(BB)) {
      // If the value is known by LazyValueInfo to be a constant in a
      // predecessor, use that information to try to thread this block.
      Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
      if (Constant *KC = getKnownConstant(PredCst, Preference))
        Result.push_back(std::make_pair(KC, P));
    }

    return !Result.empty();
  }

  // If I is a PHI node, then we know the incoming values for any constants.
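  // For example (illustrative, hypothetical names): given
  // "%p = phi i32 [ 7, %pred1 ], [ %x, %pred2 ]", the value is trivially 7 on
  // the edge from %pred1, and LVI may still prove a constant for %x on the
  // edge from %pred2.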
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      if (Constant *KC = getKnownConstant(InVal, Preference)) {
        Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
      } else {
        Constant *CI = LVI->getConstantOnEdge(InVal,
                                              PN->getIncomingBlock(i),
                                              BB, CxtI);
        if (Constant *KC = getKnownConstant(CI, Preference))
          Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
      }
    }

    return !Result.empty();
  }

  // Handle Cast instructions.  To keep compile time down, we only look through
  // a cast when its source operand is a PHI or Cmp and the source type is i1.
  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Source = CI->getOperand(0);
    if (!Source->getType()->isIntegerTy(1))
      return false;
    if (!isa<PHINode>(Source) && !isa<CmpInst>(Source))
      return false;
    ComputeValueKnownInPredecessors(Source, BB, Result, Preference, CxtI);
    if (Result.empty())
      return false;

    // Convert the known values.
    for (auto &R : Result)
      R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());

    return true;
  }

  PredValueInfoTy LHSVals, RHSVals;

  // Handle some boolean conditions.
  if (I->getType()->getPrimitiveSizeInBits() == 1) {
    assert(Preference == WantInteger && "One-bit non-integer type?");
    // X | true -> true
    // X & false -> false
    if (I->getOpcode() == Instruction::Or ||
        I->getOpcode() == Instruction::And) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);
      ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals,
                                      WantInteger, CxtI);

      if (LHSVals.empty() && RHSVals.empty())
        return false;

      ConstantInt *InterestingVal;
      if (I->getOpcode() == Instruction::Or)
        InterestingVal = ConstantInt::getTrue(I->getContext());
      else
        InterestingVal = ConstantInt::getFalse(I->getContext());

      SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;

      // Scan for the sentinel.  If we find an undef, force it to the
      // interesting value: x|undef -> true and x&undef -> false.
      for (const auto &LHSVal : LHSVals)
        if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
          Result.emplace_back(InterestingVal, LHSVal.second);
          LHSKnownBBs.insert(LHSVal.second);
        }
      for (const auto &RHSVal : RHSVals)
        if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
          // If we already inferred a value for this block on the LHS, don't
          // re-add it.
          if (!LHSKnownBBs.count(RHSVal.second))
            Result.emplace_back(InterestingVal, RHSVal.second);
        }

      return !Result.empty();
    }

    // Handle the NOT form of XOR.
    if (I->getOpcode() == Instruction::Xor &&
        isa<ConstantInt>(I->getOperand(1)) &&
        cast<ConstantInt>(I->getOperand(1))->isOne()) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result,
                                      WantInteger, CxtI);
      if (Result.empty())
        return false;

      // Invert the known values.
      for (auto &R : Result)
        R.first = ConstantExpr::getNot(R.first);

      return true;
    }

  // Try to simplify some other binary operator values.
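  // For example (illustrative): for "%v = add i32 %a, 1" with %a known to be
  // 5 on some predecessor edge, the constant folding below yields 6 for that
  // edge.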
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    assert(Preference != WantBlockAddress
            && "A binary operator creating a block address?");
    if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
      PredValueInfoTy LHSVals;
      ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);

      // Try to use constant folding to simplify the binary operator.
      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);

        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.push_back(std::make_pair(KC, LHSVal.second));
      }
    }

    return !Result.empty();
  }

  // Handle compare with phi operand, where the PHI is defined in this block.
  if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
    assert(Preference == WantInteger && "Compares only produce integers");
    PHINode *PN = dyn_cast<PHINode>(Cmp->getOperand(0));
    if (PN && PN->getParent() == BB) {
      const DataLayout &DL = PN->getModule()->getDataLayout();
      // We can do this simplification if any comparisons fold to true or
      // false.  See if any do.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *PredBB = PN->getIncomingBlock(i);
        Value *LHS = PN->getIncomingValue(i);
        Value *RHS = Cmp->getOperand(1)->DoPHITranslation(BB, PredBB);

        Value *Res = SimplifyCmpInst(Cmp->getPredicate(), LHS, RHS, DL);
        if (!Res) {
          if (!isa<Constant>(RHS))
            continue;

          LazyValueInfo::Tristate
            ResT = LVI->getPredicateOnEdge(Cmp->getPredicate(), LHS,
                                           cast<Constant>(RHS), PredBB, BB,
                                           CxtI ? CxtI : Cmp);
          if (ResT == LazyValueInfo::Unknown)
            continue;
          Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
        }

        if (Constant *KC = getKnownConstant(Res, WantInteger))
          Result.push_back(std::make_pair(KC, PredBB));
      }

      return !Result.empty();
    }

    // If comparing a live-in value against a constant, see if we know the
    // live-in value on any predecessors.
    if (isa<Constant>(Cmp->getOperand(1)) && Cmp->getType()->isIntegerTy()) {
      if (!isa<Instruction>(Cmp->getOperand(0)) ||
          cast<Instruction>(Cmp->getOperand(0))->getParent() != BB) {
        Constant *RHSCst = cast<Constant>(Cmp->getOperand(1));

        for (BasicBlock *P : predecessors(BB)) {
          // If the value is known by LazyValueInfo to be a constant in a
          // predecessor, use that information to try to thread this block.
          LazyValueInfo::Tristate Res =
            LVI->getPredicateOnEdge(Cmp->getPredicate(), Cmp->getOperand(0),
                                    RHSCst, P, BB, CxtI ? CxtI : Cmp);
          if (Res == LazyValueInfo::Unknown)
            continue;

          Constant *ResC = ConstantInt::get(Cmp->getType(), Res);
          Result.push_back(std::make_pair(ResC, P));
        }

        return !Result.empty();
      }

      // Try to find a constant value for the LHS of a comparison,
      // and evaluate it statically if we can.
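      // For example (illustrative): "%c = icmp ult i32 %x, 10" with %x known
      // to be 3 in some predecessor folds to true on that edge.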
      if (Constant *CmpConst = dyn_cast<Constant>(Cmp->getOperand(1))) {
        PredValueInfoTy LHSVals;
        ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
                                        WantInteger, CxtI);

        for (const auto &LHSVal : LHSVals) {
          Constant *V = LHSVal.first;
          Constant *Folded = ConstantExpr::getCompare(Cmp->getPredicate(),
                                                      V, CmpConst);
          if (Constant *KC = getKnownConstant(Folded, WantInteger))
            Result.push_back(std::make_pair(KC, LHSVal.second));
        }

        return !Result.empty();
      }
    }
  }

  if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
    // Handle select instructions where at least one operand is a known
    // constant and we can figure out the condition value for any predecessor
    // block.
    Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
    Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
    PredValueInfoTy Conds;
    if ((TrueVal || FalseVal) &&
        ComputeValueKnownInPredecessors(SI->getCondition(), BB, Conds,
                                        WantInteger, CxtI)) {
      for (auto &C : Conds) {
        Constant *Cond = C.first;

        // Figure out what value to use for the condition.
        bool KnownCond;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
          // A known boolean.
          KnownCond = CI->isOne();
        } else {
          assert(isa<UndefValue>(Cond) && "Unexpected condition value");
          // Either operand will do, so be sure to pick the one that's a known
          // constant.
          // FIXME: Do this more cleverly if both values are known constants?
          KnownCond = (TrueVal != nullptr);
        }

        // See if the select has a known constant value for this predecessor.
        if (Constant *Val = KnownCond ? TrueVal : FalseVal)
          Result.push_back(std::make_pair(Val, C.second));
      }

      return !Result.empty();
    }
  }

  // If all else fails, see if LVI can figure out a constant value for us.
  Constant *CI = LVI->getConstant(V, BB, CxtI);
  if (Constant *KC = getKnownConstant(CI, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.push_back(std::make_pair(KC, Pred));
  }

  return !Result.empty();
}


/// GetBestDestForJumpOnUndef - If we determine that the specified block ends
/// in an undefined jump, decide which block is best to revector to.
///
/// Since we can pick an arbitrary destination, we pick the successor with the
/// fewest predecessors.  This should reduce the in-degree of the others.
///
static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
  TerminatorInst *BBTerm = BB->getTerminator();
  unsigned MinSucc = 0;
  BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
  // Compute the successor with the minimum number of predecessors.
  unsigned MinNumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
  for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
    TestBB = BBTerm->getSuccessor(i);
    unsigned NumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
    if (NumPreds < MinNumPreds) {
      MinSucc = i;
      MinNumPreds = NumPreds;
    }
  }

  return MinSucc;
}

static bool hasAddressTakenAndUsed(BasicBlock *BB) {
  if (!BB->hasAddressTaken()) return false;

  // If the block has its address taken, it may be a tree of dead constants
  // hanging off of it.  These shouldn't keep the block alive.
  BlockAddress *BA = BlockAddress::get(BB);
  BA->removeDeadConstantUsers();
  return !BA->use_empty();
}

/// ProcessBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
  // If the block is trivially dead, just return and let the caller nuke it.
  // This simplifies other transformations.
  if (pred_empty(BB) &&
      BB != &BB->getParent()->getEntryBlock())
    return false;

  // If this block has a single predecessor, and if that pred has a single
  // successor, merge the blocks.  This encourages recursive jump threading
  // because now the condition in this block can be threaded through
  // predecessors of our predecessor block.
  if (BasicBlock *SinglePred = BB->getSinglePredecessor()) {
    const TerminatorInst *TI = SinglePred->getTerminator();
    if (!TI->isExceptional() && TI->getNumSuccessors() == 1 &&
        SinglePred != BB && !hasAddressTakenAndUsed(BB)) {
      // If SinglePred was a loop header, BB becomes one.
      if (LoopHeaders.erase(SinglePred))
        LoopHeaders.insert(BB);

      LVI->eraseBlock(SinglePred);
      MergeBasicBlockIntoOnlyPred(BB);

      return true;
    }
  }

  if (TryToUnfoldSelectInCurrBB(BB))
    return true;

  // What kind of constant we're looking for.
  ConstantPreference Preference = WantInteger;

  // Look to see if the terminator is a conditional branch, switch or indirect
  // branch; if not, we can't thread it.
  Value *Condition;
  Instruction *Terminator = BB->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
    // Can't thread an unconditional jump.
    if (BI->isUnconditional()) return false;
    Condition = BI->getCondition();
  } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
    Condition = SI->getCondition();
  } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
    // Can't thread an indirect branch with no successors.
    if (IB->getNumSuccessors() == 0) return false;
    Condition = IB->getAddress()->stripPointerCasts();
    Preference = WantBlockAddress;
  } else {
    return false; // Must be an invoke.
  }

  // Run constant folding to see if we can reduce the condition to a simple
  // constant.
  if (Instruction *I = dyn_cast<Instruction>(Condition)) {
    Value *SimpleVal =
        ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
    if (SimpleVal) {
      I->replaceAllUsesWith(SimpleVal);
      if (isInstructionTriviallyDead(I, TLI))
        I->eraseFromParent();
      Condition = SimpleVal;
    }
  }

  // If the terminator is branching on an undef, we can pick any of the
  // successors to branch to.  Let GetBestDestForJumpOnUndef decide.
  if (isa<UndefValue>(Condition)) {
    unsigned BestSucc = GetBestDestForJumpOnUndef(BB);

    // Fold the branch/switch.
    TerminatorInst *BBTerm = BB->getTerminator();
    for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
      if (i == BestSucc) continue;
      BBTerm->getSuccessor(i)->removePredecessor(BB, true);
    }

    DEBUG(dbgs() << "  In block '" << BB->getName()
          << "' folding undef terminator: " << *BBTerm << '\n');
    BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
    BBTerm->eraseFromParent();
    return true;
  }

  // If the terminator of this block is branching on a constant, simplify the
  // terminator to an unconditional branch.
  // This can occur due to threading in other blocks.
  if (getKnownConstant(Condition, Preference)) {
    DEBUG(dbgs() << "  In block '" << BB->getName()
          << "' folding terminator: " << *BB->getTerminator() << '\n');
    ++NumFolds;
    ConstantFoldTerminator(BB, true);
    return true;
  }

  Instruction *CondInst = dyn_cast<Instruction>(Condition);

  // All the rest of our checks depend on the condition being an instruction.
  if (!CondInst) {
    // FIXME: Unify this with code below.
    if (ProcessThreadableEdges(Condition, BB, Preference, Terminator))
      return true;
    return false;
  }

  if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
    // If we're branching on a conditional, LVI might be able to determine
    // its value at the branch instruction.  We only handle comparisons
    // against a constant at this time.
    // TODO: This should be extended to handle switches as well.
    BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
    Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
    if (CondBr && CondConst && CondBr->isConditional()) {
      LazyValueInfo::Tristate Ret =
        LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
                            CondConst, CondBr);
      if (Ret != LazyValueInfo::Unknown) {
        unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
        unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
        CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
        BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
        CondBr->eraseFromParent();
        if (CondCmp->use_empty())
          CondCmp->eraseFromParent();
        else if (CondCmp->getParent() == BB) {
          // If the fact we just learned is true for all uses of the
          // condition, replace it with a constant value.
          auto *CI = Ret == LazyValueInfo::True ?
            ConstantInt::getTrue(CondCmp->getType()) :
            ConstantInt::getFalse(CondCmp->getType());
          CondCmp->replaceAllUsesWith(CI);
          CondCmp->eraseFromParent();
        }
        return true;
      }
    }

    if (CondBr && CondConst && TryToUnfoldSelect(CondCmp, BB))
      return true;
  }

  // Check for some cases that are worth simplifying.  Right now we want to
  // look for loads that are used by a switch or by the condition for the
  // branch.  If we see one, check to see if it's partially redundant.  If so,
  // insert a PHI which can then be used to thread the values.
  //
  Value *SimplifyValue = CondInst;
  if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
    if (isa<Constant>(CondCmp->getOperand(1)))
      SimplifyValue = CondCmp->getOperand(0);

  // TODO: There are other places where load PRE would be profitable, such as
  // more complex comparisons.
  if (LoadInst *LI = dyn_cast<LoadInst>(SimplifyValue))
    if (SimplifyPartiallyRedundantLoad(LI))
      return true;

  // Handle a variety of cases where we are branching on something derived
  // from a PHI node in the current block.  If we can prove that any
  // predecessors compute a predictable value based on a PHI node, thread
  // those predecessors.
  //
  if (ProcessThreadableEdges(CondInst, BB, Preference, Terminator))
    return true;

  // If this is an otherwise-unfoldable branch on a phi node in the current
  // block, see if we can simplify.
  if (PHINode *PN = dyn_cast<PHINode>(CondInst))
    if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
      return ProcessBranchOnPHI(PN);

  // If this is an otherwise-unfoldable branch on an xor, see if we can
  // simplify.
  if (CondInst->getOpcode() == Instruction::Xor &&
      CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
    return ProcessBranchOnXOR(cast<BinaryOperator>(CondInst));

  // Search for a stronger dominating condition that can be used to simplify a
  // conditional branch leaving BB.
  if (ProcessImpliedCondition(BB))
    return true;

  return false;
}

bool JumpThreadingPass::ProcessImpliedCondition(BasicBlock *BB) {
  auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  BasicBlock *CurrentBB = BB;
  BasicBlock *CurrentPred = BB->getSinglePredecessor();
  unsigned Iter = 0;

  auto &DL = BB->getModule()->getDataLayout();

  while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
    auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
    if (!PBI || !PBI->isConditional())
      return false;
    if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
      return false;

    bool FalseDest = PBI->getSuccessor(1) == CurrentBB;
    Optional<bool> Implication =
        isImpliedCondition(PBI->getCondition(), Cond, DL, FalseDest);
    if (Implication) {
      BI->getSuccessor(*Implication ? 1 : 0)->removePredecessor(BB);
      BranchInst::Create(BI->getSuccessor(*Implication ? 0 : 1), BI);
      BI->eraseFromParent();
      return true;
    }
    CurrentBB = CurrentPred;
    CurrentPred = CurrentBB->getSinglePredecessor();
  }

  return false;
}

/// SimplifyPartiallyRedundantLoad - If LI is an obviously partially redundant
/// load instruction, eliminate it by replacing it with a PHI node.  This is an
/// important optimization that encourages jump threading, and needs to be run
/// interlaced with other jump threading tasks.
bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
  // Don't hack volatile or ordered loads.
  if (!LI->isUnordered()) return false;

  // If the load is defined in a block with exactly one predecessor, it can't
  // be partially redundant.
  BasicBlock *LoadBB = LI->getParent();
  if (LoadBB->getSinglePredecessor())
    return false;

  // If the load is defined in an EH pad, it can't be partially redundant,
  // because the edges between the invoke and the EH pad cannot have other
  // instructions between them.
  if (LoadBB->isEHPad())
    return false;

  Value *LoadedPtr = LI->getOperand(0);

  // If the loaded operand is defined in the LoadBB, it can't be available.
  // TODO: Could do simple PHI translation, that would be fun :)
  if (Instruction *PtrOp = dyn_cast<Instruction>(LoadedPtr))
    if (PtrOp->getParent() == LoadBB)
      return false;

  // Scan a few instructions up from the load, to see if it is obviously live
  // at the entry to its block.
  BasicBlock::iterator BBIt(LI);
  bool IsLoadCSE;
  if (Value *AvailableVal =
          FindAvailableLoadedValue(LI, LoadBB, BBIt, DefMaxInstsToScan,
                                   nullptr, &IsLoadCSE)) {
    // If the value of the load is locally available within the block, just use
    // it.  This frequently occurs for reg2mem'd allocas.
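    // (For instance, an earlier store or load of the same pointer in this
    // block makes the value available here without a reload.)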

    if (IsLoadCSE) {
      LoadInst *NLI = cast<LoadInst>(AvailableVal);
      combineMetadataForCSE(NLI, LI);
    }

    // If the returned value is the load itself, replace with an undef.  This
    // can only happen in dead loops.
    if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType());
    if (AvailableVal->getType() != LI->getType())
      AvailableVal =
          CastInst::CreateBitOrPointerCast(AvailableVal, LI->getType(), "", LI);
    LI->replaceAllUsesWith(AvailableVal);
    LI->eraseFromParent();
    return true;
  }

  // Otherwise, if we scanned the whole block and got to the top of the block,
  // we know the block is locally transparent to the load.  If not, something
  // might clobber its value.
  if (BBIt != LoadBB->begin())
    return false;

  // If all of the loads and stores that feed the value have the same AA tags,
  // then we can propagate them onto any newly inserted loads.
  AAMDNodes AATags;
  LI->getAAMetadata(AATags);

  SmallPtrSet<BasicBlock*, 8> PredsScanned;
  typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
  AvailablePredsTy AvailablePreds;
  BasicBlock *OneUnavailablePred = nullptr;
  SmallVector<LoadInst*, 8> CSELoads;

  // If we got here, the loaded value is transparent through to the start of
  // the block.  Check to see if it is available in any of the predecessor
  // blocks.
  for (BasicBlock *PredBB : predecessors(LoadBB)) {
    // If we already scanned this predecessor, skip it.
    if (!PredsScanned.insert(PredBB).second)
      continue;

    // Scan the predecessor to see if the value is available in the pred.
    BBIt = PredBB->end();
    Value *PredAvailable = FindAvailableLoadedValue(LI, PredBB, BBIt,
                                                    DefMaxInstsToScan,
                                                    nullptr,
                                                    &IsLoadCSE);
    if (!PredAvailable) {
      OneUnavailablePred = PredBB;
      continue;
    }

    if (IsLoadCSE)
      CSELoads.push_back(cast<LoadInst>(PredAvailable));

    // If so, this load is partially redundant.  Remember this info so that we
    // can create a PHI node.
    AvailablePreds.push_back(std::make_pair(PredBB, PredAvailable));
  }

  // If the loaded value isn't available in any predecessor, it isn't
  // partially redundant.
  if (AvailablePreds.empty()) return false;

  // Okay, the loaded value is available in at least one (and maybe all!)
  // predecessors.  If the value is unavailable in more than one unique
  // predecessor, we want to insert a merge block for those common
  // predecessors.  This ensures that we only have to insert one reload, thus
  // not increasing code size.
  BasicBlock *UnavailablePred = nullptr;

  // If there is exactly one predecessor where the value is unavailable, the
  // already computed 'OneUnavailablePred' block is it.  If it ends in an
  // unconditional branch, we know that it isn't a critical edge.
  if (PredsScanned.size() == AvailablePreds.size()+1 &&
      OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
    UnavailablePred = OneUnavailablePred;
  } else if (PredsScanned.size() != AvailablePreds.size()) {
    // Otherwise, we had multiple unavailable predecessors or we had a
    // critical edge from the one.
    SmallVector<BasicBlock*, 8> PredsToSplit;
    SmallPtrSet<BasicBlock*, 8> AvailablePredSet;

    for (const auto &AvailablePred : AvailablePreds)
      AvailablePredSet.insert(AvailablePred.first);

    // Add all the unavailable predecessors to the PredsToSplit list.
    for (BasicBlock *P : predecessors(LoadBB)) {
      // If the predecessor is an indirect goto, we can't split the edge.
      if (isa<IndirectBrInst>(P->getTerminator()))
        return false;

      if (!AvailablePredSet.count(P))
        PredsToSplit.push_back(P);
    }

    // Split them out to their own block.
    UnavailablePred = SplitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
  }

  // If the value isn't available in all predecessors, then there will be
  // exactly one where it isn't available.  Insert a load on that edge and add
  // it to the AvailablePreds list.
  if (UnavailablePred) {
    assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
           "Can't handle critical edge here!");
    LoadInst *NewVal =
        new LoadInst(LoadedPtr, LI->getName() + ".pr", false,
                     LI->getAlignment(), LI->getOrdering(), LI->getSynchScope(),
                     UnavailablePred->getTerminator());
    NewVal->setDebugLoc(LI->getDebugLoc());
    if (AATags)
      NewVal->setAAMetadata(AATags);

    AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
  }

  // Now we know that each predecessor of this block has a value in
  // AvailablePreds; sort them for efficient access as we're walking the preds.
  array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());

  // Create a PHI node at the start of the block for the PRE'd load value.
  pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
  PHINode *PN = PHINode::Create(LI->getType(), std::distance(PB, PE), "",
                                &LoadBB->front());
  PN->takeName(LI);
  PN->setDebugLoc(LI->getDebugLoc());

  // Insert new entries into the PHI for each predecessor.  A single block may
  // have multiple entries here.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    AvailablePredsTy::iterator I =
        std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
                         std::make_pair(P, (Value*)nullptr));

    assert(I != AvailablePreds.end() && I->first == P &&
           "Didn't find entry for predecessor!");

    // If we have an available predecessor but it requires casting, insert the
    // cast in the predecessor and use the cast.  Note that we have to update
    // the AvailablePreds vector as we go so that all of the PHI entries for
    // this predecessor use the same bitcast.
    Value *&PredV = I->second;
    if (PredV->getType() != LI->getType())
      PredV = CastInst::CreateBitOrPointerCast(PredV, LI->getType(), "",
                                               P->getTerminator());

    PN->addIncoming(PredV, I->first);
  }

  for (LoadInst *PredLI : CSELoads) {
    combineMetadataForCSE(PredLI, LI);
  }

  LI->replaceAllUsesWith(PN);
  LI->eraseFromParent();

  return true;
}

/// FindMostPopularDest - The specified list contains multiple possible
/// threadable destinations.  Pick the one that occurs the most frequently in
/// the list.
static BasicBlock *
FindMostPopularDest(BasicBlock *BB,
                    const SmallVectorImpl<std::pair<BasicBlock*,
                              BasicBlock*> > &PredToDestList) {
  assert(!PredToDestList.empty());

  // Determine popularity.
  // If there are multiple possible destinations, we explicitly choose to
  // ignore 'undef' destinations.  We prefer to thread blocks with known, real
  // destinations rather than threading on undef.  We'll handle them later if
  // interesting.
  DenseMap<BasicBlock*, unsigned> DestPopularity;
  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second)
      DestPopularity[PredToDest.second]++;

  // Find the most popular dest.
  DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin();
  BasicBlock *MostPopularDest = DPI->first;
  unsigned Popularity = DPI->second;
  SmallVector<BasicBlock*, 4> SamePopularity;

  for (++DPI; DPI != DestPopularity.end(); ++DPI) {
    // If the popularity of this entry isn't higher than the popularity we've
    // seen so far, ignore it.
    if (DPI->second < Popularity)
      ; // ignore.
    else if (DPI->second == Popularity) {
      // If it is the same as what we've seen so far, keep track of it.
      SamePopularity.push_back(DPI->first);
    } else {
      // If it is more popular, remember it.
      SamePopularity.clear();
      MostPopularDest = DPI->first;
      Popularity = DPI->second;
    }
  }

  // Okay, now we know the most popular destination.  If there is more than one
  // destination, we need to pick one.  This is arbitrary, but we need to make
  // a deterministic decision.  Pick the first one that appears in the
  // successor list.
  if (!SamePopularity.empty()) {
    SamePopularity.push_back(MostPopularDest);
    TerminatorInst *TI = BB->getTerminator();
    for (unsigned i = 0; ; ++i) {
      assert(i != TI->getNumSuccessors() && "Didn't find any successor!");

      if (!is_contained(SamePopularity, TI->getSuccessor(i)))
        continue;

      MostPopularDest = TI->getSuccessor(i);
      break;
    }
  }

  // Okay, we have finally picked the most popular destination.
  return MostPopularDest;
}

bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
                                               ConstantPreference Preference,
                                               Instruction *CxtI) {
  // If threading this would thread across a loop header, don't even try to
  // thread the edge.
  if (LoopHeaders.count(BB))
    return false;

  PredValueInfoTy PredValues;
  if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues, Preference, CxtI))
    return false;

  assert(!PredValues.empty() &&
         "ComputeValueKnownInPredecessors returned true with no values");

  DEBUG(dbgs() << "IN BB: " << *BB;
        for (const auto &PredValue : PredValues) {
          dbgs() << "  BB '" << BB->getName() << "': FOUND condition = "
                 << *PredValue.first
                 << " for pred '" << PredValue.second->getName() << "'.\n";
        });

  // Decide what we want to thread through.  Convert our list of known values
  // to a list of known destinations for each pred.  This also discards
  // duplicate predecessors and keeps track of the undefined inputs (which are
  // represented as a null dest in the PredToDestList).
  SmallPtrSet<BasicBlock*, 16> SeenPreds;
  SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;

  BasicBlock *OnlyDest = nullptr;
  BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;

  for (const auto &PredValue : PredValues) {
    BasicBlock *Pred = PredValue.second;
    if (!SeenPreds.insert(Pred).second)
      continue;  // Duplicate predecessor entry.

    // If the predecessor ends with an indirect goto, we can't change its
    // destination.
    if (isa<IndirectBrInst>(Pred->getTerminator()))
      continue;

    Constant *Val = PredValue.first;

    BasicBlock *DestBB;
    if (isa<UndefValue>(Val))
      DestBB = nullptr;
    else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
      DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
      DestBB = SI->findCaseValue(cast<ConstantInt>(Val)).getCaseSuccessor();
    } else {
      assert(isa<IndirectBrInst>(BB->getTerminator())
              && "Unexpected terminator");
      DestBB = cast<BlockAddress>(Val)->getBasicBlock();
    }

    // If we have exactly one destination, remember it for efficiency below.
    if (PredToDestList.empty())
      OnlyDest = DestBB;
    else if (OnlyDest != DestBB)
      OnlyDest = MultipleDestSentinel;

    PredToDestList.push_back(std::make_pair(Pred, DestBB));
  }

  // If all edges were unthreadable, we fail.
  if (PredToDestList.empty())
    return false;

  // Determine which is the most common successor.  If we have many inputs and
  // this block is a switch, we want to start by threading the batch that goes
  // to the most popular destination first.  If we only know about one
  // threadable destination (the common case) we can avoid this.
  BasicBlock *MostPopularDest = OnlyDest;

  if (MostPopularDest == MultipleDestSentinel)
    MostPopularDest = FindMostPopularDest(BB, PredToDestList);

  // Now that we know what the most popular destination is, factor all
  // predecessors that will jump to it into a single predecessor.
  SmallVector<BasicBlock*, 16> PredsToFactor;
  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second == MostPopularDest) {
      BasicBlock *Pred = PredToDest.first;

      // This predecessor may be a switch or something else that has multiple
      // edges to the block.  Factor each of these edges by listing them
      // according to # occurrences in PredsToFactor.
      for (BasicBlock *Succ : successors(Pred))
        if (Succ == BB)
          PredsToFactor.push_back(Pred);
    }

  // If the threadable edges are branching on an undefined value, we get to
  // pick the destination that these predecessors should get to.
  if (!MostPopularDest)
    MostPopularDest = BB->getTerminator()->
                        getSuccessor(GetBestDestForJumpOnUndef(BB));

  // Ok, try to thread it!
  return ThreadEdge(BB, PredsToFactor, MostPopularDest);
}

/// ProcessBranchOnPHI - We have an otherwise unthreadable conditional branch
/// on a PHI node in the current block.  See if there are any simplifications
/// we can do based on inputs to the phi node.
///
bool JumpThreadingPass::ProcessBranchOnPHI(PHINode *PN) {
  BasicBlock *BB = PN->getParent();

  // TODO: We could make use of this to do it once for blocks with common PHI
  // values.
  SmallVector<BasicBlock*, 1> PredBBs;
  PredBBs.resize(1);

  // If any of the predecessor blocks end in an unconditional branch, we can
  // *duplicate* the conditional branch into that block in order to further
  // encourage jump threading and to eliminate cases where we have branch on a
  // phi of an icmp (branch on icmp is much better).
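  // For example (illustrative, hypothetical names): with
  //
  //   BB:
  //     %p = phi i1 [ %c, %pred ], ...
  //     br i1 %p, label %t, label %f
  //
  // duplicating the branch into %pred lets it branch directly on %c.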
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PredBB = PN->getIncomingBlock(i);
    if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
      if (PredBr->isUnconditional()) {
        PredBBs[0] = PredBB;
        // Try to duplicate BB into PredBB.
        if (DuplicateCondBranchOnPHIIntoPred(BB, PredBBs))
          return true;
      }
  }

  return false;
}

/// ProcessBranchOnXOR - We have an otherwise unthreadable conditional branch
/// on an xor instruction in the current block.  See if there are any
/// simplifications we can do based on inputs to the xor.
///
bool JumpThreadingPass::ProcessBranchOnXOR(BinaryOperator *BO) {
  BasicBlock *BB = BO->getParent();

  // If either the LHS or RHS of the xor is a constant, don't do this
  // optimization.
  if (isa<ConstantInt>(BO->getOperand(0)) ||
      isa<ConstantInt>(BO->getOperand(1)))
    return false;

  // If the first instruction in BB isn't a phi, we won't be able to infer
  // anything special about any particular predecessor.
  if (!isa<PHINode>(BB->front()))
    return false;

  // If this BB is a landing pad, we won't be able to split the edge into it.
  if (BB->isEHPad())
    return false;

  // If we have an xor as the branch input to this block, and we know that the
  // LHS or RHS of the xor in any predecessor is true/false, then we can clone
  // the condition into the predecessor and fix that value to true, saving some
  // logical ops on that path and encouraging other paths to simplify.
  //
  // This copies something like this:
  //
  //  BB:
  //    %X = phi i1 [1],  [%X']
  //    %Y = icmp eq i32 %A, %B
  //    %Z = xor i1 %X, %Y
  //    br i1 %Z, ...
  //
  // Into:
  //  BB':
  //    %Y = icmp ne i32 %A, %B
  //    br i1 %Y, ...

  PredValueInfoTy XorOpValues;
  bool isLHS = true;
  if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
                                       WantInteger, BO)) {
    assert(XorOpValues.empty());
    if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
                                         WantInteger, BO))
      return false;
    isLHS = false;
  }

  assert(!XorOpValues.empty() &&
         "ComputeValueKnownInPredecessors returned true with no values");

  // Scan the information to see which is most popular: true or false.  The
  // predecessors can be of the set true, false, or undef.
  unsigned NumTrue = 0, NumFalse = 0;
  for (const auto &XorOpValue : XorOpValues) {
    if (isa<UndefValue>(XorOpValue.first))
      // Ignore undefs for the count.
      continue;
    if (cast<ConstantInt>(XorOpValue.first)->isZero())
      ++NumFalse;
    else
      ++NumTrue;
  }

  // Determine which value to split on: true, false, or undef if neither.
  ConstantInt *SplitVal = nullptr;
  if (NumTrue > NumFalse)
    SplitVal = ConstantInt::getTrue(BB->getContext());
  else if (NumTrue != 0 || NumFalse != 0)
    SplitVal = ConstantInt::getFalse(BB->getContext());

  // Collect all of the blocks that this can be folded into so that we can
  // factor this once and clone it once.
  SmallVector<BasicBlock*, 8> BlocksToFoldInto;
  for (const auto &XorOpValue : XorOpValues) {
    if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first))
      continue;

    BlocksToFoldInto.push_back(XorOpValue.second);
  }

  // If we inferred a value for all of the predecessors, then duplication won't
  // help us.  However, we can just replace the LHS or RHS with the constant.
  if (BlocksToFoldInto.size() ==
      cast<PHINode>(BB->front()).getNumIncomingValues()) {
    if (!SplitVal) {
      // If all preds provide undef, just nuke the xor, because it is undef
      // too.
      BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
      BO->eraseFromParent();
    } else if (SplitVal->isZero()) {
      // If all preds provide 0, replace the xor with the other input.
      BO->replaceAllUsesWith(BO->getOperand(isLHS));
      BO->eraseFromParent();
    } else {
      // If all preds provide 1, set the computed value to 1.
      BO->setOperand(!isLHS, SplitVal);
    }

    return true;
  }

  // Try to duplicate BB into PredBB.
  return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
}


/// AddPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
/// predecessor to the PHIBB block.  If it has PHI nodes, add entries for
/// NewPred using the entries from OldPred (suitably mapped).
static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
                                            BasicBlock *OldPred,
                                            BasicBlock *NewPred,
                                     DenseMap<Instruction*, Value*> &ValueMap) {
  for (BasicBlock::iterator PNI = PHIBB->begin();
       PHINode *PN = dyn_cast<PHINode>(PNI); ++PNI) {
    // Ok, we have a PHI node.  Figure out what the incoming value was for the
    // DestBlock.
    Value *IV = PN->getIncomingValueForBlock(OldPred);

    // Remap the value if necessary.
    if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
      DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
      if (I != ValueMap.end())
        IV = I->second;
    }

    PN->addIncoming(IV, NewPred);
  }
}

/// ThreadEdge - We have decided that it is safe and profitable to factor the
/// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
/// across BB.  Transform the IR to reflect this change.
bool JumpThreadingPass::ThreadEdge(BasicBlock *BB,
                                   const SmallVectorImpl<BasicBlock *> &PredBBs,
                                   BasicBlock *SuccBB) {
  // If threading to the same block as we come from, we would create an
  // infinite loop.
  if (SuccBB == BB) {
    DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
          << "' - would thread to self!\n");
    return false;
  }

  // If threading this would thread across a loop header, don't thread the
  // edge.  See the comments above FindLoopHeaders for justifications and
  // caveats.
  if (LoopHeaders.count(BB)) {
    DEBUG(dbgs() << "  Not threading across loop header BB '" << BB->getName()
          << "' to dest BB '" << SuccBB->getName()
          << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned JumpThreadCost = getJumpThreadDuplicationCost(BB, BBDupThreshold);
  if (JumpThreadCost > BBDupThreshold) {
    DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
          << "' - Cost is too high: " << JumpThreadCost << "\n");
    return false;
  }

  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                 << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }

  // And finally, do it!
  DEBUG(dbgs() << "  Threading edge from '" << PredBB->getName() << "' to '"
               << SuccBB->getName() << "' with cost: " << JumpThreadCost
               << ", across block:\n    " << *BB << "\n");

  LVI->threadEdge(PredBB, BB, SuccBB);

  // We are going to have to map operands from the original BB to the new copy
  // of the block 'NewBB'.  If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
                                         BB->getName()+".thread",
                                         BB->getParent(), BB);
  NewBB->moveAfter(PredBB);

  // Set the block frequency of NewBB.
  if (HasProfileData) {
    auto NewBBFreq =
        BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB);
    BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
  }

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; !isa<TerminatorInst>(BI); ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    NewBB->getInstList().push_back(New);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  // We didn't copy the terminator from BB over to NewBB, because there is now
  // an unconditional jump to SuccBB.  Insert the unconditional jump.
  BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB);
  NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc());

  // Check to see if SuccBB has PHI nodes.  If so, we need to add entries to
  // the PHI nodes for NewBB now.
  AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);

  // If there were values defined in BB that are used outside the block, then
  // we now have to update all uses of the value to use either the original
  // value, the cloned value, or some PHI derived value.  This can require
  // arbitrary PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (Instruction &I : *BB) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Use &U : I.uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }

    // If there are no uses outside the block, we're done with this
    // instruction.
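    // (Uses inside BB itself, and PHI uses whose incoming edge comes from BB,
    // were deliberately skipped in the scan above, so an empty UsesToRename
    // here is common.)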
    if (UsesToRename.empty())
      continue;

    DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");

    // We found a use of I outside of BB.  Rename all uses of I that are
    // outside its block to be uses of the appropriate PHI node etc., using the
    // two values we know: the original I in BB and its clone in NewBB.
    SSAUpdate.Initialize(I.getType(), I.getName());
    SSAUpdate.AddAvailableValue(BB, &I);
    SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }

  // Ok, NewBB is good to go.  Update the terminator of PredBB to jump to
  // NewBB instead of BB.  This eliminates predecessors from BB, which requires
  // us to simplify any PHI nodes in BB.
  TerminatorInst *PredTerm = PredBB->getTerminator();
  for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
    if (PredTerm->getSuccessor(i) == BB) {
      BB->removePredecessor(PredBB, true);
      PredTerm->setSuccessor(i, NewBB);
    }

  // At this point, the IR is fully up to date and consistent.  Do a quick scan
  // over the new instructions and zap any that are constants or dead.  This
  // frequently happens because of phi translation.
  SimplifyInstructionsInBlock(NewBB, TLI);

  // Update the edge weight from BB to SuccBB, which should be less than
  // before.
  UpdateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB);

  // Threaded an edge!
  ++NumThreads;
  return true;
}

/// Create a new basic block that will be the predecessor of BB and successor
/// of all blocks in Preds.  When profile data is available, update the
/// frequency of this new block.
BasicBlock *JumpThreadingPass::SplitBlockPreds(BasicBlock *BB,
                                               ArrayRef<BasicBlock *> Preds,
                                               const char *Suffix) {
  // Collect the frequencies of all predecessors of BB, which will be used to
  // update the edge weight on BB->SuccBB.
  BlockFrequency PredBBFreq(0);
  if (HasProfileData)
    for (auto Pred : Preds)
      PredBBFreq += BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB);

  BasicBlock *PredBB = SplitBlockPredecessors(BB, Preds, Suffix);

  // Set the block frequency of the newly created PredBB, which is the sum of
  // frequencies of Preds.
  if (HasProfileData)
    BFI->setBlockFreq(PredBB, PredBBFreq.getFrequency());
  return PredBB;
}

bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
  const TerminatorInst *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "not a split");

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return false;

  // Ensure there are weights for all of the successors.  Note that the first
  // operand to the metadata node is a name, not a weight.
  return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1;
}

/// Update the block frequency of BB, and the branch weight and profile
/// metadata on the edge BB->SuccBB.  This is done by scaling the weight of
/// BB->SuccBB by 1 - Freq(PredBB->BB) / Freq(BB->SuccBB).
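///
/// A worked example with illustrative numbers: if Freq(BB) = 100,
/// Freq(PredBB->BB) = 30 (which becomes the frequency of NewBB), and
/// Freq(BB->SuccBB) = 60, then after threading Freq(BB) = 100 - 30 = 70 and
/// the remaining BB->SuccBB edge frequency is 60 - 30 = 30.  The other
/// outgoing edges of BB keep their old frequencies, and all of BB's outgoing
/// probabilities are renormalized from these updated frequencies.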
void JumpThreadingPass::UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
                                                     BasicBlock *BB,
                                                     BasicBlock *NewBB,
                                                     BasicBlock *SuccBB) {
  if (!HasProfileData)
    return;

  assert(BFI && BPI && "BFI & BPI should have been created here");

  // As the edge from PredBB to BB is deleted, we have to update the block
  // frequency of BB.
  auto BBOrigFreq = BFI->getBlockFreq(BB);
  auto NewBBFreq = BFI->getBlockFreq(NewBB);
  auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
  auto BBNewFreq = BBOrigFreq - NewBBFreq;
  BFI->setBlockFreq(BB, BBNewFreq.getFrequency());

  // Collect updated outgoing edges' frequencies from BB and use them to update
  // edge probabilities.
  SmallVector<uint64_t, 4> BBSuccFreq;
  for (BasicBlock *Succ : successors(BB)) {
    auto SuccFreq = (Succ == SuccBB)
                        ? BB2SuccBBFreq - NewBBFreq
                        : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
    BBSuccFreq.push_back(SuccFreq.getFrequency());
  }

  uint64_t MaxBBSuccFreq =
      *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());

  SmallVector<BranchProbability, 4> BBSuccProbs;
  if (MaxBBSuccFreq == 0)
    BBSuccProbs.assign(BBSuccFreq.size(),
                       {1, static_cast<unsigned>(BBSuccFreq.size())});
  else {
    for (uint64_t Freq : BBSuccFreq)
      BBSuccProbs.push_back(
          BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
    // Normalize edge probabilities so that they sum up to one.
    BranchProbability::normalizeProbabilities(BBSuccProbs.begin(),
                                              BBSuccProbs.end());
  }

  // Update edge probabilities in BPI.
  for (int I = 0, E = BBSuccProbs.size(); I < E; I++)
    BPI->setEdgeProbability(BB, I, BBSuccProbs[I]);

  // Update the profile metadata as well.
  //
  // Don't do this if the profile of the transformed blocks was statically
  // estimated.  (This could occur in completely cold parts of the CFG despite
  // the function having an entry frequency.)
  //
  // In this case we don't want to suggest to subsequent passes that the
  // calculated weights are fully consistent.  Consider this graph:
  //
  //                   check_1
  //              50% /    |
  //              eq_1     | 50%
  //                  \    |
  //                   check_2
  //              50% /    |
  //              eq_2     | 50%
  //                  \    |
  //                   check_3
  //              50% /    |
  //              eq_3     | 50%
  //                  \    |
  //
  // Assuming the blocks check_* all compare the same value against 1, 2 and 3,
  // the overall probabilities are inconsistent; the total probability that the
  // value is either 1, 2 or 3 is 150%.
  //
  // As a consequence, if we thread eq_1 -> check_2 to check_3, the probability
  // of check_2 -> check_3 becomes 0%.  This is even worse if the edge whose
  // probability becomes 0% is the loop exit edge.  Then, based solely on
  // static estimation, we would assume the loop was extremely hot.
  //
  // FIXME: Fix this locally as well so that BPI and BFI stay consistent.  We
  // shouldn't make edges extremely likely or unlikely based solely on static
  // estimation.
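  //
  // When the weights are updated below, the terminator ends up carrying
  // normalized !prof metadata of the usual branch_weights form, e.g.
  // (illustrative values):
  //
  //   br i1 %cond, label %left, label %right, !prof !0
  //   !0 = !{!"branch_weights", i32 5, i32 3}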
  if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) {
    SmallVector<uint32_t, 4> Weights;
    for (auto Prob : BBSuccProbs)
      Weights.push_back(Prob.getNumerator());

    auto TI = BB->getTerminator();
    TI->setMetadata(
        LLVMContext::MD_prof,
        MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights));
  }
}

/// DuplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
/// to BB which contains an i1 PHI node and a conditional branch on that PHI.
/// If we can duplicate the contents of BB up into PredBB, do so now; this
/// improves the odds that the branch will be on an analyzable instruction such
/// as a compare.
bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
    BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) {
  assert(!PredBBs.empty() && "Can't handle an empty set");

  // If BB is a loop header, then duplicating this block outside the loop would
  // cause us to transform this into an irreducible loop; don't do this.
  // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB)) {
    DEBUG(dbgs() << "  Not duplicating loop header '" << BB->getName()
                 << "' into predecessor block '" << PredBBs[0]->getName()
                 << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned DuplicationCost = getJumpThreadDuplicationCost(BB, BBDupThreshold);
  if (DuplicationCost > BBDupThreshold) {
    DEBUG(dbgs() << "  Not duplicating BB '" << BB->getName()
                 << "' - Cost is too high: " << DuplicationCost << "\n");
    return false;
  }

  // And finally, do it!  Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                 << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }

  // Okay, we decided to do this!  Clone all the instructions in BB onto the
  // end of PredBB.
  DEBUG(dbgs() << "  Duplicating block '" << BB->getName() << "' into end of '"
               << PredBB->getName() << "' to eliminate branch on phi.  Cost: "
               << DuplicationCost << " block is:" << *BB << "\n");

  // Unless PredBB ends with an unconditional branch, split the edge so that we
  // can just clone the bits from BB into the end of the new PredBB.
  BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());

  if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
    PredBB = SplitEdge(PredBB, BB);
    OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
  }

  // We are going to have to map operands from the original BB into the PredBB
  // block.  Evaluate PHI nodes in BB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
  // Clone the non-phi instructions of BB into PredBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; BI != BB->end(); ++BI) {
    Instruction *New = BI->clone();

    // Remap operands to patch up intra-block references.
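    // (E.g., if BB contained "%Y = icmp ..." followed by "%Z = xor i1 %X, %Y",
    // the clone of %Z must use the clone of %Y recorded in ValueMapping, not
    // BB's original %Y.)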
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }

    // If this instruction can be simplified after the operands are updated,
    // just use the simplified value instead.  This frequently happens due to
    // phi translation.
    if (Value *IV =
            SimplifyInstruction(New, BB->getModule()->getDataLayout())) {
      ValueMapping[&*BI] = IV;
      if (!New->mayHaveSideEffects()) {
        delete New;
        New = nullptr;
      }
    } else {
      ValueMapping[&*BI] = New;
    }
    if (New) {
      // Otherwise, insert the new instruction into the block.
      New->setName(BI->getName());
      PredBB->getInstList().insert(OldPredBranch->getIterator(), New);
    }
  }

  // Check to see if the targets of the branch had PHI nodes.  If so, we need
  // to add entries to the PHI nodes for the branch from PredBB now.
  BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
                                  ValueMapping);
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
                                  ValueMapping);

  // If there were values defined in BB that are used outside the block, then
  // we now have to update all uses of the value to use either the original
  // value, the cloned value, or some PHI derived value.  This can require
  // arbitrary PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (Instruction &I : *BB) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Use &U : I.uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }

    // If there are no uses outside the block, we're done with this
    // instruction.
    if (UsesToRename.empty())
      continue;

    DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");

    // We found a use of I outside of BB.  Rename all uses of I that are
    // outside its block to be uses of the appropriate PHI node etc., using the
    // two values we know: the original I in BB and its clone in PredBB.
    SSAUpdate.Initialize(I.getType(), I.getName());
    SSAUpdate.AddAvailableValue(BB, &I);
    SSAUpdate.AddAvailableValue(PredBB, ValueMapping[&I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }

  // PredBB no longer jumps to BB; remove entries in the PHI nodes for the edge
  // that we nuked.
  BB->removePredecessor(PredBB, true);

  // Remove the unconditional branch at the end of the PredBB block.
  OldPredBranch->eraseFromParent();

  ++NumDupes;
  return true;
}

/// TryToUnfoldSelect - Look for blocks of the form
/// bb1:
///   %a = select
///   br bb2
///
/// bb2:
///   %p = phi [%a, %bb1] ...
///   %c = icmp %p
///   br i1 %c
///
/// And expand the select into a branch structure if one of its arms allows %c
/// to be folded.
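///
/// A minimal sketch of the expanded form (value names illustrative; the
/// "select.unfold" block name matches the code below):
/// bb1:
///   br i1 %selcond, label %select.unfold, label %bb2
///
/// select.unfold:
///   br label %bb2
///
/// bb2:
///   %p = phi [%falseval, %bb1], [%trueval, %select.unfold] ...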
/// This later enables threading from bb1 over bb2.
bool JumpThreadingPass::TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) {
  BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
  PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0));
  Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1));

  if (!CondBr || !CondBr->isConditional() || !CondLHS ||
      CondLHS->getParent() != BB)
    return false;

  for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *Pred = CondLHS->getIncomingBlock(I);
    SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I));

    // Check whether one of the incoming values is a select in the
    // corresponding predecessor.
    if (!SI || SI->getParent() != Pred || !SI->hasOneUse())
      continue;

    BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredTerm || !PredTerm->isUnconditional())
      continue;

    // Now check if one of the select values would allow us to constant fold
    // the terminator in BB.  We don't do the transform if both sides fold;
    // those cases will be threaded in any case.
    LazyValueInfo::Tristate LHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1),
                                CondRHS, Pred, BB, CondCmp);
    LazyValueInfo::Tristate RHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2),
                                CondRHS, Pred, BB, CondCmp);
    if ((LHSFolds != LazyValueInfo::Unknown ||
         RHSFolds != LazyValueInfo::Unknown) &&
        LHSFolds != RHSFolds) {
      // Expand the select.
      //
      // Pred --
      //  |    v
      //  |  NewBB
      //  |    |
      //  |-----
      //  v
      // BB
      BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
                                             BB->getParent(), BB);
      // Move the unconditional branch to NewBB.
      PredTerm->removeFromParent();
      NewBB->getInstList().insert(NewBB->end(), PredTerm);
      // Create a conditional branch and update PHI nodes.
      BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
      CondLHS->setIncomingValue(I, SI->getFalseValue());
      CondLHS->addIncoming(SI->getTrueValue(), NewBB);
      // The select is now dead.
      SI->eraseFromParent();

      // Update any other PHI nodes in BB.
      for (BasicBlock::iterator BI = BB->begin();
           PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
        if (Phi != CondLHS)
          Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
      return true;
    }
  }
  return false;
}

/// TryToUnfoldSelectInCurrBB - Look for a PHI/Select pair in the same BB of
/// the form
/// bb:
///   %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ...
///   %s = select %p, trueval, falseval
///
/// And expand the select into a branch structure.  This later enables
/// jump-threading over bb in this pass.
///
/// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold the
/// select if the associated PHI has at least one constant.  If the unfolded
/// select is not jump-threaded, it will be folded again by later
/// optimizations.
bool JumpThreadingPass::TryToUnfoldSelectInCurrBB(BasicBlock *BB) {
  // If threading this would thread across a loop header, don't thread the
  // edge.  See the comments above FindLoopHeaders for justifications and
  // caveats.
  if (LoopHeaders.count(BB))
    return false;

  // Look for a Phi/Select pair in the same basic block.
  // The Phi feeds the condition of the Select, and at least one of the
  // incoming values is a constant.
  for (BasicBlock::iterator BI = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
    unsigned NumPHIValues = PN->getNumIncomingValues();
    if (NumPHIValues == 0 || !PN->hasOneUse())
      continue;

    SelectInst *SI = dyn_cast<SelectInst>(PN->user_back());
    if (!SI || SI->getParent() != BB)
      continue;

    Value *Cond = SI->getCondition();
    if (!Cond || Cond != PN || !Cond->getType()->isIntegerTy(1))
      continue;

    bool HasConst = false;
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      if (PN->getIncomingBlock(i) == BB)
        return false;
      if (isa<ConstantInt>(PN->getIncomingValue(i)))
        HasConst = true;
    }

    if (HasConst) {
      // Expand the select.
      TerminatorInst *Term =
          SplitBlockAndInsertIfThen(SI->getCondition(), SI, false);
      PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
      NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
      NewPN->addIncoming(SI->getFalseValue(), BB);
      SI->replaceAllUsesWith(NewPN);
      SI->eraseFromParent();
      return true;
    }
  }

  return false;
}