//===- JumpThreading.cpp - Thread control through conditional blocks -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Jump Threading pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/JumpThreading.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <memory>
using namespace llvm;
using namespace jumpthreading;

#define DEBUG_TYPE "jump-threading"

STATISTIC(NumThreads, "Number of jumps threaded");
STATISTIC(NumFolds, "Number of terminators folded");
STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");

static cl::opt<unsigned>
BBDuplicateThreshold("jump-threading-threshold",
          cl::desc("Max block size to duplicate for jump threading"),
          cl::init(6), cl::Hidden);

static cl::opt<unsigned>
ImplicationSearchThreshold(
  "jump-threading-implication-search-threshold",
  cl::desc("The number of predecessors to search for a stronger "
           "condition to use to thread over a weaker condition"),
  cl::init(3), cl::Hidden);

namespace {
  /// This pass performs 'jump threading', which looks at blocks that have
  /// multiple predecessors and multiple successors.  If one or more of the
  /// predecessors of the block can be proven to always jump to one of the
  /// successors, we forward the edge from the predecessor to the successor by
  /// duplicating the contents of this block.
  ///
  /// An example of when this can occur is code like this:
  ///
  ///   if () { ...
  ///     X = 4;
  ///   }
  ///   if (X < 3) {
  ///
  /// In this case, the unconditional branch at the end of the first if can be
  /// revectored to the false side of the second if.
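  ///
  /// A simplified IR-level sketch of the same situation (block and value
  /// names here are illustrative, not taken from real compiler output):
  ///
  ///   pred:                      ; stores 4 to X
  ///     store i32 4, i32* %X
  ///     br label %merge
  ///   merge:
  ///     %v = load i32, i32* %X
  ///     %c = icmp slt i32 %v, 3
  ///     br i1 %c, label %then, label %else
  ///
  /// On the edge from pred, %v is known to be 4, so %c is known false; the
  /// edge can therefore be redirected straight to %else, duplicating any
  /// needed instructions from merge along the way.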
  ///
  class JumpThreading : public FunctionPass {
    JumpThreadingPass Impl;

  public:
    static char ID; // Pass identification
    JumpThreading(int T = -1) : FunctionPass(ID), Impl(T) {
      initializeJumpThreadingPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<AAResultsWrapperPass>();
      AU.addRequired<LazyValueInfoWrapperPass>();
      AU.addPreserved<LazyValueInfoWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
    }

    void releaseMemory() override { Impl.releaseMemory(); }
  };
}

char JumpThreading::ID = 0;
INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading",
                      "Jump Threading", false, false)
INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(JumpThreading, "jump-threading",
                    "Jump Threading", false, false)

// Public interface to the Jump Threading pass
FunctionPass *llvm::createJumpThreadingPass(int Threshold) {
  return new JumpThreading(Threshold);
}

JumpThreadingPass::JumpThreadingPass(int T) {
  BBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T);
}

/// runOnFunction - Top level algorithm.
///
bool JumpThreading::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  bool HasProfileData = F.getEntryCount().hasValue();
  if (HasProfileData) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI, TLI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  return Impl.runImpl(F, TLI, LVI, AA, HasProfileData, std::move(BFI),
                      std::move(BPI));
}

PreservedAnalyses JumpThreadingPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {

  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &LVI = AM.getResult<LazyValueAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);

  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  bool HasProfileData = F.getEntryCount().hasValue();
  if (HasProfileData) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI, &TLI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  bool Changed = runImpl(F, &TLI, &LVI, &AA, HasProfileData, std::move(BFI),
                         std::move(BPI));

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  return PA;
}

bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
                                LazyValueInfo *LVI_, AliasAnalysis *AA_,
                                bool HasProfileData_,
                                std::unique_ptr<BlockFrequencyInfo> BFI_,
                                std::unique_ptr<BranchProbabilityInfo> BPI_) {

  DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
  TLI = TLI_;
  LVI = LVI_;
  AA = AA_;
  BFI.reset();
  BPI.reset();
  // When profile data is available, we need to update edge weights after
  // successful jump threading, which requires both BPI and BFI being
  // available.
  HasProfileData = HasProfileData_;
  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
  if (HasProfileData) {
    BPI = std::move(BPI_);
    BFI = std::move(BFI_);
  }

  // Remove unreachable blocks from the function, as they may result in an
  // infinite loop. We do threading if we found something profitable. Jump
  // threading a branch can create other opportunities. If these opportunities
  // form a cycle, i.e. if any jump threading is undoing previous threading in
  // the path, then we will loop forever. We take care of this issue by not
  // jump threading for back edges. This works for normal cases but not for
  // unreachable blocks, as they may have a cycle with no back edge.
  bool EverChanged = false;
  EverChanged |= removeUnreachableBlocks(F, LVI);

  FindLoopHeaders(F);

  bool Changed;
  do {
    Changed = false;
    for (Function::iterator I = F.begin(), E = F.end(); I != E;) {
      BasicBlock *BB = &*I;
      // Thread all of the branches we can over this block.
      while (ProcessBlock(BB))
        Changed = true;

      ++I;

      // If the block is trivially dead, zap it. This eliminates the successor
      // edges, which simplifies the CFG.
      if (pred_empty(BB) &&
          BB != &BB->getParent()->getEntryBlock()) {
        DEBUG(dbgs() << "  JT: Deleting dead block '" << BB->getName()
                     << "' with terminator: " << *BB->getTerminator() << '\n');
        LoopHeaders.erase(BB);
        LVI->eraseBlock(BB);
        DeleteDeadBlock(BB);
        Changed = true;
        continue;
      }

      BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());

      // Can't thread an unconditional jump, but if the block is "almost
      // empty", we can replace uses of it with uses of the successor and make
      // this dead.
      // We should not eliminate the loop header either, because eliminating
      // a loop header might later prevent LoopSimplify from transforming
      // nested loops into simplified form.
      if (BI && BI->isUnconditional() &&
          BB != &BB->getParent()->getEntryBlock() &&
          // If the terminator is the only non-phi instruction, try to nuke it.
          BB->getFirstNonPHIOrDbg()->isTerminator() && !LoopHeaders.count(BB)) {
        // FIXME: It is always conservatively correct to drop the info
        // for a block even if it doesn't get erased. This isn't totally
        // awesome, but it allows us to use AssertingVH to prevent nasty
        // dangling pointer issues within LazyValueInfo.
        LVI->eraseBlock(BB);
        if (TryToSimplifyUncondBranchFromEmptyBlock(BB))
          Changed = true;
      }
    }
    EverChanged |= Changed;
  } while (Changed);

  LoopHeaders.clear();
  return EverChanged;
}

// Replace uses of Cond with ToVal when safe to do so. If all uses are
// replaced, we can remove Cond. We cannot blindly replace all uses of Cond
// because we may incorrectly replace uses when guards/assumes are uses of
// `Cond` and we used the guards/assumes to reason about the `Cond` value
// at the end of the block. RAUW unconditionally replaces all uses,
// including the guards/assumes themselves and the uses before the
// guard/assume.
static void ReplaceFoldableUses(Instruction *Cond, Value *ToVal) {
  assert(Cond->getType() == ToVal->getType());
  auto *BB = Cond->getParent();
  // We can unconditionally replace all uses in non-local blocks (i.e. uses
  // strictly dominated by BB), since LVI information is true from the
  // terminator of BB.
  replaceNonLocalUsesWith(Cond, ToVal);
  for (Instruction &I : reverse(*BB)) {
    // Reached the Cond whose uses we are trying to replace, so there are no
    // more uses.
    if (&I == Cond)
      break;
    // We only replace uses in instructions that are guaranteed to reach the
    // end of BB, where we know Cond is ToVal.
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
    I.replaceUsesOfWith(Cond, ToVal);
  }
  if (Cond->use_empty() && !Cond->mayHaveSideEffects())
    Cond->eraseFromParent();
}

/// Return the cost of duplicating a piece of this block, from its first
/// non-phi instruction up to (but not including) StopAt, in order to thread
/// across it. Stop scanning the block when exceeding the threshold. If
/// duplication is impossible, returns ~0U.
static unsigned getJumpThreadDuplicationCost(BasicBlock *BB,
                                             Instruction *StopAt,
                                             unsigned Threshold) {
  assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");
  // Ignore PHI nodes; these will be flattened when duplication happens.
  BasicBlock::const_iterator I(BB->getFirstNonPHI());

  // FIXME: THREADING will delete values that are just used to compute the
  // branch, so they shouldn't count against the duplication cost.

  unsigned Bonus = 0;
  if (BB->getTerminator() == StopAt) {
    // Threading through a switch statement is particularly profitable. If this
    // block ends in a switch, decrease its cost to make it more likely to
    // happen.
    if (isa<SwitchInst>(StopAt))
      Bonus = 6;

    // The same holds for indirect branches, but slightly more so.
    if (isa<IndirectBrInst>(StopAt))
      Bonus = 8;
  }

  // Bump the threshold up so the early exit from the loop doesn't skip the
  // terminator-based Size adjustment at the end.
  Threshold += Bonus;

  // Sum up the cost of each instruction until we get to the terminator. Don't
  // include the terminator because the copy won't include it.
  unsigned Size = 0;
  for (; &*I != StopAt; ++I) {

    // Stop scanning the block if we've reached the threshold.
    if (Size > Threshold)
      return Size;

    // Debugger intrinsics don't incur code size.
    if (isa<DbgInfoIntrinsic>(I)) continue;

    // If this is a pointer->pointer bitcast, it is free.
    if (isa<BitCastInst>(I) && I->getType()->isPointerTy())
      continue;

    // Bail out if this instruction gives back a token type; it is not possible
    // to duplicate it if it is used outside this BB.
    if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
      return ~0U;

    // All other instructions count for at least one unit.
    ++Size;

    // Calls are more expensive. If they are non-intrinsic calls, we model them
    // as having cost of 4. If they are a non-vector intrinsic, we model them
    // as having cost of 2 total, and if they are a vector intrinsic, we model
    // them as having cost 1.
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      if (CI->cannotDuplicate() || CI->isConvergent())
        // Blocks with NoDuplicate are modelled as having infinite cost, so
        // they are never duplicated.
        return ~0U;
      else if (!isa<IntrinsicInst>(CI))
        Size += 3;
      else if (!CI->getType()->isVectorTy())
        Size += 1;
    }
  }

  return Size > Bonus ? Size - Bonus : 0;
}

/// FindLoopHeaders - We do not want jump threading to turn proper loop
/// structures into irreducible loops. Doing this breaks up the loop nesting
/// hierarchy and pessimizes later transformations. To prevent this from
/// happening, we first have to find the loop headers. Here we approximate this
/// by finding the targets of backedges in the CFG.
///
/// Note that there definitely are cases when we want to allow threading of
/// edges across a loop header. For example, threading a jump from outside the
/// loop (the preheader) to an exit block of the loop is definitely profitable.
/// It is also almost always profitable to thread backedges from within the
/// loop to exit blocks, and it is often profitable to thread backedges to
/// other blocks within the loop (forming a nested loop). This simple analysis
/// is not rich enough to track all of these properties and keep them up to
/// date as the CFG mutates, so we don't allow any of these transformations.
///
void JumpThreadingPass::FindLoopHeaders(Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  for (const auto &Edge : Edges)
    LoopHeaders.insert(Edge.second);
}

/// getKnownConstant - Helper method to determine if we can thread over a
/// terminator with the given value as its condition, and if so what value to
/// use for that. What kind of value this is depends on whether we want an
/// integer or a block address, but an undef is always accepted.
/// Returns null if Val is null or not an appropriate constant.
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
  if (!Val)
    return nullptr;

  // Undef is "known" enough.
  if (UndefValue *U = dyn_cast<UndefValue>(Val))
    return U;

  if (Preference == WantBlockAddress)
    return dyn_cast<BlockAddress>(Val->stripPointerCasts());

  return dyn_cast<ConstantInt>(Val);
}

/// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
/// in any of our predecessors. If so, return the known (value, predecessor BB)
/// pairs in the result vector.
///
/// This returns true if there were any known values.
///
bool JumpThreadingPass::ComputeValueKnownInPredecessors(
    Value *V, BasicBlock *BB, PredValueInfo &Result,
    ConstantPreference Preference, Instruction *CxtI) {
  // This method walks up use-def chains recursively. Because of this, we could
  // get into an infinite loop going around loops in the use-def chain. To
  // prevent this, keep track of what (value, block) pairs we've already
  // visited and terminate the search if we loop back to them.
  if (!RecursionSet.insert(std::make_pair(V, BB)).second)
    return false;

  // An RAII helper to remove this pair from the recursion set once the
  // recursion stack pops back out again.
  RecursionSetRemover remover(RecursionSet, std::make_pair(V, BB));

  // If V is a constant, then it is known in all predecessors.
  if (Constant *KC = getKnownConstant(V, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.push_back(std::make_pair(KC, Pred));

    return !Result.empty();
  }

  // If V is a non-instruction value, or an instruction in a different block,
  // then it can't be derived from a PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || I->getParent() != BB) {

    // Okay, if this is a live-in value, see if it has a known value at the end
    // of any of our predecessors.
    //
    // FIXME: This should be an edge property, not a block end property.
    // TODO: Per PR2563, we could infer value range information about a
    // predecessor based on its terminator.
    //
    // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
    // "I" is a non-local compare-with-a-constant instruction. This would be
    // able to handle value inequalities better, for example if the compare is
    // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
    // Perhaps getConstantOnEdge should be smart enough to do this?

    for (BasicBlock *P : predecessors(BB)) {
      // If the value is known by LazyValueInfo to be a constant in a
      // predecessor, use that information to try to thread this block.
      Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
      if (Constant *KC = getKnownConstant(PredCst, Preference))
        Result.push_back(std::make_pair(KC, P));
    }

    return !Result.empty();
  }

  // If I is a PHI node, then we know the incoming values for any constants.
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      if (Constant *KC = getKnownConstant(InVal, Preference)) {
        Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
      } else {
        Constant *CI = LVI->getConstantOnEdge(InVal,
                                              PN->getIncomingBlock(i),
                                              BB, CxtI);
        if (Constant *KC = getKnownConstant(CI, Preference))
          Result.push_back(std::make_pair(KC, PN->getIncomingBlock(i)));
      }
    }

    return !Result.empty();
  }

  // Handle Cast instructions. Only look through a cast when the source operand
  // is a PHI or Cmp and the source type is i1, to save compilation time.
  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Source = CI->getOperand(0);
    if (!Source->getType()->isIntegerTy(1))
      return false;
    if (!isa<PHINode>(Source) && !isa<CmpInst>(Source))
      return false;
    ComputeValueKnownInPredecessors(Source, BB, Result, Preference, CxtI);
    if (Result.empty())
      return false;

    // Convert the known values.
    for (auto &R : Result)
      R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());

    return true;
  }

  PredValueInfoTy LHSVals, RHSVals;

  // Handle some boolean conditions.
  if (I->getType()->getPrimitiveSizeInBits() == 1) {
    assert(Preference == WantInteger && "One-bit non-integer type?");
    // X | true -> true
    // X & false -> false
    if (I->getOpcode() == Instruction::Or ||
        I->getOpcode() == Instruction::And) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);
      ComputeValueKnownInPredecessors(I->getOperand(1), BB, RHSVals,
                                      WantInteger, CxtI);

      if (LHSVals.empty() && RHSVals.empty())
        return false;

      ConstantInt *InterestingVal;
      if (I->getOpcode() == Instruction::Or)
        InterestingVal = ConstantInt::getTrue(I->getContext());
      else
        InterestingVal = ConstantInt::getFalse(I->getContext());

      SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;

      // Scan for the sentinel. If we find an undef, force it to the
      // interesting value: x|undef -> true and x&undef -> false.
      for (const auto &LHSVal : LHSVals)
        if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
          Result.emplace_back(InterestingVal, LHSVal.second);
          LHSKnownBBs.insert(LHSVal.second);
        }
      for (const auto &RHSVal : RHSVals)
        if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
          // If we already inferred a value for this block on the LHS, don't
          // re-add it.
          if (!LHSKnownBBs.count(RHSVal.second))
            Result.emplace_back(InterestingVal, RHSVal.second);
        }

      return !Result.empty();
    }

    // Handle the NOT form of XOR.
    if (I->getOpcode() == Instruction::Xor &&
        isa<ConstantInt>(I->getOperand(1)) &&
        cast<ConstantInt>(I->getOperand(1))->isOne()) {
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, Result,
                                      WantInteger, CxtI);
      if (Result.empty())
        return false;

      // Invert the known values.
      for (auto &R : Result)
        R.first = ConstantExpr::getNot(R.first);

      return true;
    }

  // Try to simplify some other binary operator values.
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    assert(Preference != WantBlockAddress
           && "A binary operator creating a block address?");
    if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
      PredValueInfoTy LHSVals;
      ComputeValueKnownInPredecessors(BO->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);

      // Try to use constant folding to simplify the binary operator.
      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);

        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.push_back(std::make_pair(KC, LHSVal.second));
      }
    }

    return !Result.empty();
  }

  // Handle compare with phi operand, where the PHI is defined in this block.
  if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
    assert(Preference == WantInteger && "Compares only produce integers");
    Type *CmpType = Cmp->getType();
    Value *CmpLHS = Cmp->getOperand(0);
    Value *CmpRHS = Cmp->getOperand(1);
    CmpInst::Predicate Pred = Cmp->getPredicate();

    PHINode *PN = dyn_cast<PHINode>(CmpLHS);
    if (PN && PN->getParent() == BB) {
      const DataLayout &DL = PN->getModule()->getDataLayout();
      // We can do this simplification if any comparisons fold to true or
      // false. See if any do.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *PredBB = PN->getIncomingBlock(i);
        Value *LHS = PN->getIncomingValue(i);
        Value *RHS = CmpRHS->DoPHITranslation(BB, PredBB);

        Value *Res = SimplifyCmpInst(Pred, LHS, RHS, {DL});
        if (!Res) {
          if (!isa<Constant>(RHS))
            continue;

          LazyValueInfo::Tristate
            ResT = LVI->getPredicateOnEdge(Pred, LHS,
                                           cast<Constant>(RHS), PredBB, BB,
                                           CxtI ? CxtI : Cmp);
          if (ResT == LazyValueInfo::Unknown)
            continue;
          Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
        }

        if (Constant *KC = getKnownConstant(Res, WantInteger))
          Result.push_back(std::make_pair(KC, PredBB));
      }

      return !Result.empty();
    }

    // If comparing a live-in value against a constant, see if we know the
    // live-in value on any predecessors.
    if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) {
      Constant *CmpConst = cast<Constant>(CmpRHS);

      if (!isa<Instruction>(CmpLHS) ||
          cast<Instruction>(CmpLHS)->getParent() != BB) {
        for (BasicBlock *P : predecessors(BB)) {
          // If the value is known by LazyValueInfo to be a constant in a
          // predecessor, use that information to try to thread this block.
          LazyValueInfo::Tristate Res =
            LVI->getPredicateOnEdge(Pred, CmpLHS,
                                    CmpConst, P, BB, CxtI ? CxtI : Cmp);
          if (Res == LazyValueInfo::Unknown)
            continue;

          Constant *ResC = ConstantInt::get(CmpType, Res);
          Result.push_back(std::make_pair(ResC, P));
        }

        return !Result.empty();
      }

      // InstCombine can fold some forms of constant range checks into
      // (icmp (add (x, C1)), C2). See if we have such a thing with
      // x as a live-in.
      {
        using namespace PatternMatch;
        Value *AddLHS;
        ConstantInt *AddConst;
        if (isa<ConstantInt>(CmpConst) &&
            match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) {
          if (!isa<Instruction>(AddLHS) ||
              cast<Instruction>(AddLHS)->getParent() != BB) {
            for (BasicBlock *P : predecessors(BB)) {
              // If the value is known by LazyValueInfo to be a ConstantRange
              // in a predecessor, use that information to try to thread this
              // block.
              ConstantRange CR = LVI->getConstantRangeOnEdge(
                  AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS));
              // Propagate the range through the addition.
              CR = CR.add(AddConst->getValue());

              // Get the range where the compare returns true.
              ConstantRange CmpRange = ConstantRange::makeExactICmpRegion(
                  Pred, cast<ConstantInt>(CmpConst)->getValue());

              Constant *ResC;
              if (CmpRange.contains(CR))
                ResC = ConstantInt::getTrue(CmpType);
              else if (CmpRange.inverse().contains(CR))
                ResC = ConstantInt::getFalse(CmpType);
              else
                continue;

              Result.push_back(std::make_pair(ResC, P));
            }

            return !Result.empty();
          }
        }
      }

      // Try to find a constant value for the LHS of a comparison,
      // and evaluate it statically if we can.
      PredValueInfoTy LHSVals;
      ComputeValueKnownInPredecessors(I->getOperand(0), BB, LHSVals,
                                      WantInteger, CxtI);

      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst);
        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.push_back(std::make_pair(KC, LHSVal.second));
      }

      return !Result.empty();
    }
  }

  if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
    // Handle select instructions where at least one operand is a known
    // constant and we can figure out the condition value for any predecessor
    // block.
    Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
    Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
    PredValueInfoTy Conds;
    if ((TrueVal || FalseVal) &&
        ComputeValueKnownInPredecessors(SI->getCondition(), BB, Conds,
                                        WantInteger, CxtI)) {
      for (auto &C : Conds) {
        Constant *Cond = C.first;

        // Figure out what value to use for the condition.
        bool KnownCond;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
          // A known boolean.
          KnownCond = CI->isOne();
        } else {
          assert(isa<UndefValue>(Cond) && "Unexpected condition value");
          // Either operand will do, so be sure to pick the one that's a known
          // constant.
          // FIXME: Do this more cleverly if both values are known constants?
          KnownCond = (TrueVal != nullptr);
        }

        // See if the select has a known constant value for this predecessor.
        if (Constant *Val = KnownCond ? TrueVal : FalseVal)
          Result.push_back(std::make_pair(Val, C.second));
      }

      return !Result.empty();
    }
  }

  // If all else fails, see if LVI can figure out a constant value for us.
  Constant *CI = LVI->getConstant(V, BB, CxtI);
  if (Constant *KC = getKnownConstant(CI, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.push_back(std::make_pair(KC, Pred));
  }

  return !Result.empty();
}



/// GetBestDestForJumpOnUndef - If we determine that the specified block ends
/// in an undefined jump, decide which block is best to revector to.
///
/// Since we can pick an arbitrary destination, we pick the successor with the
/// fewest predecessors. This should reduce the in-degree of the others.
///
static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
  TerminatorInst *BBTerm = BB->getTerminator();
  unsigned MinSucc = 0;
  BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
  // Compute the successor with the minimum number of predecessors.
  unsigned MinNumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
  for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
    TestBB = BBTerm->getSuccessor(i);
    unsigned NumPreds = std::distance(pred_begin(TestBB), pred_end(TestBB));
    if (NumPreds < MinNumPreds) {
      MinSucc = i;
      MinNumPreds = NumPreds;
    }
  }

  return MinSucc;
}

static bool hasAddressTakenAndUsed(BasicBlock *BB) {
  if (!BB->hasAddressTaken()) return false;

  // If the block has its address taken, it may be a tree of dead constants
  // hanging off of it. These shouldn't keep the block alive.
  BlockAddress *BA = BlockAddress::get(BB);
  BA->removeDeadConstantUsers();
  return !BA->use_empty();
}

/// ProcessBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
  // If the block is trivially dead, just return and let the caller nuke it.
  // This simplifies other transformations.
  if (pred_empty(BB) &&
      BB != &BB->getParent()->getEntryBlock())
    return false;

  // If this block has a single predecessor, and if that pred has a single
  // successor, merge the blocks. This encourages recursive jump threading
  // because now the condition in this block can be threaded through
  // predecessors of our predecessor block.
  if (BasicBlock *SinglePred = BB->getSinglePredecessor()) {
    const TerminatorInst *TI = SinglePred->getTerminator();
    if (!TI->isExceptional() && TI->getNumSuccessors() == 1 &&
        SinglePred != BB && !hasAddressTakenAndUsed(BB)) {
      // If SinglePred was a loop header, BB becomes one.
      if (LoopHeaders.erase(SinglePred))
        LoopHeaders.insert(BB);

      LVI->eraseBlock(SinglePred);
      MergeBasicBlockIntoOnlyPred(BB);

      // Now that BB is merged into SinglePred (i.e. SinglePred's code followed
      // by BB's code within one basic block `BB`), we need to invalidate the
      // LVI information associated with BB, because the LVI information need
      // not be true for all of BB after the merge.
      //
      // For example, before the merge the LVI info and code were:
      //
      //   SinglePred: <LVI info1 for %p val>
      //     %y = use of %p
      //     call @exit() // need not transfer execution to successor.
      //     assume(%p)   // from this point on %p is true
      //     br label %BB
      //   BB: <LVI info2 for %p val, i.e. %p is true>
      //     %x = use of %p
      //     br label exit
      //
      // Note that the LVI info for blocks BB and SinglePred is correct for %p
      // (info2 and info1 respectively). After the merge and the deletion of
      // LVI info1 for SinglePred, we have the following code:
      //
      //   BB: <LVI info2 for %p val>
      //     %y = use of %p
      //     call @exit()
      //     assume(%p)
      //     %x = use of %p <-- LVI info2 is correct from here onwards.
      //     br label exit
      //
      // LVI info2 for BB is incorrect at the beginning of BB.

      // Invalidate LVI information for BB if the LVI is not provably true for
      // all of BB.
      if (any_of(*BB, [](Instruction &I) {
            return !isGuaranteedToTransferExecutionToSuccessor(&I);
          }))
        LVI->eraseBlock(BB);
      return true;
    }
  }

  if (TryToUnfoldSelectInCurrBB(BB))
    return true;

  // Look if we can propagate guards to predecessors.
  if (HasGuards && ProcessGuards(BB))
    return true;

  // What kind of constant we're looking for.
  ConstantPreference Preference = WantInteger;

  // Look to see if the terminator is a conditional branch, switch or indirect
  // branch; if not, we can't thread it.
  Value *Condition;
  Instruction *Terminator = BB->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
    // Can't thread an unconditional jump.
    if (BI->isUnconditional()) return false;
    Condition = BI->getCondition();
  } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
    Condition = SI->getCondition();
  } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
    // Can't thread an indirect branch with no successors.
    if (IB->getNumSuccessors() == 0) return false;
    Condition = IB->getAddress()->stripPointerCasts();
    Preference = WantBlockAddress;
  } else {
    return false; // Must be an invoke.
  }

  // Run constant folding to see if we can reduce the condition to a simple
  // constant.
  if (Instruction *I = dyn_cast<Instruction>(Condition)) {
    Value *SimpleVal =
        ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
    if (SimpleVal) {
      I->replaceAllUsesWith(SimpleVal);
      if (isInstructionTriviallyDead(I, TLI))
        I->eraseFromParent();
      Condition = SimpleVal;
    }
  }

  // If the terminator is branching on an undef, we can pick any of the
  // successors to branch to. Let GetBestDestForJumpOnUndef decide.
  if (isa<UndefValue>(Condition)) {
    unsigned BestSucc = GetBestDestForJumpOnUndef(BB);

    // Fold the branch/switch.
    TerminatorInst *BBTerm = BB->getTerminator();
    for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
      if (i == BestSucc) continue;
      BBTerm->getSuccessor(i)->removePredecessor(BB, true);
    }

    DEBUG(dbgs() << "  In block '" << BB->getName()
          << "' folding undef terminator: " << *BBTerm << '\n');
    BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
    BBTerm->eraseFromParent();
    return true;
  }

  // If the terminator of this block is branching on a constant, simplify the
  // terminator to an unconditional branch. This can occur due to threading in
  // other blocks.
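  //
  // For example (hypothetical IR), threading elsewhere may leave us with:
  //   br i1 true, label %T, label %F
  // which ConstantFoldTerminator below rewrites to:
  //   br label %T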
  if (getKnownConstant(Condition, Preference)) {
    DEBUG(dbgs() << "  In block '" << BB->getName()
          << "' folding terminator: " << *BB->getTerminator() << '\n');
    ++NumFolds;
    ConstantFoldTerminator(BB, true);
    return true;
  }

  Instruction *CondInst = dyn_cast<Instruction>(Condition);

  // All the rest of our checks depend on the condition being an instruction.
  if (!CondInst) {
    // FIXME: Unify this with code below.
    if (ProcessThreadableEdges(Condition, BB, Preference, Terminator))
      return true;
    return false;
  }

  if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
    // If we're branching on a conditional, LVI might be able to determine
    // its value at the branch instruction. We only handle comparisons
    // against a constant at this time.
    // TODO: This should be extended to handle switches as well.
    BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
    Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
    if (CondBr && CondConst) {
      // We should have returned as soon as we turned a conditional branch
      // into an unconditional one, because it is no longer interesting as
      // far as jump threading is concerned.
      assert(CondBr->isConditional() && "Threading on unconditional terminator");

      LazyValueInfo::Tristate Ret =
        LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
                            CondConst, CondBr);
      if (Ret != LazyValueInfo::Unknown) {
        unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
        unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
        CondBr->getSuccessor(ToRemove)->removePredecessor(BB, true);
        BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
        CondBr->eraseFromParent();
        if (CondCmp->use_empty())
          CondCmp->eraseFromParent();
        // We can safely replace *some* uses of the CondInst if it has
        // exactly one value as returned by LVI. RAUW is incorrect in the
        // presence of guards and assumes that have `Cond` as a use. This
        // is because we use the guards/assumes to reason about the `Cond`
        // value at the end of the block, but RAUW unconditionally replaces
        // all uses, including the guards/assumes themselves and the uses
        // before the guard/assume.
        else if (CondCmp->getParent() == BB) {
          auto *CI = Ret == LazyValueInfo::True ?
            ConstantInt::getTrue(CondCmp->getType()) :
            ConstantInt::getFalse(CondCmp->getType());
          ReplaceFoldableUses(CondCmp, CI);
        }
        return true;
      }

      // We did not manage to simplify this branch, try to see whether
      // CondCmp depends on a known phi-select pattern.
      if (TryToUnfoldSelect(CondCmp, BB))
        return true;
    }
  }

  // Check for some cases that are worth simplifying. Right now we want to look
  // for loads that are used by a switch or by the condition for the branch. If
  // we see one, check to see if it's partially redundant. If so, insert a PHI
  // which can then be used to thread the values.
  //
  Value *SimplifyValue = CondInst;
  if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
    if (isa<Constant>(CondCmp->getOperand(1)))
      SimplifyValue = CondCmp->getOperand(0);

  // TODO: There are other places where load PRE would be profitable, such as
  // more complex comparisons.
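  //
  // For example (hypothetical IR), a branch of the form:
  //   %v = load i32, i32* %P
  //   %c = icmp eq i32 %v, 42
  //   br i1 %c, ...
  // qualifies: SimplifyValue is then the load feeding the compare, and if that
  // load turns out to be partially redundant, we can PHI together the values
  // already available in the predecessors instead of reloading.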
  if (LoadInst *LI = dyn_cast<LoadInst>(SimplifyValue))
    if (SimplifyPartiallyRedundantLoad(LI))
      return true;

  // Handle a variety of cases where we are branching on something derived from
  // a PHI node in the current block. If we can prove that any predecessors
  // compute a predictable value based on a PHI node, thread those predecessors.
  //
  if (ProcessThreadableEdges(CondInst, BB, Preference, Terminator))
    return true;

  // If this is an otherwise-unfoldable branch on a phi node in the current
  // block, see if we can simplify.
  if (PHINode *PN = dyn_cast<PHINode>(CondInst))
    if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
      return ProcessBranchOnPHI(PN);

  // If this is an otherwise-unfoldable branch on an xor, see if we can
  // simplify.
  if (CondInst->getOpcode() == Instruction::Xor &&
      CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
    return ProcessBranchOnXOR(cast<BinaryOperator>(CondInst));

  // Search for a stronger dominating condition that can be used to simplify a
  // conditional branch leaving BB.
  if (ProcessImpliedCondition(BB))
    return true;

  return false;
}

bool JumpThreadingPass::ProcessImpliedCondition(BasicBlock *BB) {
  auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  BasicBlock *CurrentBB = BB;
  BasicBlock *CurrentPred = BB->getSinglePredecessor();
  unsigned Iter = 0;

  auto &DL = BB->getModule()->getDataLayout();

  while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
    auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
    if (!PBI || !PBI->isConditional())
      return false;
    if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
      return false;

    bool FalseDest = PBI->getSuccessor(1) == CurrentBB;
    Optional<bool> Implication =
        isImpliedCondition(PBI->getCondition(), Cond, DL, FalseDest);
    if (Implication) {
      BI->getSuccessor(*Implication ? 1 : 0)->removePredecessor(BB);
      BranchInst::Create(BI->getSuccessor(*Implication ? 0 : 1), BI);
      BI->eraseFromParent();
      return true;
    }
    CurrentBB = CurrentPred;
    CurrentPred = CurrentBB->getSinglePredecessor();
  }

  return false;
}

/// Return true if Op is an instruction defined in the given block.
static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) {
  if (Instruction *OpInst = dyn_cast<Instruction>(Op))
    if (OpInst->getParent() == BB)
      return true;
  return false;
}

/// SimplifyPartiallyRedundantLoad - If LI is an obviously partially redundant
/// load instruction, eliminate it by replacing it with a PHI node. This is an
/// important optimization that encourages jump threading, and needs to be run
/// interlaced with other jump threading tasks.
bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LI) {
  // Don't hack volatile and ordered loads.
  if (!LI->isUnordered()) return false;

  // If the load is defined in a block with exactly one predecessor, it can't
  // be partially redundant.
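  // (With a single predecessor there is only one path into the block, so the
  // load is either fully redundant or not redundant at all; partial redundancy
  // requires multiple incoming paths.)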
  BasicBlock *LoadBB = LI->getParent();
  if (LoadBB->getSinglePredecessor())
    return false;

  // If the load is defined in an EH pad, it can't be partially redundant,
  // because the edges between the invoke and the EH pad cannot have other
  // instructions between them.
  if (LoadBB->isEHPad())
    return false;

  Value *LoadedPtr = LI->getOperand(0);

  // If the loaded operand is defined in the LoadBB and it's not a phi,
  // it can't be available in predecessors.
  if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr))
    return false;

  // Scan a few instructions up from the load, to see if it is obviously live
  // at the entry to its block.
  BasicBlock::iterator BBIt(LI);
  bool IsLoadCSE;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          LI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    // If the value of the load is locally available within the block, just use
    // it. This frequently occurs for reg2mem'd allocas.

    if (IsLoadCSE) {
      LoadInst *NLI = cast<LoadInst>(AvailableVal);
      combineMetadataForCSE(NLI, LI);
    }

    // If the returned value is the load itself, replace it with undef. This
    // can only happen in dead loops.
    if (AvailableVal == LI) AvailableVal = UndefValue::get(LI->getType());
    if (AvailableVal->getType() != LI->getType())
      AvailableVal =
          CastInst::CreateBitOrPointerCast(AvailableVal, LI->getType(), "", LI);
    LI->replaceAllUsesWith(AvailableVal);
    LI->eraseFromParent();
    return true;
  }

  // Otherwise, if we scanned the whole block and got to the top of the block,
  // we know the block is locally transparent to the load. If not, something
  // might clobber its value.
  if (BBIt != LoadBB->begin())
    return false;

  // If all of the loads and stores that feed the value have the same AA tags,
  // then we can propagate them onto any newly inserted loads.
  AAMDNodes AATags;
  LI->getAAMetadata(AATags);

  SmallPtrSet<BasicBlock*, 8> PredsScanned;
  typedef SmallVector<std::pair<BasicBlock*, Value*>, 8> AvailablePredsTy;
  AvailablePredsTy AvailablePreds;
  BasicBlock *OneUnavailablePred = nullptr;
  SmallVector<LoadInst*, 8> CSELoads;

  // If we got here, the loaded value is transparent through to the start of
  // the block. Check to see if it is available in any of the predecessor
  // blocks.
  for (BasicBlock *PredBB : predecessors(LoadBB)) {
    // If we already scanned this predecessor, skip it.
    if (!PredsScanned.insert(PredBB).second)
      continue;

    BBIt = PredBB->end();
    unsigned NumScanedInst = 0;
    Value *PredAvailable = nullptr;
    // NOTE: We don't CSE loads that are volatile or anything stronger than
    // unordered; that should have been checked when we entered the function.
    assert(LI->isUnordered() && "Attempting to CSE volatile or atomic loads");
    // If this is a load on a phi pointer, phi-translate it and search
    // for available load/store to the pointer in predecessors.
    Value *Ptr = LoadedPtr->DoPHITranslation(LoadBB, PredBB);
    PredAvailable = FindAvailablePtrLoadStore(
        Ptr, LI->getType(), LI->isAtomic(), PredBB, BBIt, DefMaxInstsToScan,
        AA, &IsLoadCSE, &NumScanedInst);

    // If PredBB has a single predecessor, continue scanning through the
    // single predecessor.
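    // (This effectively treats a chain of single-predecessor blocks as one
    // extended block for the availability scan, still bounded overall by
    // DefMaxInstsToScan.)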
    BasicBlock *SinglePredBB = PredBB;
    while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() &&
           NumScanedInst < DefMaxInstsToScan) {
      SinglePredBB = SinglePredBB->getSinglePredecessor();
      if (SinglePredBB) {
        BBIt = SinglePredBB->end();
        PredAvailable = FindAvailablePtrLoadStore(
            Ptr, LI->getType(), LI->isAtomic(), SinglePredBB, BBIt,
            (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE,
            &NumScanedInst);
      }
    }

    if (!PredAvailable) {
      OneUnavailablePred = PredBB;
      continue;
    }

    if (IsLoadCSE)
      CSELoads.push_back(cast<LoadInst>(PredAvailable));

    // If so, this load is partially redundant. Remember this info so that we
    // can create a PHI node.
    AvailablePreds.push_back(std::make_pair(PredBB, PredAvailable));
  }

  // If the loaded value isn't available in any predecessor, it isn't partially
  // redundant.
  if (AvailablePreds.empty()) return false;

  // Okay, the loaded value is available in at least one (and maybe all!)
  // predecessors. If the value is unavailable in more than one unique
  // predecessor, we want to insert a merge block for those common
  // predecessors. This ensures that we only have to insert one reload, thus
  // not increasing code size.
  BasicBlock *UnavailablePred = nullptr;

  // If there is exactly one predecessor where the value is unavailable, the
  // already computed 'OneUnavailablePred' block is it. If it ends in an
  // unconditional branch, we know that it isn't a critical edge.
  if (PredsScanned.size() == AvailablePreds.size()+1 &&
      OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
    UnavailablePred = OneUnavailablePred;
  } else if (PredsScanned.size() != AvailablePreds.size()) {
    // Otherwise, we had multiple unavailable predecessors or we had a critical
    // edge from the one.
    SmallVector<BasicBlock*, 8> PredsToSplit;
    SmallPtrSet<BasicBlock*, 8> AvailablePredSet;

    for (const auto &AvailablePred : AvailablePreds)
      AvailablePredSet.insert(AvailablePred.first);

    // Add all the unavailable predecessors to the PredsToSplit list.
    for (BasicBlock *P : predecessors(LoadBB)) {
      // If the predecessor is an indirect goto, we can't split the edge.
      if (isa<IndirectBrInst>(P->getTerminator()))
        return false;

      if (!AvailablePredSet.count(P))
        PredsToSplit.push_back(P);
    }

    // Split them out to their own block.
    UnavailablePred = SplitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
  }

  // If the value isn't available in all predecessors, then there will be
  // exactly one where it isn't available. Insert a load on that edge and add
  // it to the AvailablePreds list.
  if (UnavailablePred) {
    assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
           "Can't handle critical edge here!");
    LoadInst *NewVal = new LoadInst(
        LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
        LI->getName() + ".pr", false, LI->getAlignment(), LI->getOrdering(),
        LI->getSynchScope(), UnavailablePred->getTerminator());
    NewVal->setDebugLoc(LI->getDebugLoc());
    if (AATags)
      NewVal->setAAMetadata(AATags);

    AvailablePreds.push_back(std::make_pair(UnavailablePred, NewVal));
  }

  // Now we know that each predecessor of this block has a value in
  // AvailablePreds; sort them for efficient access as we're walking the preds.
  array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());

  // Create a PHI node at the start of the block for the PRE'd load value.
  pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
  PHINode *PN = PHINode::Create(LI->getType(), std::distance(PB, PE), "",
                                &LoadBB->front());
  PN->takeName(LI);
  PN->setDebugLoc(LI->getDebugLoc());

  // Insert new entries into the PHI for each predecessor. A single block may
  // have multiple entries here.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    AvailablePredsTy::iterator I =
        std::lower_bound(AvailablePreds.begin(), AvailablePreds.end(),
                         std::make_pair(P, (Value*)nullptr));

    assert(I != AvailablePreds.end() && I->first == P &&
           "Didn't find entry for predecessor!");

    // If we have an available predecessor but it requires casting, insert the
    // cast in the predecessor and use the cast. Note that we have to update
    // the AvailablePreds vector as we go so that all of the PHI entries for
    // this predecessor use the same bitcast.
    Value *&PredV = I->second;
    if (PredV->getType() != LI->getType())
      PredV = CastInst::CreateBitOrPointerCast(PredV, LI->getType(), "",
                                               P->getTerminator());

    PN->addIncoming(PredV, I->first);
  }

  for (LoadInst *PredLI : CSELoads) {
    combineMetadataForCSE(PredLI, LI);
  }

  LI->replaceAllUsesWith(PN);
  LI->eraseFromParent();

  return true;
}

/// FindMostPopularDest - The specified list contains multiple possible
/// threadable destinations. Pick the one that occurs the most frequently in
/// the list.
static BasicBlock *
FindMostPopularDest(BasicBlock *BB,
                    const SmallVectorImpl<std::pair<BasicBlock*,
                                  BasicBlock*> > &PredToDestList) {
  assert(!PredToDestList.empty());

  // Determine popularity. If there are multiple possible destinations, we
  // explicitly choose to ignore 'undef' destinations. We prefer to thread
  // blocks with known, real destinations over threading to undef. We'll
  // handle them later if interesting.
  DenseMap<BasicBlock*, unsigned> DestPopularity;
  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second)
      DestPopularity[PredToDest.second]++;

  // Find the most popular dest.
  DenseMap<BasicBlock*, unsigned>::iterator DPI = DestPopularity.begin();
  BasicBlock *MostPopularDest = DPI->first;
  unsigned Popularity = DPI->second;
  SmallVector<BasicBlock*, 4> SamePopularity;

  for (++DPI; DPI != DestPopularity.end(); ++DPI) {
    // If the popularity of this entry isn't higher than the popularity we've
    // seen so far, ignore it.
    if (DPI->second < Popularity)
      ; // ignore.
    else if (DPI->second == Popularity) {
      // If it is the same as what we've seen so far, keep track of it.
      SamePopularity.push_back(DPI->first);
    } else {
      // If it is more popular, remember it.
      SamePopularity.clear();
      MostPopularDest = DPI->first;
      Popularity = DPI->second;
    }
  }

  // Okay, now we know the most popular destination. If there is more than one
  // destination with the same popularity, we need to choose one. This is
  // arbitrary, but we need to make a deterministic decision: pick the first
  // one that appears in the successor list.
  if (!SamePopularity.empty()) {
    SamePopularity.push_back(MostPopularDest);
    TerminatorInst *TI = BB->getTerminator();
    for (unsigned i = 0; ; ++i) {
      assert(i != TI->getNumSuccessors() && "Didn't find any successor!");

      if (!is_contained(SamePopularity, TI->getSuccessor(i)))
        continue;

      MostPopularDest = TI->getSuccessor(i);
      break;
    }
  }

  // Okay, we have finally picked the most popular destination.
  return MostPopularDest;
}

bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB,
                                               ConstantPreference Preference,
                                               Instruction *CxtI) {
  // If threading this would thread across a loop header, don't even try to
  // thread the edge.
  if (LoopHeaders.count(BB))
    return false;

  PredValueInfoTy PredValues;
  if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues, Preference, CxtI))
    return false;

  assert(!PredValues.empty() &&
         "ComputeValueKnownInPredecessors returned true with no values");

  DEBUG(dbgs() << "IN BB: " << *BB;
        for (const auto &PredValue : PredValues) {
          dbgs() << "  BB '" << BB->getName() << "': FOUND condition = "
                 << *PredValue.first
                 << " for pred '" << PredValue.second->getName() << "'.\n";
        });

  // Decide what we want to thread through. Convert our list of known values to
  // a list of known destinations for each pred. This also discards duplicate
  // predecessors and keeps track of the undefined inputs (which are
  // represented as a null dest in the PredToDestList).
  SmallPtrSet<BasicBlock*, 16> SeenPreds;
  SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;

  BasicBlock *OnlyDest = nullptr;
  BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
  Constant *OnlyVal = nullptr;
  Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;

  unsigned PredWithKnownDest = 0;
  for (const auto &PredValue : PredValues) {
    BasicBlock *Pred = PredValue.second;
    if (!SeenPreds.insert(Pred).second)
      continue; // Duplicate predecessor entry.

    Constant *Val = PredValue.first;

    BasicBlock *DestBB;
    if (isa<UndefValue>(Val))
      DestBB = nullptr;
    else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
      assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
      DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
      assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
      DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
    } else {
      assert(isa<IndirectBrInst>(BB->getTerminator())
             && "Unexpected terminator");
      assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
      DestBB = cast<BlockAddress>(Val)->getBasicBlock();
    }

    // If we have exactly one destination, remember it for efficiency below.
    if (PredToDestList.empty()) {
      OnlyDest = DestBB;
      OnlyVal = Val;
    } else {
      if (OnlyDest != DestBB)
        OnlyDest = MultipleDestSentinel;
      // It's possible we have the same destination but a different value,
      // e.g. the default case in a switch.
      if (Val != OnlyVal)
        OnlyVal = MultipleVal;
    }

    // We know where this predecessor is going.
    ++PredWithKnownDest;

    // If the predecessor ends with an indirect goto, we can't change its
    // destination.
    if (isa<IndirectBrInst>(Pred->getTerminator()))
      continue;

    PredToDestList.push_back(std::make_pair(Pred, DestBB));
  }

  // If all edges were unthreadable, we fail.
  if (PredToDestList.empty())
    return false;

  // If all the predecessors go to a single known successor, we want to fold,
  // not thread. By doing so, we do not need to duplicate the current block and
  // we do not miss potential opportunities in cases where we don't/can't
  // duplicate.
  if (OnlyDest && OnlyDest != MultipleDestSentinel) {
    if (PredWithKnownDest ==
        (size_t)std::distance(pred_begin(BB), pred_end(BB))) {
      bool SeenFirstBranchToOnlyDest = false;
      for (BasicBlock *SuccBB : successors(BB)) {
        if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest)
          SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
        else
          // This is an unreachable successor.
          SuccBB->removePredecessor(BB, true);
      }

      // Finally update the terminator.
      TerminatorInst *Term = BB->getTerminator();
      BranchInst::Create(OnlyDest, Term);
      Term->eraseFromParent();

      // If the condition is now dead due to the removal of the old terminator,
      // erase it.
      if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
        if (CondInst->use_empty() && !CondInst->mayHaveSideEffects())
          CondInst->eraseFromParent();
        // We can safely replace *some* uses of the CondInst if it has
        // exactly one value as returned by LVI. RAUW is incorrect in the
        // presence of guards and assumes that have `Cond` as a use. This
        // is because we use the guards/assumes to reason about the `Cond`
        // value at the end of the block, but RAUW unconditionally replaces
        // all uses, including the guards/assumes themselves and the uses
        // before the guard/assume.
        else if (OnlyVal && OnlyVal != MultipleVal &&
                 CondInst->getParent() == BB)
          ReplaceFoldableUses(CondInst, OnlyVal);
      }
      return true;
    }
  }

  // Determine which is the most common successor. If we have many inputs and
  // this block is a switch, we want to start by threading the batch that goes
  // to the most popular destination first. If we only know about one
  // threadable destination (the common case) we can avoid this.
  BasicBlock *MostPopularDest = OnlyDest;

  if (MostPopularDest == MultipleDestSentinel)
    MostPopularDest = FindMostPopularDest(BB, PredToDestList);

  // Now that we know what the most popular destination is, factor all
  // predecessors that will jump to it into a single predecessor.
  SmallVector<BasicBlock*, 16> PredsToFactor;
  for (const auto &PredToDest : PredToDestList)
    if (PredToDest.second == MostPopularDest) {
      BasicBlock *Pred = PredToDest.first;

      // This predecessor may be a switch or something else that has multiple
      // edges to the block. Factor each of these edges by listing them
      // according to # occurrences in PredsToFactor.
      for (BasicBlock *Succ : successors(Pred))
        if (Succ == BB)
          PredsToFactor.push_back(Pred);
    }

  // If the threadable edges are branching on an undefined value, we get to
  // pick the destination that these predecessors should go to.
  if (!MostPopularDest)
    MostPopularDest = BB->getTerminator()->
                          getSuccessor(GetBestDestForJumpOnUndef(BB));

  // Ok, try to thread it!
  return ThreadEdge(BB, PredsToFactor, MostPopularDest);
}

/// ProcessBranchOnPHI - We have an otherwise unthreadable conditional branch
/// on a PHI node in the current block. See if there are any simplifications
/// we can do based on inputs to the phi node.
///
bool JumpThreadingPass::ProcessBranchOnPHI(PHINode *PN) {
  BasicBlock *BB = PN->getParent();

  // TODO: We could make use of this to do it once for blocks with common PHI
  // values.
  SmallVector<BasicBlock*, 1> PredBBs;
  PredBBs.resize(1);

  // If any of the predecessor blocks end in an unconditional branch, we can
  // *duplicate* the conditional branch into that block in order to further
  // encourage jump threading and to eliminate cases where we have a branch on
  // a phi of an icmp (a branch on an icmp is much better).
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *PredBB = PN->getIncomingBlock(i);
    if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()))
      if (PredBr->isUnconditional()) {
        PredBBs[0] = PredBB;
        // Try to duplicate BB into PredBB.
        if (DuplicateCondBranchOnPHIIntoPred(BB, PredBBs))
          return true;
      }
  }

  return false;
}

/// ProcessBranchOnXOR - We have an otherwise unthreadable conditional branch
/// on a xor instruction in the current block. See if there are any
/// simplifications we can do based on inputs to the xor.
///
bool JumpThreadingPass::ProcessBranchOnXOR(BinaryOperator *BO) {
  BasicBlock *BB = BO->getParent();

  // If either the LHS or RHS of the xor is a constant, don't do this
  // optimization.
  if (isa<ConstantInt>(BO->getOperand(0)) ||
      isa<ConstantInt>(BO->getOperand(1)))
    return false;

  // If the first instruction in BB isn't a phi, we won't be able to infer
  // anything special about any particular predecessor.
  if (!isa<PHINode>(BB->front()))
    return false;

  // If this BB is a landing pad, we won't be able to split the edge into it.
  if (BB->isEHPad())
    return false;

  // If we have a xor as the branch input to this block, and we know that the
  // LHS or RHS of the xor in any predecessor is true/false, then we can clone
  // the condition into the predecessor and fix that value to true, saving some
  // logical ops on that path and encouraging other paths to simplify.
  //
  // This copies something like this:
  //
  //  BB:
  //    %X = phi i1 [1], [%X']
  //    %Y = icmp eq i32 %A, %B
  //    %Z = xor i1 %X, %Y
  //    br i1 %Z, ...
  //
  // Into:
  //  BB':
  //    %Y = icmp ne i32 %A, %B
  //    br i1 %Y, ...

  PredValueInfoTy XorOpValues;
  bool isLHS = true;
  if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues,
                                       WantInteger, BO)) {
    assert(XorOpValues.empty());
    if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues,
                                         WantInteger, BO))
      return false;
    isLHS = false;
  }

  assert(!XorOpValues.empty() &&
         "ComputeValueKnownInPredecessors returned true with no values");

  // Scan the information to see which is most popular: true or false. The
  // predecessors can be of the set true, false, or undef.
  unsigned NumTrue = 0, NumFalse = 0;
  for (const auto &XorOpValue : XorOpValues) {
    if (isa<UndefValue>(XorOpValue.first))
      // Ignore undefs for the count.
      continue;
    if (cast<ConstantInt>(XorOpValue.first)->isZero())
      ++NumFalse;
    else
      ++NumTrue;
  }

  // Determine which value to split on, true, false, or undef if neither.
  ConstantInt *SplitVal = nullptr;
  if (NumTrue > NumFalse)
    SplitVal = ConstantInt::getTrue(BB->getContext());
  else if (NumTrue != 0 || NumFalse != 0)
    SplitVal = ConstantInt::getFalse(BB->getContext());

  // Collect all of the blocks that this can be folded into so that we can
  // factor this once and clone it once.
  SmallVector<BasicBlock*, 8> BlocksToFoldInto;
  for (const auto &XorOpValue : XorOpValues) {
    if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first))
      continue;

    BlocksToFoldInto.push_back(XorOpValue.second);
  }

  // If we inferred a value for all of the predecessors, then duplication won't
  // help us. However, we can just replace the LHS or RHS with the constant.
  if (BlocksToFoldInto.size() ==
      cast<PHINode>(BB->front()).getNumIncomingValues()) {
    if (!SplitVal) {
      // If all preds provide undef, just nuke the xor, because it is undef too.
      BO->replaceAllUsesWith(UndefValue::get(BO->getType()));
      BO->eraseFromParent();
    } else if (SplitVal->isZero()) {
      // If all preds provide 0, replace the xor with the other input.
      BO->replaceAllUsesWith(BO->getOperand(isLHS));
      BO->eraseFromParent();
    } else {
      // If all preds provide 1, set the computed value to 1.
      BO->setOperand(!isLHS, SplitVal);
    }

    return true;
  }

  // Try to duplicate BB into PredBB.
  return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto);
}


/// AddPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new
/// predecessor to the PHIBB block. If it has PHI nodes, add entries for
/// NewPred using the entries from OldPred (suitably mapped).
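/// For illustration (hypothetical IR, not taken from a real test case): if
/// PHIBB contains
///   %p = phi i32 [ %x, %OldPred ], [ 7, %other ]
/// and ValueMap maps %x to its clone %x.clone, this adds the entry
/// [ %x.clone, %NewPred ]; if %x has no mapping (e.g. it was defined outside
/// the duplicated block), the original %x is reused for NewPred.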
static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB,
                                            BasicBlock *OldPred,
                                            BasicBlock *NewPred,
                                     DenseMap<Instruction*, Value*> &ValueMap) {
  for (BasicBlock::iterator PNI = PHIBB->begin();
       PHINode *PN = dyn_cast<PHINode>(PNI); ++PNI) {
    // Ok, we have a PHI node. Figure out what the incoming value was for the
    // OldPred block.
    Value *IV = PN->getIncomingValueForBlock(OldPred);

    // Remap the value if necessary.
    if (Instruction *Inst = dyn_cast<Instruction>(IV)) {
      DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst);
      if (I != ValueMap.end())
        IV = I->second;
    }

    PN->addIncoming(IV, NewPred);
  }
}

/// ThreadEdge - We have decided that it is safe and profitable to factor the
/// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB
/// across BB. Transform the IR to reflect this change.
bool JumpThreadingPass::ThreadEdge(BasicBlock *BB,
                                   const SmallVectorImpl<BasicBlock *> &PredBBs,
                                   BasicBlock *SuccBB) {
  // If threading to the same block as we come from, we would infinite loop.
  if (SuccBB == BB) {
    DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
                 << "' - would thread to self!\n");
    return false;
  }

  // If threading this would thread across a loop header, don't thread the edge.
  // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB)) {
    DEBUG(dbgs() << "  Not threading across loop header BB '" << BB->getName()
                 << "' to dest BB '" << SuccBB->getName()
                 << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned JumpThreadCost =
      getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
  if (JumpThreadCost > BBDupThreshold) {
    DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
                 << "' - Cost is too high: " << JumpThreadCost << "\n");
    return false;
  }

  // Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                 << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }

  // And finally, do it!
  DEBUG(dbgs() << "  Threading edge from '" << PredBB->getName() << "' to '"
               << SuccBB->getName() << "' with cost: " << JumpThreadCost
               << ", across block:\n    " << *BB << "\n");

  LVI->threadEdge(PredBB, BB, SuccBB);

  // We are going to have to map operands from the original BB block to the new
  // copy of the block 'NewBB'. If there are PHI nodes in BB, evaluate them to
  // account for entry from PredBB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock *NewBB = BasicBlock::Create(BB->getContext(),
                                         BB->getName()+".thread",
                                         BB->getParent(), BB);
  NewBB->moveAfter(PredBB);

  // Set the block frequency of NewBB.
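  // NewBB inherits the portion of BB's original frequency that PredBB
  // contributed, i.e. freq(PredBB) * prob(PredBB->BB). As a worked example
  // (made-up numbers): with freq(PredBB) = 80 and prob(PredBB->BB) = 0.25,
  // NewBB gets a frequency of 20.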
  if (HasProfileData) {
    auto NewBBFreq =
        BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB);
    BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
  }

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);

  // Clone the non-phi instructions of BB into NewBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; !isa<TerminatorInst>(BI); ++BI) {
    Instruction *New = BI->clone();
    New->setName(BI->getName());
    NewBB->getInstList().push_back(New);
    ValueMapping[&*BI] = New;

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }
  }

  // We didn't copy the terminator from BB over to NewBB, because there is now
  // an unconditional jump to SuccBB. Insert the unconditional jump.
  BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB);
  NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc());

  // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the
  // PHI nodes for NewBB now.
  AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping);

  // If there were values defined in BB that are used outside the block, then we
  // now have to update all uses of the value to use either the original value,
  // the cloned value, or some PHI derived value. This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (Instruction &I : *BB) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Use &U : I.uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }

    // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;

    DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");

    // We found a use of I outside of BB. Rename all uses of I that are outside
    // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
    // with the two values we know.
    SSAUpdate.Initialize(I.getType(), I.getName());
    SSAUpdate.AddAvailableValue(BB, &I);
    SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }

  // Ok, NewBB is good to go. Update the terminator of PredBB to jump to
  // NewBB instead of BB. This eliminates predecessors from BB, which requires
  // us to simplify any PHI nodes in BB.
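  // Schematically (hypothetical CFG, names invented), the retargeting below
  // turns
  //   PredBB -> BB -> { SuccBB, OtherBB }
  // into
  //   PredBB -> NewBB -> SuccBB
  // while BB keeps its remaining predecessors; removePredecessor() updates
  // BB's PHI nodes for the edge that goes away.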
  TerminatorInst *PredTerm = PredBB->getTerminator();
  for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i)
    if (PredTerm->getSuccessor(i) == BB) {
      BB->removePredecessor(PredBB, true);
      PredTerm->setSuccessor(i, NewBB);
    }

  // At this point, the IR is fully up to date and consistent. Do a quick scan
  // over the new instructions and zap any that are constants or dead. This
  // frequently happens because of phi translation.
  SimplifyInstructionsInBlock(NewBB, TLI);

  // Update the edge weight from BB to SuccBB, which should be less than before.
  UpdateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB);

  // Threaded an edge!
  ++NumThreads;
  return true;
}

/// Create a new basic block that will be the predecessor of BB and successor
/// of all blocks in Preds. When profile data is available, update the
/// frequency of this new block.
BasicBlock *JumpThreadingPass::SplitBlockPreds(BasicBlock *BB,
                                               ArrayRef<BasicBlock *> Preds,
                                               const char *Suffix) {
  // Collect the frequencies of all predecessors of BB, which will be used to
  // update the edge weight on BB->SuccBB.
  BlockFrequency PredBBFreq(0);
  if (HasProfileData)
    for (auto Pred : Preds)
      PredBBFreq += BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB);

  BasicBlock *PredBB = SplitBlockPredecessors(BB, Preds, Suffix);

  // Set the block frequency of the newly created PredBB, which is the sum of
  // frequencies of Preds.
  if (HasProfileData)
    BFI->setBlockFreq(PredBB, PredBBFreq.getFrequency());
  return PredBB;
}

bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
  const TerminatorInst *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "not a split");

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  MDString *MDName = cast<MDString>(WeightsNode->getOperand(0));
  if (MDName->getString() != "branch_weights")
    return false;

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1;
}

/// Update the block frequency of BB, and the branch weight and metadata on
/// the edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by
/// 1 - Freq(PredBB->BB) / Freq(BB->SuccBB).
void JumpThreadingPass::UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
                                                     BasicBlock *BB,
                                                     BasicBlock *NewBB,
                                                     BasicBlock *SuccBB) {
  if (!HasProfileData)
    return;

  assert(BFI && BPI && "BFI & BPI should have been created here");

  // As the edge from PredBB to BB is deleted, we have to update the block
  // frequency of BB.
  auto BBOrigFreq = BFI->getBlockFreq(BB);
  auto NewBBFreq = BFI->getBlockFreq(NewBB);
  auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
  auto BBNewFreq = BBOrigFreq - NewBBFreq;
  BFI->setBlockFreq(BB, BBNewFreq.getFrequency());

  // Collect updated outgoing edges' frequencies from BB and use them to update
  // edge probabilities.
  SmallVector<uint64_t, 4> BBSuccFreq;
  for (BasicBlock *Succ : successors(BB)) {
    auto SuccFreq = (Succ == SuccBB)
                        ? BB2SuccBBFreq - NewBBFreq
                        : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
    BBSuccFreq.push_back(SuccFreq.getFrequency());
  }

  uint64_t MaxBBSuccFreq =
      *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());

  SmallVector<BranchProbability, 4> BBSuccProbs;
  if (MaxBBSuccFreq == 0)
    BBSuccProbs.assign(BBSuccFreq.size(),
                       {1, static_cast<unsigned>(BBSuccFreq.size())});
  else {
    for (uint64_t Freq : BBSuccFreq)
      BBSuccProbs.push_back(
          BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
    // Normalize edge probabilities so that they sum up to one.
    BranchProbability::normalizeProbabilities(BBSuccProbs.begin(),
                                              BBSuccProbs.end());
  }

  // Update edge probabilities in BPI.
  for (int I = 0, E = BBSuccProbs.size(); I < E; I++)
    BPI->setEdgeProbability(BB, I, BBSuccProbs[I]);

  // Update the profile metadata as well.
  //
  // Don't do this if the profile of the transformed blocks was statically
  // estimated. (This could occur despite the function having an entry
  // frequency in completely cold parts of the CFG.)
  //
  // In this case we don't want to suggest to subsequent passes that the
  // calculated weights are fully consistent. Consider this graph:
  //
  //                 check_1
  //             50% /   |
  //             eq_1    | 50%
  //                 \   |
  //                 check_2
  //             50% /   |
  //             eq_2    | 50%
  //                 \   |
  //                 check_3
  //             50% /   |
  //             eq_3    | 50%
  //                 \   |
  //
  // Assuming the blocks check_* all compare the same value against 1, 2 and 3,
  // the overall probabilities are inconsistent; the total probability that the
  // value is either 1, 2 or 3 is 150%.
  //
  // As a consequence, if we thread eq_1 -> check_2 to check_3, check_2->check_3
  // becomes 0%. This is even worse if the edge whose probability becomes 0% is
  // the loop exit edge. Then based solely on static estimation we would assume
  // the loop was extremely hot.
  //
  // FIXME: Fix this locally as well so that BPI and BFI stay consistent. We
  // shouldn't make edges extremely likely or unlikely based solely on static
  // estimation.
  if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) {
    SmallVector<uint32_t, 4> Weights;
    for (auto Prob : BBSuccProbs)
      Weights.push_back(Prob.getNumerator());

    auto TI = BB->getTerminator();
    TI->setMetadata(
        LLVMContext::MD_prof,
        MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights));
  }
}

/// DuplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch
/// to BB which contains an i1 PHI node and a conditional branch on that PHI.
/// If we can duplicate the contents of BB up into PredBB, do so now; this
/// improves the odds that the branch will be on an analyzable instruction like
/// a compare.
bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred(
    BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) {
  assert(!PredBBs.empty() && "Can't handle an empty set");

  // If BB is a loop header, then duplicating this block outside the loop would
  // cause us to transform this into an irreducible loop, so don't do this.
  // See the comments above FindLoopHeaders for justifications and caveats.
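  // A hedged sketch of why (hypothetical CFG): if BB is the header of a loop
  //   Pred1, Pred2 -> BB <-> Body
  // and we clone BB into Pred1 only, Pred1 now enters the loop at Body while
  // Pred2 still enters at BB. A loop with two entry blocks is irreducible,
  // and later loop passes can no longer analyze it.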
  if (LoopHeaders.count(BB)) {
    DEBUG(dbgs() << "  Not duplicating loop header '" << BB->getName()
                 << "' into predecessor block '" << PredBBs[0]->getName()
                 << "' - it might create an irreducible loop!\n");
    return false;
  }

  unsigned DuplicationCost =
      getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
  if (DuplicationCost > BBDupThreshold) {
    DEBUG(dbgs() << "  Not duplicating BB '" << BB->getName()
                 << "' - Cost is too high: " << DuplicationCost << "\n");
    return false;
  }

  // And finally, do it! Start by factoring the predecessors if needed.
  BasicBlock *PredBB;
  if (PredBBs.size() == 1)
    PredBB = PredBBs[0];
  else {
    DEBUG(dbgs() << "  Factoring out " << PredBBs.size()
                 << " common predecessors.\n");
    PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm");
  }

  // Okay, we decided to do this! Clone all the instructions in BB onto the end
  // of PredBB.
  DEBUG(dbgs() << "  Duplicating block '" << BB->getName() << "' into end of '"
               << PredBB->getName() << "' to eliminate branch on phi. Cost: "
               << DuplicationCost << " block is:" << *BB << "\n");

  // Unless PredBB ends with an unconditional branch, split the edge so that we
  // can just clone the bits from BB into the end of the new PredBB.
  BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator());

  if (!OldPredBranch || !OldPredBranch->isUnconditional()) {
    PredBB = SplitEdge(PredBB, BB);
    OldPredBranch = cast<BranchInst>(PredBB->getTerminator());
  }

  // We are going to have to map operands from the original BB block into the
  // PredBB block. Evaluate PHI nodes in BB.
  DenseMap<Instruction*, Value*> ValueMapping;

  BasicBlock::iterator BI = BB->begin();
  for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
    ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB);
  // Clone the non-phi instructions of BB into PredBB, keeping track of the
  // mapping and using it to remap operands in the cloned instructions.
  for (; BI != BB->end(); ++BI) {
    Instruction *New = BI->clone();

    // Remap operands to patch up intra-block references.
    for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i)
      if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) {
        DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst);
        if (I != ValueMapping.end())
          New->setOperand(i, I->second);
      }

    // If this instruction can be simplified after the operands are updated,
    // just use the simplified value instead. This frequently happens due to
    // phi translation.
    if (Value *IV = SimplifyInstruction(
            New,
            {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) {
      ValueMapping[&*BI] = IV;
      if (!New->mayHaveSideEffects()) {
        New->deleteValue();
        New = nullptr;
      }
    } else {
      ValueMapping[&*BI] = New;
    }
    if (New) {
      // Otherwise, insert the new instruction into the block.
      New->setName(BI->getName());
      PredBB->getInstList().insert(OldPredBranch->getIterator(), New);
    }
  }

  // Check to see if the targets of the branch had PHI nodes. If so, we need to
  // add entries to the PHI nodes for the branch from PredBB now.
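  // E.g. (hypothetical IR): if BB ends in `br i1 %c, label %T, label %F` and
  // %T contains `%p = phi i32 [ %v, %BB ]`, then PredBB now also branches to
  // %T, so %p needs an extra entry [ %v', %PredBB ], where %v' is the clone
  // of %v from ValueMapping (or %v itself if it was not cloned).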
  BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator());
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB,
                                  ValueMapping);
  AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB,
                                  ValueMapping);

  // If there were values defined in BB that are used outside the block, then we
  // now have to update all uses of the value to use either the original value,
  // the cloned value, or some PHI derived value. This can require arbitrary
  // PHI insertion, which we are prepared to do; clean these up now.
  SSAUpdater SSAUpdate;
  SmallVector<Use*, 16> UsesToRename;
  for (Instruction &I : *BB) {
    // Scan all uses of this instruction to see if it is used outside of its
    // block, and if so, record them in UsesToRename.
    for (Use &U : I.uses()) {
      Instruction *User = cast<Instruction>(U.getUser());
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (UserPN->getIncomingBlock(U) == BB)
          continue;
      } else if (User->getParent() == BB)
        continue;

      UsesToRename.push_back(&U);
    }

    // If there are no uses outside the block, we're done with this instruction.
    if (UsesToRename.empty())
      continue;

    DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");

    // We found a use of I outside of BB. Rename all uses of I that are outside
    // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
    // with the two values we know.
    SSAUpdate.Initialize(I.getType(), I.getName());
    SSAUpdate.AddAvailableValue(BB, &I);
    SSAUpdate.AddAvailableValue(PredBB, ValueMapping[&I]);

    while (!UsesToRename.empty())
      SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
    DEBUG(dbgs() << "\n");
  }

  // PredBB no longer jumps to BB, so remove entries in the PHI nodes for the
  // edge that we nuked.
  BB->removePredecessor(PredBB, true);

  // Remove the unconditional branch at the end of the PredBB block.
  OldPredBranch->eraseFromParent();

  ++NumDupes;
  return true;
}

/// TryToUnfoldSelect - Look for blocks of the form
/// bb1:
///   %a = select
///   br bb2
///
/// bb2:
///   %p = phi [%a, %bb1] ...
///   %c = icmp %p
///   br i1 %c
///
/// And expand the select into a branch structure if one of its arms allows %c
/// to be folded. This later enables threading from bb1 over bb2.
bool JumpThreadingPass::TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) {
  BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
  PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0));
  Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1));

  if (!CondBr || !CondBr->isConditional() || !CondLHS ||
      CondLHS->getParent() != BB)
    return false;

  for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *Pred = CondLHS->getIncomingBlock(I);
    SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I));

    // Check whether one of the incoming values is a select in the
    // corresponding predecessor.
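    // E.g. (hypothetical IR): Pred contains
    //   %a = select i1 %flag, i32 0, i32 1
    // and feeds the PHI entry [ %a, %Pred ] in BB. Such a single-use select
    // is a candidate for being unfolded into a branch below.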
    if (!SI || SI->getParent() != Pred || !SI->hasOneUse())
      continue;

    BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator());
    if (!PredTerm || !PredTerm->isUnconditional())
      continue;

    // Now check if one of the select values would allow us to constant fold
    // the terminator in BB. We don't do the transform if both sides fold;
    // those cases will be threaded in any case.
    LazyValueInfo::Tristate LHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1),
                                CondRHS, Pred, BB, CondCmp);
    LazyValueInfo::Tristate RHSFolds =
        LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2),
                                CondRHS, Pred, BB, CondCmp);
    if ((LHSFolds != LazyValueInfo::Unknown ||
         RHSFolds != LazyValueInfo::Unknown) &&
        LHSFolds != RHSFolds) {
      // Expand the select.
      //
      // Pred --
      //  |    v
      //  |  NewBB
      //  |    |
      //  |-----
      //  v
      // BB
      BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold",
                                             BB->getParent(), BB);
      // Move the unconditional branch to NewBB.
      PredTerm->removeFromParent();
      NewBB->getInstList().insert(NewBB->end(), PredTerm);
      // Create a conditional branch and update PHI nodes.
      BranchInst::Create(NewBB, BB, SI->getCondition(), Pred);
      CondLHS->setIncomingValue(I, SI->getFalseValue());
      CondLHS->addIncoming(SI->getTrueValue(), NewBB);
      // The select is now dead.
      SI->eraseFromParent();

      // Update any other PHI nodes in BB.
      for (BasicBlock::iterator BI = BB->begin();
           PHINode *Phi = dyn_cast<PHINode>(BI); ++BI)
        if (Phi != CondLHS)
          Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB);
      return true;
    }
  }
  return false;
}

/// TryToUnfoldSelectInCurrBB - Look for a PHI/Select pair in the same BB of
/// the form
/// bb:
///   %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ...
///   %s = select p, trueval, falseval
///
/// And expand the select into a branch structure. This later enables
/// jump-threading over bb in this pass.
///
/// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold
/// the select if the associated PHI has at least one constant. If the
/// unfolded select is not jump-threaded, it will be folded again by later
/// optimizations.
bool JumpThreadingPass::TryToUnfoldSelectInCurrBB(BasicBlock *BB) {
  // If threading this would thread across a loop header, don't thread the edge.
  // See the comments above FindLoopHeaders for justifications and caveats.
  if (LoopHeaders.count(BB))
    return false;

  // Look for a Phi/Select pair in the same basic block. The Phi feeds the
  // condition of the Select and at least one of the incoming values is a
  // constant.
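  // E.g. (hypothetical IR):
  //   %p = phi i1 [ true, %bb1 ], [ %x, %bb2 ]
  //   %s = select i1 %p, i32 %a, i32 %b
  // Here %p has a constant input and feeds only the select, so the loop below
  // unfolds %s into a conditional branch plus a PHI merging %a and %b.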
  for (BasicBlock::iterator BI = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
    unsigned NumPHIValues = PN->getNumIncomingValues();
    if (NumPHIValues == 0 || !PN->hasOneUse())
      continue;

    SelectInst *SI = dyn_cast<SelectInst>(PN->user_back());
    if (!SI || SI->getParent() != BB)
      continue;

    Value *Cond = SI->getCondition();
    if (!Cond || Cond != PN || !Cond->getType()->isIntegerTy(1))
      continue;

    bool HasConst = false;
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      if (PN->getIncomingBlock(i) == BB)
        return false;
      if (isa<ConstantInt>(PN->getIncomingValue(i)))
        HasConst = true;
    }

    if (HasConst) {
      // Expand the select.
      TerminatorInst *Term =
          SplitBlockAndInsertIfThen(SI->getCondition(), SI, false);
      PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
      NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
      NewPN->addIncoming(SI->getFalseValue(), BB);
      SI->replaceAllUsesWith(NewPN);
      SI->eraseFromParent();
      return true;
    }
  }

  return false;
}

/// Try to propagate a guard from the current BB into one of its predecessors
/// when another branch of execution implies that the condition of this
/// guard is always true. Currently we only process the simplest case that
/// looks like:
///
/// Start:
///   %cond = ...
///   br i1 %cond, label %T1, label %F1
/// T1:
///   br label %Merge
/// F1:
///   br label %Merge
/// Merge:
///   %condGuard = ...
///   call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
///
/// And cond either implies condGuard or !condGuard. In this case all the
/// instructions before the guard can be duplicated in both branches, and the
/// guard is then threaded to one of them.
bool JumpThreadingPass::ProcessGuards(BasicBlock *BB) {
  using namespace PatternMatch;
  // We only want to deal with two predecessors.
  BasicBlock *Pred1, *Pred2;
  auto PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return false;
  Pred1 = *PI++;
  if (PI == PE)
    return false;
  Pred2 = *PI++;
  if (PI != PE)
    return false;
  if (Pred1 == Pred2)
    return false;

  // Try to thread one of the guards of the block.
  // TODO: Look deeper than the immediate predecessors?
  auto *Parent = Pred1->getSinglePredecessor();
  if (!Parent || Parent != Pred2->getSinglePredecessor())
    return false;

  if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
    for (auto &I : *BB)
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>()))
        if (ThreadGuard(BB, cast<IntrinsicInst>(&I), BI))
          return true;

  return false;
}

/// Try to propagate the guard from BB, which is the lower block of a diamond,
/// to one of its branches when the diamond's condition implies the guard's
/// condition.
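/// For example (hypothetical IR): with the diamond branching on
///   br i1 %cond, label %T1, label %F1
/// and merging at BB, which contains
///   call void(i1, ...) @llvm.experimental.guard( i1 %g )[ "deopt"() ]
/// if %cond implies %g, the instructions above the guard are duplicated into
/// both arms; the copy of the guard on the %T1 side is known to pass and is
/// dropped, so only the %F1 side keeps a guard.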
bool JumpThreadingPass::ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard,
                                    BranchInst *BI) {
  assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
  assert(BI->isConditional() && "Unconditional branch has 2 successors?");
  Value *GuardCond = Guard->getArgOperand(0);
  Value *BranchCond = BI->getCondition();
  BasicBlock *TrueDest = BI->getSuccessor(0);
  BasicBlock *FalseDest = BI->getSuccessor(1);

  auto &DL = BB->getModule()->getDataLayout();
  bool TrueDestIsSafe = false;
  bool FalseDestIsSafe = false;

  // True dest is safe if BranchCond => GuardCond.
  auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
  if (Impl && *Impl)
    TrueDestIsSafe = true;
  else {
    // False dest is safe if !BranchCond => GuardCond.
    Impl =
        isImpliedCondition(BranchCond, GuardCond, DL, /* InvertAPred */ true);
    if (Impl && *Impl)
      FalseDestIsSafe = true;
  }

  if (!TrueDestIsSafe && !FalseDestIsSafe)
    return false;

  BasicBlock *UnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest;
  BasicBlock *GuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest;

  ValueToValueMapTy UnguardedMapping, GuardedMapping;
  Instruction *AfterGuard = Guard->getNextNode();
  unsigned Cost = getJumpThreadDuplicationCost(BB, AfterGuard, BBDupThreshold);
  if (Cost > BBDupThreshold)
    return false;
  // Duplicate all instructions before the guard and the guard itself to the
  // branch where the implication is not proved.
  GuardedBlock = DuplicateInstructionsInSplitBetween(
      BB, GuardedBlock, AfterGuard, GuardedMapping);
  assert(GuardedBlock && "Could not create the guarded block?");
  // Duplicate all instructions before the guard in the unguarded branch.
  // Since we have successfully duplicated the guarded block and this block
  // has fewer instructions, we expect it to succeed.
  UnguardedBlock = DuplicateInstructionsInSplitBetween(BB, UnguardedBlock,
                                                       Guard, UnguardedMapping);
  assert(UnguardedBlock && "Could not create the unguarded block?");
  DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
               << GuardedBlock->getName() << "\n");

  // Some instructions before the guard may still have uses. For them, we need
  // to create Phi nodes merging their copies in both guarded and unguarded
  // branches. Those instructions that have no uses can just be removed.
  SmallVector<Instruction *, 4> ToRemove;
  for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI)
    if (!isa<PHINode>(&*BI))
      ToRemove.push_back(&*BI);

  Instruction *InsertionPoint = &*BB->getFirstInsertionPt();
  assert(InsertionPoint && "Empty block?");
  // Substitute with Phis & remove.
  for (auto *Inst : reverse(ToRemove)) {
    if (!Inst->use_empty()) {
      PHINode *NewPN = PHINode::Create(Inst->getType(), 2);
      NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock);
      NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock);
      NewPN->insertBefore(InsertionPoint);
      Inst->replaceAllUsesWith(NewPN);
    }
    Inst->eraseFromParent();
  }
  return true;
}