//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DomTreeUpdater.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));
static cl::opt<bool> EnableMemDep("enable-gvn-memdep", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("gvn-max-recurse-depth", cl::Hidden, cl::init(1000),
                cl::ZeroOrMore,
                cl::desc("Max recurse depth in GVN (default = 1000)"));

static cl::opt<uint32_t> MaxNumDeps(
    "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
    cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));

struct llvm::GVN::Expression {
  uint32_t opcode;
  Type *type;
  bool commutative = false;
  SmallVector<uint32_t, 4> varargs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

namespace llvm {

template <> struct DenseMapInfo<GVN::Expression> {
  static inline GVN::Expression getEmptyKey() { return ~0U; }
  static inline GVN::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVN::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal   // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
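  /// For example, a one-byte load from P+1 that is satisfied by a wider store
  /// to P has Offset == 1: the byte we need starts one byte into the stored
  /// value.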
  unsigned Offset;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
                                  GVN &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
    return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
                                               CmpInst::Predicate Predicate,
                                               Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
  if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI might be an extract from one of our recognised intrinsics. If it
    // is we'll synthesize a semantically equivalent expression instead of
    // an extractvalue expression.
    switch (I->getIntrinsicID()) {
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
      e.opcode = Instruction::Add;
      break;
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::usub_with_overflow:
      e.opcode = Instruction::Sub;
      break;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
      e.opcode = Instruction::Mul;
      break;
    default:
      break;
    }

    if (e.opcode != 0) {
      // Intrinsic recognized. Grab its args to finish building the expression.
      assert(I->getNumArgOperands() == 2 &&
             "Expect two args for recognised intrinsics.");
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
      return e;
    }
  }

  // Not a recognised intrinsic. Fall back to producing an extractvalue
  // expression.
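  // The expression's varargs will hold the value number of the aggregate
  // operand followed by the raw constant indices; the indices are not value
  // numbers (see the index handling in phiTranslateImpl).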
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVN::ValueTable::ValueTable() = default;
GVN::ValueTable::ValueTable(const ValueTable &) = default;
GVN::ValueTable::ValueTable(ValueTable &&) = default;
GVN::ValueTable::~ValueTable() = default;

/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  } else if (MD && AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst *local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: Move the checking logic to MemDep!
    CallInst *cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall &&
          DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// Returns true if a value number exists for the specified value.
bool GVN::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}

/// lookupOrAdd - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction *I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                         CmpInst::Predicate Predicate,
                                         Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVN::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &MemDep = AM.getResult<MemoryDependenceAnalysis>(F);
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, &MemDep, LI, &ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value*> &d) const {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
         E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully available in FullyAvailableBlocks.
/// This map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                              DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
                              uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, bool> IV =
      FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,
                                      RecurseDepth + 1))
      goto SpeculationFailure;

  return true;

// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                         GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
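  // For example, if the only entry in ValuesPerBlock is a store in a block
  // that properly dominates the load's block, every path to the load passes
  // through that store, so its value can be reused without any PHI nodes.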
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominates this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it as
    // SSAUpdater will resolve the value to the relevant phi which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == LI->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == LI) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == LI)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
                                                Instruction *InsertPt,
                                                GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << " " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that GVN
      // tracks. It is potentially possible to remove the load from the table,
      // but then all of the operations based on it would need to be rehashed.
      // Just leave the dead load around.
      gvn.getMemDep().removeInstruction(Load);
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << " " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << " " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  User *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
  R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : LI->getPointerOperand()->users())
    if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
        DT->dominates(cast<Instruction>(U), LI)) {
      // FIXME: for now give up if there are multiple memory accesses that
      // dominate the load. We need further analysis to decide which one
      // we're forwarding from.
      if (OtherAccess)
        OtherAccess = nullptr;
      else
        OtherAccess = U;
    }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
                                  Value *Address, AvailableValue &Res) {
  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(LI->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = LI->getModule()->getDataLayout();

  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //            load i32* P
    //            load i8* (P+1)
    // if we have this, replace the latter with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);

        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLI, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
      if (Address && !LI->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }
    // Nothing known about this clobber, have to be conservative.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
932 dbgs() << "GVN: load "; LI->printAsOperand(dbgs()); 933 Instruction *I = DepInfo.getInst(); 934 dbgs() << " is clobbered by " << *I << '\n';); 935 if (ORE->allowExtraAnalysis(DEBUG_TYPE)) 936 reportMayClobberedLoad(LI, DepInfo, DT, ORE); 937 938 return false; 939 } 940 assert(DepInfo.isDef() && "follows from above"); 941 942 Instruction *DepInst = DepInfo.getInst(); 943 944 // Loading the allocation -> undef. 945 if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) || 946 // Loading immediately after lifetime begin -> undef. 947 isLifetimeStart(DepInst)) { 948 Res = AvailableValue::get(UndefValue::get(LI->getType())); 949 return true; 950 } 951 952 // Loading from calloc (which zero initializes memory) -> zero 953 if (isCallocLikeFn(DepInst, TLI)) { 954 Res = AvailableValue::get(Constant::getNullValue(LI->getType())); 955 return true; 956 } 957 958 if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) { 959 // Reject loads and stores that are to the same address but are of 960 // different types if we have to. If the stored value is larger or equal to 961 // the loaded value, we can reuse it. 962 if (S->getValueOperand()->getType() != LI->getType() && 963 !canCoerceMustAliasedValueToLoad(S->getValueOperand(), 964 LI->getType(), DL)) 965 return false; 966 967 // Can't forward from non-atomic to atomic without violating memory model. 968 if (S->isAtomic() < LI->isAtomic()) 969 return false; 970 971 Res = AvailableValue::get(S->getValueOperand()); 972 return true; 973 } 974 975 if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) { 976 // If the types mismatch and we can't handle it, reject reuse of the load. 977 // If the stored value is larger or equal to the loaded value, we can reuse 978 // it. 979 if (LD->getType() != LI->getType() && 980 !canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) 981 return false; 982 983 // Can't forward from non-atomic to atomic without violating memory model. 984 if (LD->isAtomic() < LI->isAtomic()) 985 return false; 986 987 Res = AvailableValue::getLoad(LD); 988 return true; 989 } 990 991 // Unknown def - must be conservative 992 LLVM_DEBUG( 993 // fast print dep, using operator<< on instruction is too slow. 994 dbgs() << "GVN: load "; LI->printAsOperand(dbgs()); 995 dbgs() << " has unknown def " << *DepInst << '\n';); 996 return false; 997 } 998 999 void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps, 1000 AvailValInBlkVect &ValuesPerBlock, 1001 UnavailBlkVect &UnavailableBlocks) { 1002 // Filter out useless results (non-locals, etc). Keep track of the blocks 1003 // where we have a value available in repl, also keep track of whether we see 1004 // dependencies that produce an unknown value for the load (such as a call 1005 // that could potentially clobber the load). 1006 unsigned NumDeps = Deps.size(); 1007 for (unsigned i = 0, e = NumDeps; i != e; ++i) { 1008 BasicBlock *DepBB = Deps[i].getBB(); 1009 MemDepResult DepInfo = Deps[i].getResult(); 1010 1011 if (DeadBlocks.count(DepBB)) { 1012 // Dead dependent mem-op disguise as a load evaluating the same value 1013 // as the load in question. 1014 ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB)); 1015 continue; 1016 } 1017 1018 if (!DepInfo.isDef() && !DepInfo.isClobber()) { 1019 UnavailableBlocks.push_back(DepBB); 1020 continue; 1021 } 1022 1023 // The address being loaded in this non-local block may not be the same as 1024 // the pointer operand of the load if PHI translation occurs. Make sure 1025 // to consider the right address. 
    Value *Address = Deps[i].getAddress();

    AvailableValue AV;
    if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
      // subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;
  bool IsSafeToSpeculativelyExecute = isSafeToSpeculativelyExecute(LI);

  // Check that there are no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass the
  // execution to the following instruction, then moving through it may become
  // invalid. For example:
  //
  // int arr[LEN];
  // int index = ???;
  // ...
  // guard(0 <= index && index < LEN);
  // use(arr[index]);
  //
  // It is illegal to move the array access to any point above the guard,
  // because if the index is out of bounds we should deoptimize rather than
  // access the array.
  // Check that there is no guard in this block above our instruction.
  if (!IsSafeToSpeculativelyExecute && ICF->isDominatedByICFIFromSameBlock(LI))
    return false;
  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    if (!IsSafeToSpeculativelyExecute && ICF->hasICF(TmpBB))
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = true;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      LLVM_DEBUG(
          dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
                 << Pred->getName() << "': " << *LI << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = LI->getModule()->getDataLayout();
  SmallVector<Instruction*, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), DL, AC);
    Value *LoadPtr = nullptr;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (!LoadPtr) {
      LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                        << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      markInstructionForDeletion(I);
    }
    // HINT: Don't revert the edge-splitting as a following transformation may
    // also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
             << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
             << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // Instructions that have been inserted in predecessor(s) to materialize
    // the load address do not retain their original debug locations. Doing
    // so could lead to confusing (but correct) source attributions.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?
    I->setDebugLoc(DebugLoc());

    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  for (const auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;
    Value *LoadPtr = PredLoad.second;

    auto *NewLoad = new LoadInst(LoadPtr, LI->getName() + ".pre",
                                 LI->isVolatile(), LI->getAlignment(),
                                 LI->getOrdering(), LI->getSyncScopeID(),
                                 UnavailablePred->getTerminator());
    NewLoad->setDebugLoc(LI->getDebugLoc());

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
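  // ConstructSSAForLoadSet stitches the per-block available values (now
  // including the load just inserted into the predecessor) into a single
  // value at LI, adding PHI nodes where paths merge.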
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(LI->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
           << "load eliminated by PRE";
  });
  ++NumPRELoad;
  return true;
}

static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
           << "load of type " << NV("Type", LI->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}

/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Non-local speculations are not allowed under asan.
  if (LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(LI, Deps);

  // If we had to process more than MaxNumDeps (default 100) dependencies to
  // analyze this load, it isn't worth worrying about. Optimizing it will be
  // too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > MaxNumDeps)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs());
               dbgs() << " has unknown dependencies\n";);
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // If instruction I has debug info, then we should not update it.
      // Also, if I has a null DebugLoc, then it is still potentially incorrect
      // to propagate LI's DebugLoc because LI may not post-dominate I.
      if (LI->getDebugLoc() && LI->getParent() == I->getParent())
        I->setDebugLoc(LI->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    reportLoadElim(LI, V, ORE);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}

bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
  assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
         "This function can only be called with llvm.assume intrinsic");
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      // Insert a new store-to-null instruction before the assume to indicate
      // that this code is not reachable. FIXME: We could insert an unreachable
      // instruction directly because we can modify the CFG.
      new StoreInst(UndefValue::get(Int8Ty),
                    Constant::getNullValue(Int8Ty->getPointerTo()),
                    IntrinsicI);
    }
    markInstructionForDeletion(IntrinsicI);
    return false;
  } else if (isa<Constant>(V)) {
    // If it's not false, and constant, it must evaluate to true. This means our
    // assume is assume(true), and thus, pointless, and we don't want to do
    // anything more here.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);

    // This property is only true in dominated successors, propagateEquality
    // will check dominance for us.
    Changed |= propagateEquality(V, True, Edge, false);
  }

  // We can replace assume value with true, which covers cases like this:
  // call void @llvm.assume(i1 %cmp)
  // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
  ReplaceWithConstMap[V] = True;

  // If one operand of a *cmp *eq is constant, adding the other operand to the
  // map covers cases like this:
  // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
  // call void @llvm.assume(i1 %cmp)
  // ret float %0 ; will change it to ret float 3.000000e+00
  if (auto *CmpI = dyn_cast<CmpInst>(V)) {
    if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ ||
        CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
        (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
         CmpI->getFastMathFlags().noNaNs())) {
      Value *CmpLHS = CmpI->getOperand(0);
      Value *CmpRHS = CmpI->getOperand(1);
      if (isa<Constant>(CmpLHS))
        std::swap(CmpLHS, CmpRHS);
      auto *RHSConst = dyn_cast<Constant>(CmpRHS);

      // If only one operand is constant.
      if (RHSConst != nullptr && !isa<Constant>(CmpLHS))
        ReplaceWithConstMap[CmpLHS] = RHSConst;
    }
  }
  return Changed;
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access.
  if (!L->isUnordered())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  // Only handle the local case below.
  if (!Dep.isDef() && !Dep.isClobber()) {
    // This might be a NonFuncLocal or an Unknown.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; L->printAsOperand(dbgs());
        dbgs() << " has unknown dependence\n";);
    return false;
  }

  AvailableValue AV;
  if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
    Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);

    // Replace the load!
    patchAndReplaceAllUsesWith(L, AvailableValue);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    reportLoadElim(L, AvailableValue, ORE);
    // Tell MDA to reexamine the reused pointer since we might have more
    // information after forwarding it.
    if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(AvailableValue);
    return true;
  }

  return false;
}

/// Return a pair whose first field is the value number of \p Exp and whose
/// second field indicates whether that value number is newly created.
std::pair<uint32_t, bool>
GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}

/// Return whether all the values related with the same \p num are
/// defined in \p BB.
bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                     GVN &Gvn) {
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals && Vals->BB == BB)
    Vals = Vals->Next;
  return !Vals;
}

/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
                                       const BasicBlock *PhiBlock,
                                       uint32_t Num, GVN &Gvn) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}

/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
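/// For example, if \p Num is the number of 'add %phi, 1' where %phi lives in
/// \p PhiBlock, the translated number is that of 'add %v, 1' with %v being
/// %phi's incoming value from \p Pred, when such an expression has already
/// been numbered.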
uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           uint32_t Num, GVN &Gvn) {
  if (PHINode *PN = NumberingPhi[Num]) {
    for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
      if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
        if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
          return TransVal;
    }
    return Num;
  }

  // If any value associated with Num is defined in a BB other than PhiBlock,
  // it cannot depend on a phi in PhiBlock without going through a backedge.
  // We can do an early exit in that case to save compile time.
  if (!areAllValsInBB(Num, PhiBlock, Gvn))
    return Num;

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
    return Num;
  Expression Exp = Expressions[ExprIdx[Num]];

  for (unsigned i = 0; i < Exp.varargs.size(); i++) {
    // For InsertValue and ExtractValue, some varargs are index numbers
    // instead of value numbers. Those index numbers should not be
    // translated.
    if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
        (i > 0 && Exp.opcode == Instruction::ExtractValue))
      continue;
    Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
  }

  if (Exp.commutative) {
    assert(Exp.varargs.size() == 2 && "Unsupported commutative expression!");
    if (Exp.varargs[0] > Exp.varargs[1]) {
      std::swap(Exp.varargs[0], Exp.varargs[1]);
      uint32_t Opcode = Exp.opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.opcode = (Opcode << 8) |
                     CmpInst::getSwappedPredicate(
                         static_cast<CmpInst::Predicate>(Exp.opcode & 255));
    }
  }

  if (uint32_t NewNum = expressionNumbering[Exp])
    return NewNum;
  return Num;
}

/// Erase stale entries from the phiTranslate cache so that phiTranslate can
/// be computed again.
void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
                                               const BasicBlock &CurrBlock) {
  for (const BasicBlock *Pred : predecessors(&CurrBlock)) {
    auto FindRes = PhiTranslateTable.find({Num, Pred});
    if (FindRes != PhiTranslateTable.end())
      PhiTranslateTable.erase(FindRes);
  }
}

// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that
// number, and then scan the list to find one whose block dominates the block
// in question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val) return nullptr;

  Value *Val = nullptr;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val)) return Val;
  }

  LeaderTableEntry *Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val)) return Next->Val;
      if (!Val) Val = Next->Val;
    }

    Next = Next->Next;
  }

  return Val;
}

/// There is an edge from 'Src' to 'Dst'. Return
/// true if every path from the entry block to 'Dst' passes via this edge. In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
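/// For example (illustrative): if 'Dst' has 'Src' as its single predecessor,
/// every path from the entry to 'Dst' must use this edge, so we return true;
/// if 'Dst' has any other predecessor we conservatively return false.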
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // While in theory it is interesting to consider the case in which Dst has
  // more than one predecessor, because Dst might be part of a loop which is
  // only reachable from Src, in practice it is pointless since at the time
  // GVN runs all such loops have preheaders, which means that Dst will have
  // been changed to have only one predecessor, namely Src.
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  assert((!Pred || Pred == E.getStart()) &&
         "No edge between these basic blocks!");
  return Pred != nullptr;
}

void GVN::assignBlockRPONumber(Function &F) {
  uint32_t NextBlockNumber = 1;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    BlockRPONumber[BB] = NextBlockNumber++;
}

// Tries to replace an instruction's operands with constants, using the
// information in ReplaceWithConstMap.
bool GVN::replaceOperandsWithConsts(Instruction *Instr) const {
  bool Changed = false;
  for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
    Value *Operand = Instr->getOperand(OpNum);
    auto it = ReplaceWithConstMap.find(Operand);
    if (it != ReplaceWithConstMap.end()) {
      assert(!isa<Constant>(Operand) &&
             "Replacing constants with constants is invalid");
      LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
                        << *it->second << " in instruction " << *Instr
                        << '\n');
      Instr->setOperand(OpNum, it->second);
      Changed = true;
    }
  }
  return Changed;
}

/// The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
/// If DominatesByEdge is false, then it means that we will propagate the RHS
/// value starting from the end of Root.Start.
bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
                            bool DominatesByEdge) {
  SmallVector<std::pair<Value*, Value*>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  // For speed, compute a conservative fast approximation to
  // DT->dominates(Root, Root.getEnd());
  const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);

  while (!Worklist.empty()) {
    std::pair<Value*, Value*> Item = Worklist.pop_back_val();
    LHS = Item.first; RHS = Item.second;

    if (LHS == RHS)
      continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS))
      continue;

    // Prefer a constant on the right-hand side, or an Argument if no
    // constants.
    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) &&
           "Unexpected value!");

    // If there is no obvious reason to prefer the left-hand side over the
    // right-hand side, ensure the longest lived term is on the right-hand
    // side, so the shortest lived term will be replaced by the longest lived.
    // This tends to expose more simplifications.
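    // E.g. (illustrative): if %old was numbered long before %new and the two
    // become known-equal, we canonicalize on %old and rewrite uses of %new,
    // since %old is more likely to already appear in other expressions.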
    uint32_t LVN = VN.lookupOrAdd(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      // Move the 'oldest' value to the right-hand side, using the value
      // number as a proxy for age.
      uint32_t RVN = VN.lookupOrAdd(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
    // the invariant that instructions only occur in the leader table for their
    // own value number (this is used by removeFromLeaderTable), do not do this
    // if RHS is an instruction (if an instruction in the scope is morphed into
    // LHS then it will be turned into RHS by the next GVN iteration anyway, so
    // using the leader table is about compiling faster, not optimizing
    // better). The leader table only tracks basic blocks, not edges. Only add
    // to the table if we have the simple case where the edge dominates the
    // end.
    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

    // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
    // LHS always has at least one use that is not dominated by Root, this will
    // never do anything if LHS has only one use.
    if (!LHS->hasOneUse()) {
      unsigned NumReplacements =
          DominatesByEdge
              ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
              : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());

      Changed |= NumReplacements > 0;
      NumGVNEqProp += NumReplacements;
      // Cached information for anything that uses LHS will be invalid.
      if (MD)
        MD->invalidateCachedPointerInfo(LHS);
    }

    // Now try to deduce additional equalities from this one. For example, if
    // the known equality was "(A != B)" == "false" then it follows that A and
    // B are equal in the scope. Only boolean equalities with an explicit true
    // or false RHS are currently supported.
    if (!RHS->getType()->isIntegerTy(1))
      // Not a boolean equality - bail out.
      continue;
    ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
    if (!CI)
      // RHS neither 'true' nor 'false' - bail out.
      continue;
    // Whether RHS equals 'true'. Otherwise it equals 'false'.
    bool isKnownTrue = CI->isMinusOne();
    bool isKnownFalse = !isKnownTrue;

    // If "A && B" is known true then both A and B are known true. If "A || B"
    // is known false then both A and B are known false.
    Value *A, *B;
    if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
        (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
      Worklist.push_back(std::make_pair(A, RHS));
      Worklist.push_back(std::make_pair(B, RHS));
      continue;
    }

    // If we are propagating an equality like "(A == B)" == "true" then also
    // propagate the equality A == B. When propagating a comparison such as
    // "(A >= B)" == "true", replace all instances of "A < B" with "false".
    if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      // If "A == B" is known true, or "A != B" is known false, then replace
      // A with B everywhere in the scope.
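      // For example (illustrative):
      //   %c = icmp eq i32 %a, %b
      //   br i1 %c, label %taken, label %other
      // Along the edge into %taken, "%c == true" holds, so we queue the
      // derived equality "%a == %b" and rewrite dominated uses accordingly.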
      if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
          (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE))
        Worklist.push_back(std::make_pair(Op0, Op1));

      // Handle the floating point versions of equality comparisons too.
      if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) ||
          (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) {

        // Floating point -0.0 and 0.0 compare equal, so we can only
        // propagate values if we know that we have a constant and that
        // its value is non-zero.

        // FIXME: We should do this optimization if 'no signed zeros' is
        // applicable via an instruction-level fast-math-flag or some other
        // indicator that relaxed FP semantics are being used.

        if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero())
          Worklist.push_back(std::make_pair(Op0, Op1));
      }

      // If "A >= B" is known true, replace "A < B" with false everywhere.
      CmpInst::Predicate NotPred = Cmp->getInversePredicate();
      Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
      // Since we don't have the instruction "A < B" immediately to hand, work
      // out the value number that it would have and use that to find an
      // appropriate instruction (if any).
      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
      // If the number we were assigned was brand new then there is no point in
      // looking for an instruction realizing it: there cannot be one!
      if (Num < NextNum) {
        Value *NotCmp = findLeader(Root.getEnd(), Num);
        if (NotCmp && isa<Instruction>(NotCmp)) {
          unsigned NumReplacements =
              DominatesByEdge
                  ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
                  : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
                                             Root.getStart());
          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;
          // Cached information for anything that uses NotCmp will be invalid.
          if (MD)
            MD->invalidateCachedPointerInfo(NotCmp);
        }
      }
      // Ensure that any instruction in scope that gets the "A < B" value
      // number is replaced with false.
      // The leader table only tracks basic blocks, not edges. Only add to the
      // table if we have the simple case where the edge dominates the end.
      if (RootDominatesEnd)
        addToLeaderTable(Num, NotVal, Root.getEnd());

      continue;
    }
  }

  return Changed;
}

/// When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it. Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(V);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      markInstructionForDeletion(I);
      Changed = true;
    }
    if (Changed) {
      if (MD && V->getType()->isPtrOrPtrVectorTy())
        MD->invalidateCachedPointerInfo(V);
      ++NumGVNSimpl;
      return true;
    }
  }

  if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
    if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
      return processAssumeIntrinsic(IntrinsicI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (processLoad(LI))
      return true;

    unsigned Num = VN.lookupOrAdd(LI);
    addToLeaderTable(Num, LI, LI->getParent());
    return false;
  }

  // For conditional branches, we can perform simple conditional propagation on
  // the condition value itself.
  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!BI->isConditional())
      return false;

    if (isa<Constant>(BI->getCondition()))
      return processFoldableCondBr(BI);

    Value *BranchCond = BI->getCondition();
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);
    // Bail out early if both destinations are the same block; with duplicate
    // edges neither value of the condition can be propagated.
    if (TrueSucc == FalseSucc)
      return false;

    BasicBlock *Parent = BI->getParent();
    bool Changed = false;

    Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
    BasicBlockEdge TrueE(Parent, TrueSucc);
    Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);

    Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
    BasicBlockEdge FalseE(Parent, FalseSucc);
    Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);

    return Changed;
  }

  // For switches, propagate the case values into the case destinations.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    Value *SwitchCond = SI->getCondition();
    BasicBlock *Parent = SI->getParent();
    bool Changed = false;

    // Remember how many outgoing edges there are to every successor.
    SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
    for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
      ++SwitchEdges[SI->getSuccessor(i)];

    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      BasicBlock *Dst = i->getCaseSuccessor();
      // If there is only a single edge, propagate the case value into it.
      if (SwitchEdges.lookup(Dst) == 1) {
        BasicBlockEdge E(Parent, Dst);
        Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
      }
    }
    return Changed;
  }

  // Instructions with void type don't return a value, so there's
  // no point in trying to find redundancies in them.
  if (I->getType()->isVoidTy())
    return false;

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookupOrAdd(I);

  // Allocations, terminators, and PHI nodes are always given unique value
  // numbers, so we can save time and memory by fast-failing them here.
  if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  if (Num >= NextNum) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  Value *Repl = findLeader(I->getParent(), Num);
  if (!Repl) {
    // Failure, just remember this instance for future use.
    addToLeaderTable(Num, I, I->getParent());
    return false;
  } else if (Repl == I) {
    // If I was the result of a shortcut PRE, it might already be in the table
    // and the best replacement for itself. Nothing to do.
    return false;
  }

  // Remove it!
  patchAndReplaceAllUsesWith(I, Repl);
  if (MD && Repl->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Repl);
  markInstructionForDeletion(I);
  return true;
}

/// runImpl - This is the main transformation entry point for a function.
bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
                  const TargetLibraryInfo &RunTLI, AAResults &RunAA,
                  MemoryDependenceResults *RunMD, LoopInfo *LI,
                  OptimizationRemarkEmitter *RunORE) {
  AC = &RunAC;
  DT = &RunDT;
  VN.setDomTree(DT);
  TLI = &RunTLI;
  VN.setAliasAnalysis(&RunAA);
  MD = RunMD;
  ImplicitControlFlowTracking ImplicitCFT(DT);
  ICF = &ImplicitCFT;
  VN.setMemDep(MD);
  ORE = RunORE;

  bool Changed = false;
  bool ShouldContinue = true;

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = &*FI++;

    bool removedBlock = MergeBlockIntoPredecessor(BB, &DTU, LI, nullptr, MD);
    if (removedBlock)
      ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;
  while (ShouldContinue) {
    LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    // Fabricate val-nums for dead code in order to suppress an assertion in
    // performPRE().
    assignValNumForDeadCode();
    assignBlockRPONumber(F);
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something. PRE can move
  // computations into blocks where they become fully redundant. Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();
  // Do not clean up DeadBlocks in cleanupGlobalSets() as it's called for each
  // iteration.
  DeadBlocks.clear();

  return Changed;
}

bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off InstrsToErase by erasing eagerly in a helper function
  // (and incrementing BI before processing an instruction).
  assert(InstrsToErase.empty() &&
         "We expect InstrsToErase to be empty across iterations");
  if (DeadBlocks.count(BB))
    return false;

  // Clear the map before every BB because its contents are only valid within
  // a single BB.
  ReplaceWithConstMap.clear();
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    if (!ReplaceWithConstMap.empty())
      ChangedFunction |= replaceOperandsWithConsts(&*BI);
    ChangedFunction |= processInstruction(&*BI);

    if (InstrsToErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += InstrsToErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (auto *I : InstrsToErase) {
      assert(I->getParent() == BB && "Removing instruction from wrong block?");
      LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
      salvageDebugInfo(*I);
      if (MD) MD->removeInstruction(I);
      LLVM_DEBUG(verifyRemoved(I));
      I->eraseFromParent();
    }

    ICF->invalidateBlock(BB);
    InstrsToErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

// Instantiate an expression in a predecessor that lacked it.
bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                    BasicBlock *Curr, unsigned int ValNo) {
  // Because we are going top-down through the block, all value numbers
  // will be available in the predecessor by the time we need them. Any
  // that weren't originally present will have been instantiated earlier
  // in this loop.
  bool success = true;
  for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
    Value *Op = Instr->getOperand(i);
    if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
      continue;
    // This could be a newly inserted instruction, in which case, we won't
    // find a value number, and should give up before we hurt ourselves.
    // FIXME: Rewrite the infrastructure to make it easier to value number
    // and process newly inserted instructions.
    if (!VN.exists(Op)) {
      success = false;
      break;
    }
    uint32_t TValNo = VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
    if (Value *V = findLeader(Pred, TValNo)) {
      Instr->setOperand(i, V);
    } else {
      success = false;
      break;
    }
  }

  // Fail out if we encounter an operand that is not available in
  // the PRE predecessor. This is typically because of loads which
  // are not value numbered precisely.
  if (!success)
    return false;

  Instr->insertBefore(Pred->getTerminator());
  Instr->setName(Instr->getName() + ".pre");
  Instr->setDebugLoc(Instr->getDebugLoc());

  unsigned Num = VN.lookupOrAdd(Instr);
  VN.add(Instr, Num);

  // Update the availability map to include the new instruction.
  addToLeaderTable(Num, Instr, Pred);
  return true;
}

bool GVN::performScalarPRE(Instruction *CurInst) {
  if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
      isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
      CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
      isa<DbgInfoIntrinsic>(CurInst))
    return false;

  // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
  // sinking the compare again, and it would force the code generator to
  // move the i1 from processor flags or predicate registers into a general
  // purpose register.
  if (isa<CmpInst>(CurInst))
    return false;

  // We don't currently value number ANY inline asm calls.
  if (CallInst *CallI = dyn_cast<CallInst>(CurInst))
    if (CallI->isInlineAsm())
      return false;

  uint32_t ValNo = VN.lookup(CurInst);

  // Look for the predecessors for PRE opportunities. We're
  // only trying to solve the basic diamond case, where
  // a value is computed in the successor and one predecessor,
  // but not the other. We also explicitly disallow cases
  // where the successor is its own predecessor, because they're
  // more complicated to get right.
  unsigned NumWith = 0;
  unsigned NumWithout = 0;
  BasicBlock *PREPred = nullptr;
  BasicBlock *CurrentBlock = CurInst->getParent();

  SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
  for (BasicBlock *P : predecessors(CurrentBlock)) {
    // We're not interested in PRE where a predecessor block is not
    // reachable.
    if (!DT->isReachableFromEntry(P)) {
      NumWithout = 2;
      break;
    }
    // It is not safe to do PRE when P->CurrentBlock is a loop backedge and
    // CurInst has an operand defined in CurrentBlock (so it may be defined
    // by a phi in the loop header).
    if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
        llvm::any_of(CurInst->operands(), [&](const Use &U) {
          if (auto *Inst = dyn_cast<Instruction>(U.get()))
            return Inst->getParent() == CurrentBlock;
          return false;
        })) {
      NumWithout = 2;
      break;
    }

    uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
    Value *predV = findLeader(P, TValNo);
    if (!predV) {
      predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
      PREPred = P;
      ++NumWithout;
    } else if (predV == CurInst) {
      /* CurInst dominates this predecessor. */
      NumWithout = 2;
      break;
    } else {
      predMap.push_back(std::make_pair(predV, P));
      ++NumWith;
    }
  }

  // Don't do PRE when it might increase code size, i.e. when
  // we would need to insert instructions in more than one pred.
  if (NumWithout > 1 || NumWith == 0)
    return false;

  // We may have a case where all predecessors have the instruction,
  // and we just need to insert a phi node. Otherwise, perform
  // insertion.
  Instruction *PREInstr = nullptr;

  if (NumWithout != 0) {
    if (!isSafeToSpeculativelyExecute(CurInst)) {
      // It is only valid to insert a new instruction if the current
      // instruction is always executed. An instruction with implicit control
      // flow could prevent us from doing it. If we cannot speculate the
      // execution, then PRE should be prohibited.
      if (ICF->isDominatedByICFIFromSameBlock(CurInst))
        return false;
    }

    // Don't do PRE across an indirect branch.
    if (isa<IndirectBrInst>(PREPred->getTerminator()))
      return false;

    // We can't do PRE safely on a critical edge, so instead we schedule
    // the edge to be split and perform the PRE the next time we iterate
    // on the function.
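    // (Illustrative: an edge is critical when its source has several
    // successors and its destination has several predecessors. Hoisting the
    // expression into PREPred directly would evaluate it on paths that never
    // reach CurrentBlock, so the edge must be split first.)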
    unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
    if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
      toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
      return false;
    }
    // We need to insert somewhere, so let's give it a shot.
    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
      // If we failed insertion, make sure we remove the instruction.
      LLVM_DEBUG(verifyRemoved(PREInstr));
      PREInstr->deleteValue();
      return false;
    }
  }

  // Either we should have filled in the PRE instruction, or we should
  // not have needed insertions.
  assert(PREInstr != nullptr || NumWithout == 0);

  ++NumGVNPRE;

  // Create a PHI to make the value available in this block.
  PHINode *Phi =
      PHINode::Create(CurInst->getType(), predMap.size(),
                      CurInst->getName() + ".pre-phi", &CurrentBlock->front());
  for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
    if (Value *V = predMap[i].first) {
      // If we use an existing value in this phi, we have to patch the original
      // value because the phi will be used to replace a later value.
      patchReplacementInstruction(CurInst, V);
      Phi->addIncoming(V, predMap[i].second);
    } else
      Phi->addIncoming(PREInstr, PREPred);
  }

  VN.add(Phi, ValNo);
  // After creating a new PHI for ValNo, the phi translate result for ValNo
  // will be changed, so erase the related stale entries in the phi translate
  // cache.
  VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
  addToLeaderTable(ValNo, Phi, CurrentBlock);
  Phi->setDebugLoc(CurInst->getDebugLoc());
  CurInst->replaceAllUsesWith(Phi);
  if (MD && Phi->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Phi);
  VN.erase(CurInst);
  removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

  LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
  if (MD)
    MD->removeInstruction(CurInst);
  LLVM_DEBUG(verifyRemoved(CurInst));
  // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
  // some assertion failures.
  ICF->invalidateBlock(CurrentBlock);
  CurInst->eraseFromParent();
  ++NumGVNInstr;

  return true;
}

/// Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock())
      continue;

    // Don't perform PRE on an EH pad.
    if (CurrentBlock->isEHPad())
      continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
                              BE = CurrentBlock->end();
         BI != BE;) {
      Instruction *CurInst = &*BI++;
      Changed |= performScalarPRE(CurInst);
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// Split the critical edge connecting the given two blocks, and return
/// the block inserted on the critical edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
  BasicBlock *BB =
      SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT));
  if (MD)
    MD->invalidateCachedPredecessors();
  return BB;
}

/// Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second,
                      CriticalEdgeSplittingOptions(DT));
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  return true;
}

/// Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
  // Needed for value numbering with phi construction to work.
  // RPOT walks the graph in its constructor and will not be invalidated during
  // processBlock.
  ReversePostOrderTraversal<Function *> RPOT(&F);

  for (BasicBlock *BB : RPOT)
    Changed |= processBlock(BB);

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  BlockRPONumber.clear();
  TableAllocator.Reset();
  ICF->clear();
}

/// Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
           I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
    const LeaderTableEntry *Node = &I->second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}

/// BB is declared dead, which implies that other blocks become dead as well.
/// This function adds all such blocks to "DeadBlocks". For the dead blocks'
/// live successors, it updates their phi nodes by replacing the operands
/// corresponding to dead blocks with UndefVal.
void GVN::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert(Dom.begin(), Dom.end());

    // Figure out the dominance-frontier(D).
    for (BasicBlock *B : Dom) {
      for (BasicBlock *S : successors(B)) {
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (BasicBlock *P : predecessors(S))
          if (!DeadBlocks.count(P)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could be proved dead later on. That is why we don't update phi
          // operands at this moment.
          DF.insert(S);
        } else {
          // While S is not dominated by D, it is dead by now. This could
          // happen if S already had a dead predecessor before D was declared
          // dead.
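          // (E.g., illustrative: if S's only predecessors were P1, marked
          // dead earlier, and a block dominated by D, then S dies once D
          // dies, even though D itself does not dominate S.)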
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, update their phi nodes by replacing
  // the operands corresponding to dead blocks with UndefVal.
  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
       I != E; I++) {
    BasicBlock *B = *I;
    if (DeadBlocks.count(B))
      continue;

    SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
    for (BasicBlock *P : Preds) {
      if (!DeadBlocks.count(P))
        continue;

      if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }

      for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
        PHINode &Phi = cast<PHINode>(*II);
        Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
                             UndefValue::get(Phi.getType()));
        if (MD)
          MD->invalidateCachedPointerInfo(&Phi);
      }
    }
  }
}

// If the given branch is recognized as a foldable branch (i.e. a conditional
// branch with a constant condition), this function performs the following
// analyses and transformations.
// 1) If the dead outgoing edge is a critical edge, split it. Let R be the
//    target of the dead outgoing edge.
// 2) Identify the set of dead blocks implied by the branch's dead outgoing
//    edge. The result of this step will be the set {X | X is dominated by R}.
// 3) Identify those blocks which have at least one dead predecessor. The
//    result of this step will be dominance-frontier(R).
// 4) Update the PHIs in DF(R) by replacing the operands corresponding to
//    dead blocks with "UndefVal", in the hope that these PHIs will be
//    optimized away.
//
// Return true iff *NEW* dead code is found.
bool GVN::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  // If a branch has two identical successors, we cannot declare either dead.
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot =
      Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}

// performPRE() will trigger an assert if it comes across an instruction
// without an associated val-num. As a function normally has far more live
// instructions than dead ones, it makes more sense just to "fabricate" a
// val-num for the dead code than to check whether each instruction involved
// is dead or not.
void GVN::assignValNumForDeadCode() {
  for (BasicBlock *BB : DeadBlocks) {
    for (Instruction &Inst : *BB) {
      unsigned ValNum = VN.lookupOrAdd(&Inst);
      addToLeaderTable(ValNum, &Inst, BB);
    }
  }
}

class llvm::gvn::GVNLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  explicit GVNLegacyPass(bool NoMemDepAnalysis = !EnableMemDep)
      : FunctionPass(ID), NoMemDepAnalysis(NoMemDepAnalysis) {
    initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

    return Impl.runImpl(
        F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
        getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
        getAnalysis<AAResultsWrapperPass>().getAAResults(),
        NoMemDepAnalysis
            ? nullptr
            : &getAnalysis<MemoryDependenceWrapperPass>().getMemDep(),
        LIWP ? &LIWP->getLoopInfo() : nullptr,
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (!NoMemDepAnalysis)
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();

    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  }

private:
  bool NoMemDepAnalysis;
  GVN Impl;
};

char GVNLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false,
                    false)

// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
  return new GVNLegacyPass(NoMemDepAnalysis);
}
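// Example (illustrative, not part of this file's API surface) of scheduling
// this pass. With the legacy pass manager, assuming a Module M and a
// Function F:
//   legacy::FunctionPassManager FPM(&M);
//   FPM.add(createGVNPass(/*NoMemDepAnalysis=*/false));
//   FPM.run(F);
// With the new pass manager, the GVN class above can be added directly:
//   FunctionPassManager FPM;
//   FPM.addPass(GVN());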