//===- GVN.cpp - Eliminate redundant values and loads --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl, "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad, "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
                cl::desc("Max recurse depth (default = 1000)"));

struct llvm::GVN::Expression {
  uint32_t opcode;
  Type *type;
  bool commutative = false;
  SmallVector<uint32_t, 4> varargs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

namespace llvm {

template <> struct DenseMapInfo<GVN::Expression> {
  static inline GVN::Expression getEmptyKey() { return ~0U; }
  static inline GVN::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVN::Expression &e) {
    using llvm::hash_value;

    return static_cast<unsigned>(hash_value(e));
  }

  static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal   // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
                                  GVN &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  /// AV - The actual available value
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
    return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
                                               CmpInst::Predicate Predicate,
                                               Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
  if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI might be an extract from one of our recognised intrinsics. If it
    // is, we'll synthesize a semantically equivalent expression instead of
    // an extractvalue expression.
    switch (I->getIntrinsicID()) {
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
      e.opcode = Instruction::Add;
      break;
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::usub_with_overflow:
      e.opcode = Instruction::Sub;
      break;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
      e.opcode = Instruction::Mul;
      break;
    default:
      break;
    }

    if (e.opcode != 0) {
      // Intrinsic recognized. Grab its args to finish building the expression.
      assert(I->getNumArgOperands() == 2 &&
             "Expect two args for recognised intrinsics.");
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
      return e;
    }
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVN::ValueTable::ValueTable() = default;
GVN::ValueTable::ValueTable(const ValueTable &) = default;
GVN::ValueTable::ValueTable(ValueTable &&) = default;
GVN::ValueTable::~ValueTable() = default;

/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }
    if (!MD) {
      uint32_t e = assignExpNewValueNum(exp).first;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst *local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: Move the checking logic to MemDep!
    CallInst *cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// Returns true if a value number exists for the specified value.
bool GVN::ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }

/// lookupOrAdd - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value *, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction *I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value *, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                         CmpInst::Predicate Predicate,
                                         Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVN::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value *, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &MemDep = AM.getResult<MemoryDependenceAnalysis>(F);
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, &MemDep, LI, &ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value *>::iterator I = d.begin(),
         E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock *, char> &FullyAvailableBlocks,
                            uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock *, char>::iterator, char> IV =
      FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks, RecurseDepth + 1))
      goto SpeculationFailure;

  return true;

// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock *, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                                     GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
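  // (With a single available value whose block properly dominates the load,
  // every path to the load already computes that value, so no PHI node is
  // needed and the value can be reused as-is.)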
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominates this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode *, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
                                                Instruction *InsertPt,
                                                GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                   << *getSimpleValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that GVN
      // tracks. It is potentially possible to remove the load from the table,
      // but then all of the operations based on it would need to be
      // rehashed. Just leave the dead load around.
      gvn.getMemDep().removeInstruction(Load);
      DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
                   << *getCoercedLoadValue() << '\n'
                   << *Res << '\n'
                   << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                 << " " << *getMemIntrinValue() << '\n'
                 << *Res << '\n' << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  User *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
  R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : LI->getPointerOperand()->users())
    if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
        DT->dominates(cast<Instruction>(U), LI)) {
      // FIXME: for now give up if there are multiple memory accesses that
      // dominate the load. We need further analysis to decide which one
      // we're forwarding from.
      if (OtherAccess)
        OtherAccess = nullptr;
      else
        OtherAccess = U;
    }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
                                  Value *Address, AvailableValue &Res) {
  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(LI->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = LI->getModule()->getDataLayout();

  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //   load i32* P
    //   load i8* (P+1)
    // if we have this, replace the latter with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);

        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLI, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
      if (Address && !LI->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }
    // Nothing known about this clobber, have to be conservative
    DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      LI->printAsOperand(dbgs());
      Instruction *I = DepInfo.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(LI, DepInfo, DT, ORE);

    return false;
  }
  assert(DepInfo.isDef() && "follows from above");

  Instruction *DepInst = DepInfo.getInst();

  // Loading the allocation -> undef.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
      // Loading immediately after lifetime begin -> undef.
      isLifetimeStart(DepInst)) {
    Res = AvailableValue::get(UndefValue::get(LI->getType()));
    return true;
  }

  // Loading from calloc (which zero initializes memory) -> zero
  if (isCallocLikeFn(DepInst, TLI)) {
    Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
    return true;
  }

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is larger or equal to
    // the loaded value, we can reuse it.
    if (S->getValueOperand()->getType() != LI->getType() &&
        !canCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                         LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::get(S->getValueOperand());
    return true;
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (LD->getType() != LI->getType() &&
        !canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::getLoad(LD);
    return true;
  }

  // Unknown def - must be conservative
  DEBUG(
    // fast print dep, using operator<< on instruction is too slow.
    dbgs() << "GVN: load ";
    LI->printAsOperand(dbgs());
    dbgs() << " has unknown def " << *DepInst << '\n';
  );
  return false;
}

void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op is disguised as a load evaluating the same
      // value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
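    // For example, when the load's pointer operand is a PHI, the dependence
    // recorded for a predecessor block refers to that PHI's incoming value
    // there; getAddress() below returns that translated address.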
    Value *Address = Deps[i].getAddress();

    AvailableValue AV;
    if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
      // subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;
  bool IsSafeToSpeculativelyExecute = isSafeToSpeculativelyExecute(LI);

  // Check that there are no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass
  // execution to the following instruction, then moving through it may become
  // invalid. For example:
  //
  //   int arr[LEN];
  //   int index = ???;
  //   ...
  //   guard(0 <= index && index < LEN);
  //   use(arr[index]);
  //
  // It is illegal to move the array access to any point above the guard,
  // because if the index is out of bounds we should deoptimize rather than
  // access the array.
  // Check that there is no guard in this block above our instruction.
  if (!IsSafeToSpeculativelyExecute) {
    auto It = FirstImplicitControlFlowInsts.find(TmpBB);
    if (It != FirstImplicitControlFlowInsts.end()) {
      assert(It->second->getParent() == TmpBB &&
             "Implicit control flow map broken?");
      if (OI->dominates(It->second, LI))
        return false;
    }
  }
  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    if (!IsSafeToSpeculativelyExecute &&
        FirstImplicitControlFlowInsts.count(TmpBB))
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, char> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = true;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      DEBUG(dbgs()
            << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
            << Pred->getName() << "': " << *LI << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                     << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        DEBUG(dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                 << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = LI->getModule()->getDataLayout();
  SmallVector<Instruction *, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
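    // PHITranslateWithInsertion below walks the address expression back
    // through any PHIs into UnavailablePred, inserting whatever instructions
    // are needed to recompute it there; those are collected in NewInsts.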
    PHITransAddr Address(LI->getPointerOperand(), DL, AC);
    Value *LoadPtr = nullptr;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (!LoadPtr) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                   << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      markInstructionForDeletion(I);
    }
    // HINT: Don't revert the edge-splitting as the following transformation
    // may also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // Instructions that have been inserted in predecessor(s) to materialize
    // the load address do not retain their original debug locations. Doing
    // so could lead to confusing (but correct) source attributions.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?
    I->setDebugLoc(DebugLoc());

    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  for (const auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;
    Value *LoadPtr = PredLoad.second;

    auto *NewLoad = new LoadInst(LoadPtr, LI->getName() + ".pre",
                                 LI->isVolatile(), LI->getAlignment(),
                                 LI->getOrdering(), LI->getSyncScopeID(),
                                 UnavailablePred->getTerminator());
    NewLoad->setDebugLoc(LI->getDebugLoc());

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
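  // Every formerly unavailable predecessor now has an entry in ValuesPerBlock
  // (the newly inserted load), so SSA construction below can merge them.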
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(LI->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
           << "load eliminated by PRE";
  });
  ++NumPRELoad;
  return true;
}

static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
           << "load of type " << NV("Type", LI->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}

/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Non-local speculations are not allowed under ASan.
  if (LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(LI, Deps);

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > 100)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      LI->printAsOperand(dbgs());
      dbgs() << " has unknown dependencies\n";
    );
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // If instruction I has debug info, then we should not update it.
      // Also, if I has a null DebugLoc, then it is still potentially incorrect
      // to propagate LI's DebugLoc because LI may not post-dominate I.
      if (LI->getDebugLoc() && LI->getParent() == I->getParent())
        I->setDebugLoc(LI->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    reportLoadElim(LI, V, ORE);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}

bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
  assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
         "This function can only be called with llvm.assume intrinsic");
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      // Insert a new store to null instruction before the assume to indicate
      // that this code is not reachable. FIXME: We could insert an unreachable
      // instruction directly because we can modify the CFG.
      new StoreInst(UndefValue::get(Int8Ty),
                    Constant::getNullValue(Int8Ty->getPointerTo()),
                    IntrinsicI);
    }
    markInstructionForDeletion(IntrinsicI);
    return false;
  } else if (isa<Constant>(V)) {
    // If it's not false and is a constant, it must evaluate to true. This
    // means our assume is assume(true), and thus, pointless, and we don't
    // want to do anything more here.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);

    // This property is only true in dominated successors, propagateEquality
    // will check dominance for us.
    Changed |= propagateEquality(V, True, Edge, false);
  }

  // We can replace assume value with true, which covers cases like this:
  //   call void @llvm.assume(i1 %cmp)
  //   br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
  ReplaceWithConstMap[V] = True;

  // If one operand of an equality comparison is a constant, adding it to the
  // map will cover cases like this:
  //   %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
  //   call void @llvm.assume(i1 %cmp)
  //   ret float %0 ; will change it to ret float 3.000000e+00
  if (auto *CmpI = dyn_cast<CmpInst>(V)) {
    if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ ||
        CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
        (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
         CmpI->getFastMathFlags().noNaNs())) {
      Value *CmpLHS = CmpI->getOperand(0);
      Value *CmpRHS = CmpI->getOperand(1);
      if (isa<Constant>(CmpLHS))
        std::swap(CmpLHS, CmpRHS);
      auto *RHSConst = dyn_cast<Constant>(CmpRHS);

      // If only one operand is constant.
      if (RHSConst != nullptr && !isa<Constant>(CmpLHS))
        ReplaceWithConstMap[CmpLHS] = RHSConst;
    }
  }
  return Changed;
}

static void patchReplacementInstruction(Instruction *I, Value *Repl) {
  auto *ReplInst = dyn_cast<Instruction>(Repl);
  if (!ReplInst)
    return;

  // Patch the replacement so that it is not more restrictive than the value
  // being replaced.
  // Note that if 'I' is a load being replaced by some operation,
  // for example, by an arithmetic operation, then andIRFlags()
  // would just erase all math flags from the original arithmetic
  // operation, which is clearly not wanted and not needed.
  if (!isa<LoadInst>(I))
    ReplInst->andIRFlags(I);

  // FIXME: If both the original and replacement value are part of the
  // same control-flow region (meaning that the execution of one
  // guarantees the execution of the other), then we can combine the
  // noalias scopes here and do better than the general conservative
  // answer used in combineMetadata().

  // In general, GVN unifies expressions over different control-flow
  // regions, and so we need a conservative combination of the noalias
  // scopes.
  static const unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,    LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias, LLVMContext::MD_range,
      LLVMContext::MD_fpmath,  LLVMContext::MD_invariant_load,
      LLVMContext::MD_invariant_group};
  combineMetadata(ReplInst, I, KnownIDs);
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access
  if (!L->isUnordered())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  // Only handle the local case below
  if (!Dep.isDef() && !Dep.isClobber()) {
    // This might be a NonFuncLocal or an Unknown
    DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      L->printAsOperand(dbgs());
      dbgs() << " has unknown dependence\n";
    );
    return false;
  }

  AvailableValue AV;
  if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
    Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);

    // Replace the load!
    patchAndReplaceAllUsesWith(L, AvailableValue);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    reportLoadElim(L, AvailableValue, ORE);
    // Tell MDA to reexamine the reused pointer since we might have more
    // information after forwarding it.
    if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(AvailableValue);
    return true;
  }

  return false;
}

/// Return a pair whose first field is the value number of \p Exp and whose
/// second field indicates whether it is a newly created value number.
std::pair<uint32_t, bool>
GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}

/// Return whether all the values related with the same \p Num are
/// defined in \p BB.
bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                     GVN &Gvn) {
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals && Vals->BB == BB)
    Vals = Vals->Next;
  return !Vals;
}

/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
                                       const BasicBlock *PhiBlock,
                                       uint32_t Num, GVN &Gvn) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}

/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           uint32_t Num, GVN &Gvn) {
  if (PHINode *PN = NumberingPhi[Num]) {
    for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
      if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
        if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
          return TransVal;
    }
    return Num;
  }

  // If any value related with Num is defined in a BB other than PhiBlock, it
  // cannot depend on a phi in PhiBlock without going through a backedge. We
  // can do an early exit in that case to save compile time.
  if (!areAllValsInBB(Num, PhiBlock, Gvn))
    return Num;

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
    return Num;
  Expression Exp = Expressions[ExprIdx[Num]];

  for (unsigned i = 0; i < Exp.varargs.size(); i++) {
    // For InsertValue and ExtractValue, some varargs are index numbers
    // instead of value numbers. Those index numbers should not be
    // translated.
    if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
        (i > 0 && Exp.opcode == Instruction::ExtractValue))
      continue;
    Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
  }

  if (Exp.commutative) {
    assert(Exp.varargs.size() == 2 && "Unsupported commutative expression!");
    if (Exp.varargs[0] > Exp.varargs[1]) {
      std::swap(Exp.varargs[0], Exp.varargs[1]);
      uint32_t Opcode = Exp.opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.opcode = (Opcode << 8) |
                     CmpInst::getSwappedPredicate(
                         static_cast<CmpInst::Predicate>(Exp.opcode & 255));
    }
  }

  if (uint32_t NewNum = expressionNumbering[Exp])
    return NewNum;
  return Num;
}

/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
/// again.
1629 void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num, 1630 const BasicBlock &CurrBlock) { 1631 for (const BasicBlock *Pred : predecessors(&CurrBlock)) { 1632 auto FindRes = PhiTranslateTable.find({Num, Pred}); 1633 if (FindRes != PhiTranslateTable.end()) 1634 PhiTranslateTable.erase(FindRes); 1635 } 1636 } 1637 1638 // In order to find a leader for a given value number at a 1639 // specific basic block, we first obtain the list of all Values for that number, 1640 // and then scan the list to find one whose block dominates the block in 1641 // question. This is fast because dominator tree queries consist of only 1642 // a few comparisons of DFS numbers. 1643 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) { 1644 LeaderTableEntry Vals = LeaderTable[num]; 1645 if (!Vals.Val) return nullptr; 1646 1647 Value *Val = nullptr; 1648 if (DT->dominates(Vals.BB, BB)) { 1649 Val = Vals.Val; 1650 if (isa<Constant>(Val)) return Val; 1651 } 1652 1653 LeaderTableEntry* Next = Vals.Next; 1654 while (Next) { 1655 if (DT->dominates(Next->BB, BB)) { 1656 if (isa<Constant>(Next->Val)) return Next->Val; 1657 if (!Val) Val = Next->Val; 1658 } 1659 1660 Next = Next->Next; 1661 } 1662 1663 return Val; 1664 } 1665 1666 /// There is an edge from 'Src' to 'Dst'. Return 1667 /// true if every path from the entry block to 'Dst' passes via this edge. In 1668 /// particular 'Dst' must not be reachable via another edge from 'Src'. 1669 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, 1670 DominatorTree *DT) { 1671 // While in theory it is interesting to consider the case in which Dst has 1672 // more than one predecessor, because Dst might be part of a loop which is 1673 // only reachable from Src, in practice it is pointless since at the time 1674 // GVN runs all such loops have preheaders, which means that Dst will have 1675 // been changed to have only one predecessor, namely Src. 1676 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor(); 1677 assert((!Pred || Pred == E.getStart()) && 1678 "No edge between these basic blocks!"); 1679 return Pred != nullptr; 1680 } 1681 1682 void GVN::assignBlockRPONumber(Function &F) { 1683 uint32_t NextBlockNumber = 1; 1684 ReversePostOrderTraversal<Function *> RPOT(&F); 1685 for (BasicBlock *BB : RPOT) 1686 BlockRPONumber[BB] = NextBlockNumber++; 1687 } 1688 1689 // Tries to replace instruction with const, using information from 1690 // ReplaceWithConstMap. 1691 bool GVN::replaceOperandsWithConsts(Instruction *Instr) const { 1692 bool Changed = false; 1693 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) { 1694 Value *Operand = Instr->getOperand(OpNum); 1695 auto it = ReplaceWithConstMap.find(Operand); 1696 if (it != ReplaceWithConstMap.end()) { 1697 assert(!isa<Constant>(Operand) && 1698 "Replacing constants with constants is invalid"); 1699 DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " << *it->second 1700 << " in instruction " << *Instr << '\n'); 1701 Instr->setOperand(OpNum, it->second); 1702 Changed = true; 1703 } 1704 } 1705 return Changed; 1706 } 1707 1708 /// The given values are known to be equal in every block 1709 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with 1710 /// 'RHS' everywhere in the scope. Returns whether a change was made. 1711 /// If DominatesByEdge is false, then it means that we will propagate the RHS 1712 /// value starting from the end of Root.Start. 
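/// For example (illustrative IR only), given
///   %cmp = icmp eq i32 %x, %y
///   br i1 %cmp, label %then, label %else
/// the caller propagates "%cmp == true" along the edge into %then; the worklist
/// below then also deduces %x == %y on that edge and rewrites dominated uses of
/// the younger of the two values in terms of the older one.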
1713 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root, 1714 bool DominatesByEdge) { 1715 SmallVector<std::pair<Value*, Value*>, 4> Worklist; 1716 Worklist.push_back(std::make_pair(LHS, RHS)); 1717 bool Changed = false; 1718 // For speed, compute a conservative fast approximation to 1719 // DT->dominates(Root, Root.getEnd()); 1720 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT); 1721 1722 while (!Worklist.empty()) { 1723 std::pair<Value*, Value*> Item = Worklist.pop_back_val(); 1724 LHS = Item.first; RHS = Item.second; 1725 1726 if (LHS == RHS) 1727 continue; 1728 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!"); 1729 1730 // Don't try to propagate equalities between constants. 1731 if (isa<Constant>(LHS) && isa<Constant>(RHS)) 1732 continue; 1733 1734 // Prefer a constant on the right-hand side, or an Argument if no constants. 1735 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS))) 1736 std::swap(LHS, RHS); 1737 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!"); 1738 1739 // If there is no obvious reason to prefer the left-hand side over the 1740 // right-hand side, ensure the longest lived term is on the right-hand side, 1741 // so the shortest lived term will be replaced by the longest lived. 1742 // This tends to expose more simplifications. 1743 uint32_t LVN = VN.lookupOrAdd(LHS); 1744 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) || 1745 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) { 1746 // Move the 'oldest' value to the right-hand side, using the value number 1747 // as a proxy for age. 1748 uint32_t RVN = VN.lookupOrAdd(RHS); 1749 if (LVN < RVN) { 1750 std::swap(LHS, RHS); 1751 LVN = RVN; 1752 } 1753 } 1754 1755 // If value numbering later sees that an instruction in the scope is equal 1756 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve 1757 // the invariant that instructions only occur in the leader table for their 1758 // own value number (this is used by removeFromLeaderTable), do not do this 1759 // if RHS is an instruction (if an instruction in the scope is morphed into 1760 // LHS then it will be turned into RHS by the next GVN iteration anyway, so 1761 // using the leader table is about compiling faster, not optimizing better). 1762 // The leader table only tracks basic blocks, not edges. Only add to if we 1763 // have the simple case where the edge dominates the end. 1764 if (RootDominatesEnd && !isa<Instruction>(RHS)) 1765 addToLeaderTable(LVN, RHS, Root.getEnd()); 1766 1767 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As 1768 // LHS always has at least one use that is not dominated by Root, this will 1769 // never do anything if LHS has only one use. 1770 if (!LHS->hasOneUse()) { 1771 unsigned NumReplacements = 1772 DominatesByEdge 1773 ? replaceDominatedUsesWith(LHS, RHS, *DT, Root) 1774 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart()); 1775 1776 Changed |= NumReplacements > 0; 1777 NumGVNEqProp += NumReplacements; 1778 } 1779 1780 // Now try to deduce additional equalities from this one. For example, if 1781 // the known equality was "(A != B)" == "false" then it follows that A and B 1782 // are equal in the scope. Only boolean equalities with an explicit true or 1783 // false RHS are currently supported. 1784 if (!RHS->getType()->isIntegerTy(1)) 1785 // Not a boolean equality - bail out. 
1786 continue; 1787 ConstantInt *CI = dyn_cast<ConstantInt>(RHS); 1788 if (!CI) 1789 // RHS neither 'true' nor 'false' - bail out. 1790 continue; 1791 // Whether RHS equals 'true'. Otherwise it equals 'false'. 1792 bool isKnownTrue = CI->isMinusOne(); 1793 bool isKnownFalse = !isKnownTrue; 1794 1795 // If "A && B" is known true then both A and B are known true. If "A || B" 1796 // is known false then both A and B are known false. 1797 Value *A, *B; 1798 if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) || 1799 (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) { 1800 Worklist.push_back(std::make_pair(A, RHS)); 1801 Worklist.push_back(std::make_pair(B, RHS)); 1802 continue; 1803 } 1804 1805 // If we are propagating an equality like "(A == B)" == "true" then also 1806 // propagate the equality A == B. When propagating a comparison such as 1807 // "(A >= B)" == "true", replace all instances of "A < B" with "false". 1808 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) { 1809 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1); 1810 1811 // If "A == B" is known true, or "A != B" is known false, then replace 1812 // A with B everywhere in the scope. 1813 if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) || 1814 (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE)) 1815 Worklist.push_back(std::make_pair(Op0, Op1)); 1816 1817 // Handle the floating point versions of equality comparisons too. 1818 if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) || 1819 (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) { 1820 1821 // Floating point -0.0 and 0.0 compare equal, so we can only 1822 // propagate values if we know that we have a constant and that 1823 // its value is non-zero. 1824 1825 // FIXME: We should do this optimization if 'no signed zeros' is 1826 // applicable via an instruction-level fast-math-flag or some other 1827 // indicator that relaxed FP semantics are being used. 1828 1829 if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero()) 1830 Worklist.push_back(std::make_pair(Op0, Op1)); 1831 } 1832 1833 // If "A >= B" is known true, replace "A < B" with false everywhere. 1834 CmpInst::Predicate NotPred = Cmp->getInversePredicate(); 1835 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse); 1836 // Since we don't have the instruction "A < B" immediately to hand, work 1837 // out the value number that it would have and use that to find an 1838 // appropriate instruction (if any). 1839 uint32_t NextNum = VN.getNextUnusedValueNumber(); 1840 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1); 1841 // If the number we were assigned was brand new then there is no point in 1842 // looking for an instruction realizing it: there cannot be one! 1843 if (Num < NextNum) { 1844 Value *NotCmp = findLeader(Root.getEnd(), Num); 1845 if (NotCmp && isa<Instruction>(NotCmp)) { 1846 unsigned NumReplacements = 1847 DominatesByEdge 1848 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root) 1849 : replaceDominatedUsesWith(NotCmp, NotVal, *DT, 1850 Root.getStart()); 1851 Changed |= NumReplacements > 0; 1852 NumGVNEqProp += NumReplacements; 1853 } 1854 } 1855 // Ensure that any instruction in scope that gets the "A < B" value number 1856 // is replaced with false. 1857 // The leader table only tracks basic blocks, not edges. Only add to if we 1858 // have the simple case where the edge dominates the end. 
1859 if (RootDominatesEnd) 1860 addToLeaderTable(Num, NotVal, Root.getEnd()); 1861 1862 continue; 1863 } 1864 } 1865 1866 return Changed; 1867 } 1868 1869 /// When calculating availability, handle an instruction 1870 /// by inserting it into the appropriate sets 1871 bool GVN::processInstruction(Instruction *I) { 1872 // Ignore dbg info intrinsics. 1873 if (isa<DbgInfoIntrinsic>(I)) 1874 return false; 1875 1876 // If the instruction can be easily simplified then do so now in preference 1877 // to value numbering it. Value numbering often exposes redundancies, for 1878 // example if it determines that %y is equal to %x then the instruction 1879 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. 1880 const DataLayout &DL = I->getModule()->getDataLayout(); 1881 if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) { 1882 bool Changed = false; 1883 if (!I->use_empty()) { 1884 I->replaceAllUsesWith(V); 1885 Changed = true; 1886 } 1887 if (isInstructionTriviallyDead(I, TLI)) { 1888 markInstructionForDeletion(I); 1889 Changed = true; 1890 } 1891 if (Changed) { 1892 if (MD && V->getType()->isPtrOrPtrVectorTy()) 1893 MD->invalidateCachedPointerInfo(V); 1894 ++NumGVNSimpl; 1895 return true; 1896 } 1897 } 1898 1899 if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I)) 1900 if (IntrinsicI->getIntrinsicID() == Intrinsic::assume) 1901 return processAssumeIntrinsic(IntrinsicI); 1902 1903 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 1904 if (processLoad(LI)) 1905 return true; 1906 1907 unsigned Num = VN.lookupOrAdd(LI); 1908 addToLeaderTable(Num, LI, LI->getParent()); 1909 return false; 1910 } 1911 1912 // For conditional branches, we can perform simple conditional propagation on 1913 // the condition value itself. 1914 if (BranchInst *BI = dyn_cast<BranchInst>(I)) { 1915 if (!BI->isConditional()) 1916 return false; 1917 1918 if (isa<Constant>(BI->getCondition())) 1919 return processFoldableCondBr(BI); 1920 1921 Value *BranchCond = BI->getCondition(); 1922 BasicBlock *TrueSucc = BI->getSuccessor(0); 1923 BasicBlock *FalseSucc = BI->getSuccessor(1); 1924 // Avoid multiple edges early. 1925 if (TrueSucc == FalseSucc) 1926 return false; 1927 1928 BasicBlock *Parent = BI->getParent(); 1929 bool Changed = false; 1930 1931 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext()); 1932 BasicBlockEdge TrueE(Parent, TrueSucc); 1933 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true); 1934 1935 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext()); 1936 BasicBlockEdge FalseE(Parent, FalseSucc); 1937 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true); 1938 1939 return Changed; 1940 } 1941 1942 // For switches, propagate the case values into the case destinations. 1943 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) { 1944 Value *SwitchCond = SI->getCondition(); 1945 BasicBlock *Parent = SI->getParent(); 1946 bool Changed = false; 1947 1948 // Remember how many outgoing edges there are to every successor. 1949 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges; 1950 for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i) 1951 ++SwitchEdges[SI->getSuccessor(i)]; 1952 1953 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); 1954 i != e; ++i) { 1955 BasicBlock *Dst = i->getCaseSuccessor(); 1956 // If there is only a single edge, propagate the case value into it. 
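      // If several case values share the same successor, the condition is not
      // known to equal any single one of them on entry to that block, so there
      // is nothing to propagate.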
1957 if (SwitchEdges.lookup(Dst) == 1) { 1958 BasicBlockEdge E(Parent, Dst); 1959 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true); 1960 } 1961 } 1962 return Changed; 1963 } 1964 1965 // Instructions with void type don't return a value, so there's 1966 // no point in trying to find redundancies in them. 1967 if (I->getType()->isVoidTy()) 1968 return false; 1969 1970 uint32_t NextNum = VN.getNextUnusedValueNumber(); 1971 unsigned Num = VN.lookupOrAdd(I); 1972 1973 // Allocations are always uniquely numbered, so we can save time and memory 1974 // by fast failing them. 1975 if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) { 1976 addToLeaderTable(Num, I, I->getParent()); 1977 return false; 1978 } 1979 1980 // If the number we were assigned was a brand new VN, then we don't 1981 // need to do a lookup to see if the number already exists 1982 // somewhere in the domtree: it can't! 1983 if (Num >= NextNum) { 1984 addToLeaderTable(Num, I, I->getParent()); 1985 return false; 1986 } 1987 1988 // Perform fast-path value-number based elimination of values inherited from 1989 // dominators. 1990 Value *Repl = findLeader(I->getParent(), Num); 1991 if (!Repl) { 1992 // Failure, just remember this instance for future use. 1993 addToLeaderTable(Num, I, I->getParent()); 1994 return false; 1995 } else if (Repl == I) { 1996 // If I was the result of a shortcut PRE, it might already be in the table 1997 // and the best replacement for itself. Nothing to do. 1998 return false; 1999 } 2000 2001 // Remove it! 2002 patchAndReplaceAllUsesWith(I, Repl); 2003 if (MD && Repl->getType()->isPtrOrPtrVectorTy()) 2004 MD->invalidateCachedPointerInfo(Repl); 2005 markInstructionForDeletion(I); 2006 return true; 2007 } 2008 2009 /// runOnFunction - This is the main transformation entry point for a function. 2010 bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT, 2011 const TargetLibraryInfo &RunTLI, AAResults &RunAA, 2012 MemoryDependenceResults *RunMD, LoopInfo *LI, 2013 OptimizationRemarkEmitter *RunORE) { 2014 AC = &RunAC; 2015 DT = &RunDT; 2016 VN.setDomTree(DT); 2017 TLI = &RunTLI; 2018 VN.setAliasAnalysis(&RunAA); 2019 MD = RunMD; 2020 OrderedInstructions OrderedInstrs(DT); 2021 OI = &OrderedInstrs; 2022 VN.setMemDep(MD); 2023 ORE = RunORE; 2024 2025 bool Changed = false; 2026 bool ShouldContinue = true; 2027 2028 // Merge unconditional branches, allowing PRE to catch more 2029 // optimization opportunities. 2030 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { 2031 BasicBlock *BB = &*FI++; 2032 2033 bool removedBlock = MergeBlockIntoPredecessor(BB, DT, LI, MD); 2034 if (removedBlock) 2035 ++NumGVNBlocks; 2036 2037 Changed |= removedBlock; 2038 } 2039 2040 unsigned Iteration = 0; 2041 while (ShouldContinue) { 2042 DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); 2043 ShouldContinue = iterateOnFunction(F); 2044 Changed |= ShouldContinue; 2045 ++Iteration; 2046 } 2047 2048 if (EnablePRE) { 2049 // Fabricate val-num for dead-code in order to suppress assertion in 2050 // performPRE(). 2051 assignValNumForDeadCode(); 2052 assignBlockRPONumber(F); 2053 bool PREChanged = true; 2054 while (PREChanged) { 2055 PREChanged = performPRE(F); 2056 Changed |= PREChanged; 2057 } 2058 } 2059 2060 // FIXME: Should perform GVN again after PRE does something. PRE can move 2061 // computations into blocks where they become fully redundant. Note that 2062 // we can't do this until PRE's critical edge splitting updates memdep. 
2063 // Actually, when this happens, we should just fully integrate PRE into GVN. 2064 2065 cleanupGlobalSets(); 2066 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each 2067 // iteration. 2068 DeadBlocks.clear(); 2069 2070 return Changed; 2071 } 2072 2073 bool GVN::processBlock(BasicBlock *BB) { 2074 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function 2075 // (and incrementing BI before processing an instruction). 2076 assert(InstrsToErase.empty() && 2077 "We expect InstrsToErase to be empty across iterations"); 2078 if (DeadBlocks.count(BB)) 2079 return false; 2080 2081 // Clearing map before every BB because it can be used only for single BB. 2082 ReplaceWithConstMap.clear(); 2083 bool ChangedFunction = false; 2084 2085 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); 2086 BI != BE;) { 2087 if (!ReplaceWithConstMap.empty()) 2088 ChangedFunction |= replaceOperandsWithConsts(&*BI); 2089 ChangedFunction |= processInstruction(&*BI); 2090 2091 if (InstrsToErase.empty()) { 2092 ++BI; 2093 continue; 2094 } 2095 2096 // If we need some instructions deleted, do it now. 2097 NumGVNInstr += InstrsToErase.size(); 2098 2099 // Avoid iterator invalidation. 2100 bool AtStart = BI == BB->begin(); 2101 if (!AtStart) 2102 --BI; 2103 2104 bool InvalidateImplicitCF = false; 2105 const Instruction *MaybeFirstICF = FirstImplicitControlFlowInsts.lookup(BB); 2106 for (auto *I : InstrsToErase) { 2107 assert(I->getParent() == BB && "Removing instruction from wrong block?"); 2108 DEBUG(dbgs() << "GVN removed: " << *I << '\n'); 2109 salvageDebugInfo(*I); 2110 if (MD) MD->removeInstruction(I); 2111 DEBUG(verifyRemoved(I)); 2112 if (MaybeFirstICF == I) { 2113 // We have erased the first ICF in block. The map needs to be updated. 2114 InvalidateImplicitCF = true; 2115 // Do not keep dangling pointer on the erased instruction. 2116 MaybeFirstICF = nullptr; 2117 } 2118 I->eraseFromParent(); 2119 } 2120 2121 OI->invalidateBlock(BB); 2122 InstrsToErase.clear(); 2123 if (InvalidateImplicitCF) 2124 fillImplicitControlFlowInfo(BB); 2125 2126 if (AtStart) 2127 BI = BB->begin(); 2128 else 2129 ++BI; 2130 } 2131 2132 return ChangedFunction; 2133 } 2134 2135 // Instantiate an expression in a predecessor that lacked it. 2136 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, 2137 BasicBlock *Curr, unsigned int ValNo) { 2138 // Because we are going top-down through the block, all value numbers 2139 // will be available in the predecessor by the time we need them. Any 2140 // that weren't originally present will have been instantiated earlier 2141 // in this loop. 2142 bool success = true; 2143 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) { 2144 Value *Op = Instr->getOperand(i); 2145 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) 2146 continue; 2147 // This could be a newly inserted instruction, in which case, we won't 2148 // find a value number, and should give up before we hurt ourselves. 2149 // FIXME: Rewrite the infrastructure to let it easier to value number 2150 // and process newly inserted instructions. 2151 if (!VN.exists(Op)) { 2152 success = false; 2153 break; 2154 } 2155 uint32_t TValNo = 2156 VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this); 2157 if (Value *V = findLeader(Pred, TValNo)) { 2158 Instr->setOperand(i, V); 2159 } else { 2160 success = false; 2161 break; 2162 } 2163 } 2164 2165 // Fail out if we encounter an operand that is not available in 2166 // the PRE predecessor. 
This is typically because of loads which 2167 // are not value numbered precisely. 2168 if (!success) 2169 return false; 2170 2171 Instr->insertBefore(Pred->getTerminator()); 2172 Instr->setName(Instr->getName() + ".pre"); 2173 Instr->setDebugLoc(Instr->getDebugLoc()); 2174 2175 unsigned Num = VN.lookupOrAdd(Instr); 2176 VN.add(Instr, Num); 2177 2178 // Update the availability map to include the new instruction. 2179 addToLeaderTable(Num, Instr, Pred); 2180 return true; 2181 } 2182 2183 bool GVN::performScalarPRE(Instruction *CurInst) { 2184 if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) || 2185 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() || 2186 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || 2187 isa<DbgInfoIntrinsic>(CurInst)) 2188 return false; 2189 2190 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from 2191 // sinking the compare again, and it would force the code generator to 2192 // move the i1 from processor flags or predicate registers into a general 2193 // purpose register. 2194 if (isa<CmpInst>(CurInst)) 2195 return false; 2196 2197 // We don't currently value number ANY inline asm calls. 2198 if (CallInst *CallI = dyn_cast<CallInst>(CurInst)) 2199 if (CallI->isInlineAsm()) 2200 return false; 2201 2202 uint32_t ValNo = VN.lookup(CurInst); 2203 2204 // Look for the predecessors for PRE opportunities. We're 2205 // only trying to solve the basic diamond case, where 2206 // a value is computed in the successor and one predecessor, 2207 // but not the other. We also explicitly disallow cases 2208 // where the successor is its own predecessor, because they're 2209 // more complicated to get right. 2210 unsigned NumWith = 0; 2211 unsigned NumWithout = 0; 2212 BasicBlock *PREPred = nullptr; 2213 BasicBlock *CurrentBlock = CurInst->getParent(); 2214 2215 SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap; 2216 for (BasicBlock *P : predecessors(CurrentBlock)) { 2217 // We're not interested in PRE where blocks with predecessors that are 2218 // not reachable. 2219 if (!DT->isReachableFromEntry(P)) { 2220 NumWithout = 2; 2221 break; 2222 } 2223 // It is not safe to do PRE when P->CurrentBlock is a loop backedge, and 2224 // when CurInst has operand defined in CurrentBlock (so it may be defined 2225 // by phi in the loop header). 2226 if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] && 2227 llvm::any_of(CurInst->operands(), [&](const Use &U) { 2228 if (auto *Inst = dyn_cast<Instruction>(U.get())) 2229 return Inst->getParent() == CurrentBlock; 2230 return false; 2231 })) { 2232 NumWithout = 2; 2233 break; 2234 } 2235 2236 uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this); 2237 Value *predV = findLeader(P, TValNo); 2238 if (!predV) { 2239 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P)); 2240 PREPred = P; 2241 ++NumWithout; 2242 } else if (predV == CurInst) { 2243 /* CurInst dominates this predecessor. */ 2244 NumWithout = 2; 2245 break; 2246 } else { 2247 predMap.push_back(std::make_pair(predV, P)); 2248 ++NumWith; 2249 } 2250 } 2251 2252 // Don't do PRE when it might increase code size, i.e. when 2253 // we would need to insert instructions in more than one pred. 2254 if (NumWithout > 1 || NumWith == 0) 2255 return false; 2256 2257 // We may have a case where all predecessors have the instruction, 2258 // and we just need to insert a phi node. Otherwise, perform 2259 // insertion. 
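  // Illustrative diamond (hypothetical block/value names): if "%v = add i32 %a, %b"
  // is available on exit from BB2 but not from BB3, and CurrentBlock BB4 is
  // reached from both, the add is cloned into BB3 below and a phi of the two
  // values is built at the top of BB4:
  //
  //        BB1
  //       /   \
  //     BB2   BB3
  //       \   /
  //        BB4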
2260 Instruction *PREInstr = nullptr; 2261 2262 if (NumWithout != 0) { 2263 if (!isSafeToSpeculativelyExecute(CurInst)) { 2264 // It is only valid to insert a new instruction if the current instruction 2265 // is always executed. An instruction with implicit control flow could 2266 // prevent us from doing it. If we cannot speculate the execution, then 2267 // PRE should be prohibited. 2268 auto It = FirstImplicitControlFlowInsts.find(CurrentBlock); 2269 if (It != FirstImplicitControlFlowInsts.end()) { 2270 assert(It->second->getParent() == CurrentBlock && 2271 "Implicit control flow map broken?"); 2272 if (OI->dominates(It->second, CurInst)) 2273 return false; 2274 } 2275 } 2276 2277 // Don't do PRE across indirect branch. 2278 if (isa<IndirectBrInst>(PREPred->getTerminator())) 2279 return false; 2280 2281 // We can't do PRE safely on a critical edge, so instead we schedule 2282 // the edge to be split and perform the PRE the next time we iterate 2283 // on the function. 2284 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); 2285 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { 2286 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); 2287 return false; 2288 } 2289 // We need to insert somewhere, so let's give it a shot 2290 PREInstr = CurInst->clone(); 2291 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) { 2292 // If we failed insertion, make sure we remove the instruction. 2293 DEBUG(verifyRemoved(PREInstr)); 2294 PREInstr->deleteValue(); 2295 return false; 2296 } 2297 } 2298 2299 // Either we should have filled in the PRE instruction, or we should 2300 // not have needed insertions. 2301 assert(PREInstr != nullptr || NumWithout == 0); 2302 2303 ++NumGVNPRE; 2304 2305 // Create a PHI to make the value available in this block. 2306 PHINode *Phi = 2307 PHINode::Create(CurInst->getType(), predMap.size(), 2308 CurInst->getName() + ".pre-phi", &CurrentBlock->front()); 2309 for (unsigned i = 0, e = predMap.size(); i != e; ++i) { 2310 if (Value *V = predMap[i].first) { 2311 // If we use an existing value in this phi, we have to patch the original 2312 // value because the phi will be used to replace a later value. 2313 patchReplacementInstruction(CurInst, V); 2314 Phi->addIncoming(V, predMap[i].second); 2315 } else 2316 Phi->addIncoming(PREInstr, PREPred); 2317 } 2318 2319 VN.add(Phi, ValNo); 2320 // After creating a new PHI for ValNo, the phi translate result for ValNo will 2321 // be changed, so erase the related stale entries in phi translate cache. 2322 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock); 2323 addToLeaderTable(ValNo, Phi, CurrentBlock); 2324 Phi->setDebugLoc(CurInst->getDebugLoc()); 2325 CurInst->replaceAllUsesWith(Phi); 2326 if (MD && Phi->getType()->isPtrOrPtrVectorTy()) 2327 MD->invalidateCachedPointerInfo(Phi); 2328 VN.erase(CurInst); 2329 removeFromLeaderTable(ValNo, CurInst, CurrentBlock); 2330 2331 DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); 2332 if (MD) 2333 MD->removeInstruction(CurInst); 2334 DEBUG(verifyRemoved(CurInst)); 2335 bool InvalidateImplicitCF = 2336 FirstImplicitControlFlowInsts.lookup(CurInst->getParent()) == CurInst; 2337 // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes 2338 // some assertion failures. 
2339 OI->invalidateBlock(CurrentBlock); 2340 CurInst->eraseFromParent(); 2341 if (InvalidateImplicitCF) 2342 fillImplicitControlFlowInfo(CurrentBlock); 2343 ++NumGVNInstr; 2344 2345 return true; 2346 } 2347 2348 /// Perform a purely local form of PRE that looks for diamond 2349 /// control flow patterns and attempts to perform simple PRE at the join point. 2350 bool GVN::performPRE(Function &F) { 2351 bool Changed = false; 2352 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) { 2353 // Nothing to PRE in the entry block. 2354 if (CurrentBlock == &F.getEntryBlock()) 2355 continue; 2356 2357 // Don't perform PRE on an EH pad. 2358 if (CurrentBlock->isEHPad()) 2359 continue; 2360 2361 for (BasicBlock::iterator BI = CurrentBlock->begin(), 2362 BE = CurrentBlock->end(); 2363 BI != BE;) { 2364 Instruction *CurInst = &*BI++; 2365 Changed |= performScalarPRE(CurInst); 2366 } 2367 } 2368 2369 if (splitCriticalEdges()) 2370 Changed = true; 2371 2372 return Changed; 2373 } 2374 2375 /// Split the critical edge connecting the given two blocks, and return 2376 /// the block inserted to the critical edge. 2377 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) { 2378 BasicBlock *BB = 2379 SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT)); 2380 if (MD) 2381 MD->invalidateCachedPredecessors(); 2382 return BB; 2383 } 2384 2385 /// Split critical edges found during the previous 2386 /// iteration that may enable further optimization. 2387 bool GVN::splitCriticalEdges() { 2388 if (toSplit.empty()) 2389 return false; 2390 do { 2391 std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val(); 2392 SplitCriticalEdge(Edge.first, Edge.second, 2393 CriticalEdgeSplittingOptions(DT)); 2394 } while (!toSplit.empty()); 2395 if (MD) MD->invalidateCachedPredecessors(); 2396 return true; 2397 } 2398 2399 /// Executes one iteration of GVN 2400 bool GVN::iterateOnFunction(Function &F) { 2401 cleanupGlobalSets(); 2402 2403 // Top-down walk of the dominator tree 2404 bool Changed = false; 2405 // Needed for value numbering with phi construction to work. 2406 // RPOT walks the graph in its constructor and will not be invalidated during 2407 // processBlock. 2408 ReversePostOrderTraversal<Function *> RPOT(&F); 2409 2410 for (BasicBlock *BB : RPOT) 2411 fillImplicitControlFlowInfo(BB); 2412 for (BasicBlock *BB : RPOT) 2413 Changed |= processBlock(BB); 2414 2415 return Changed; 2416 } 2417 2418 void GVN::cleanupGlobalSets() { 2419 VN.clear(); 2420 LeaderTable.clear(); 2421 BlockRPONumber.clear(); 2422 TableAllocator.Reset(); 2423 FirstImplicitControlFlowInsts.clear(); 2424 } 2425 2426 void 2427 GVN::fillImplicitControlFlowInfo(BasicBlock *BB) { 2428 // Make sure that all marked instructions are actually deleted by this point, 2429 // so that we don't need to care about omitting them. 2430 assert(InstrsToErase.empty() && "Filling before removed all marked insns?"); 2431 auto MayNotTransferExecutionToSuccessor = [&](const Instruction *I) { 2432 // If a block's instruction doesn't always pass the control to its successor 2433 // instruction, mark the block as having implicit control flow. We use them 2434 // to avoid wrong assumptions of sort "if A is executed and B post-dominates 2435 // A, then B is also executed". This is not true is there is an implicit 2436 // control flow instruction (e.g. a guard) between them. 2437 // 2438 // TODO: Currently, isGuaranteedToTransferExecutionToSuccessor returns false 2439 // for volatile stores and loads because they can trap. 
The discussion on
2440 // whether or not it is correct is still ongoing. We might want to get rid
2441 // of this logic in the future. Anyway, trapping instructions shouldn't
2442 // introduce implicit control flow, so we explicitly allow them here. This
2443 // must be removed once isGuaranteedToTransferExecutionToSuccessor is fixed.
2444     if (isGuaranteedToTransferExecutionToSuccessor(I))
2445       return false;
2446     if (isa<LoadInst>(I)) {
2447       assert(cast<LoadInst>(I)->isVolatile() &&
2448              "Non-volatile load should transfer execution to successor!");
2449       return false;
2450     }
2451     if (isa<StoreInst>(I)) {
2452       assert(cast<StoreInst>(I)->isVolatile() &&
2453              "Non-volatile store should transfer execution to successor!");
2454       return false;
2455     }
2456     return true;
2457   };
2458   FirstImplicitControlFlowInsts.erase(BB);
2459
2460   for (auto &I : *BB)
2461     if (MayNotTransferExecutionToSuccessor(&I)) {
2462       FirstImplicitControlFlowInsts[BB] = &I;
2463       break;
2464     }
2465 }
2466
2467 /// Verify that the specified instruction does not occur in our
2468 /// internal data structures.
2469 void GVN::verifyRemoved(const Instruction *Inst) const {
2470   VN.verifyRemoved(Inst);
2471
2472   // Walk through the value number scope to make sure the instruction isn't
2473   // ferreted away in it.
2474   for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
2475        I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
2476     const LeaderTableEntry *Node = &I->second;
2477     assert(Node->Val != Inst && "Inst still in value numbering scope!");
2478
2479     while (Node->Next) {
2480       Node = Node->Next;
2481       assert(Node->Val != Inst && "Inst still in value numbering scope!");
2482     }
2483   }
2484 }
2485
2486 /// BB is declared dead, which implies that other blocks become dead as well.
2487 /// This function adds all these blocks to "DeadBlocks". For the dead blocks'
2488 /// live successors, update their phi nodes by replacing the operands
2489 /// corresponding to dead blocks with UndefVal.
2490 void GVN::addDeadBlock(BasicBlock *BB) {
2491   SmallVector<BasicBlock *, 4> NewDead;
2492   SmallSetVector<BasicBlock *, 4> DF;
2493
2494   NewDead.push_back(BB);
2495   while (!NewDead.empty()) {
2496     BasicBlock *D = NewDead.pop_back_val();
2497     if (DeadBlocks.count(D))
2498       continue;
2499
2500     // All blocks dominated by D are dead.
2501     SmallVector<BasicBlock *, 8> Dom;
2502     DT->getDescendants(D, Dom);
2503     DeadBlocks.insert(Dom.begin(), Dom.end());
2504
2505     // Figure out the dominance-frontier(D).
2506     for (BasicBlock *B : Dom) {
2507       for (BasicBlock *S : successors(B)) {
2508         if (DeadBlocks.count(S))
2509           continue;
2510
2511         bool AllPredDead = true;
2512         for (BasicBlock *P : predecessors(S))
2513           if (!DeadBlocks.count(P)) {
2514             AllPredDead = false;
2515             break;
2516           }
2517
2518         if (!AllPredDead) {
2519           // S could be proved dead later on. That is why we don't update phi
2520           // operands at this moment.
2521           DF.insert(S);
2522         } else {
2523           // Although S is not dominated by D, it is dead by now. This can
2524           // happen if S already had a dead predecessor before D was declared
2525           // dead.
2526           NewDead.push_back(S);
2527         }
2528       }
2529     }
2530   }
2531
2532   // For the dead blocks' live successors, update their phi nodes by replacing
2533   // the operands corresponding to dead blocks with UndefVal.
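  // For example (illustrative only), a phi in a live successor such as
  //   %p = phi i32 [ %a, %deadpred ], [ %b, %livepred ]
  // has its %deadpred operand rewritten to undef below, since that edge can no
  // longer be taken.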
2534   for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
2535        I != E; I++) {
2536     BasicBlock *B = *I;
2537     if (DeadBlocks.count(B))
2538       continue;
2539
2540     SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
2541     for (BasicBlock *P : Preds) {
2542       if (!DeadBlocks.count(P))
2543         continue;
2544
2545       if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
2546         if (BasicBlock *S = splitCriticalEdges(P, B))
2547           DeadBlocks.insert(P = S);
2548       }
2549
2550       for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
2551         PHINode &Phi = cast<PHINode>(*II);
2552         Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
2553                              UndefValue::get(Phi.getType()));
2554       }
2555     }
2556   }
2557 }
2558
2559 // If the given branch is recognized as a foldable branch (i.e. a conditional
2560 // branch with a constant condition), it performs the following analyses and
2561 // transformation.
2562 //  1) If the dead outgoing edge is a critical edge, split it. Let
2563 //     R be the target of the dead outgoing edge.
2564 //  2) Identify the set of dead blocks implied by the branch's dead outgoing
2565 //     edge. The result of this step will be {X | X is dominated by R}.
2566 //  3) Identify those blocks which have at least one dead predecessor. The
2567 //     result of this step will be dominance-frontier(R).
2568 //  4) Update the PHIs in DF(R) by replacing the operands corresponding to dead
2569 //     blocks with "UndefVal", in the hope that these PHIs will be optimized away.
2570 //
2571 // Return true iff *NEW* dead code is found.
2572 bool GVN::processFoldableCondBr(BranchInst *BI) {
2573   if (!BI || BI->isUnconditional())
2574     return false;
2575
2576   // If a branch has two identical successors, we cannot declare either dead.
2577   if (BI->getSuccessor(0) == BI->getSuccessor(1))
2578     return false;
2579
2580   ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
2581   if (!Cond)
2582     return false;
2583
2584   BasicBlock *DeadRoot =
2585       Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
2586   if (DeadBlocks.count(DeadRoot))
2587     return false;
2588
2589   if (!DeadRoot->getSinglePredecessor())
2590     DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
2591
2592   addDeadBlock(DeadRoot);
2593   return true;
2594 }
2595
2596 // performPRE() will trigger an assert if it comes across an instruction without
2597 // an associated val-num. As it normally has far more live instructions than dead
2598 // instructions, it makes more sense just to "fabricate" a val-number for the
2599 // dead code than to check whether the instruction involved is dead or not.
2600 void GVN::assignValNumForDeadCode() { 2601 for (BasicBlock *BB : DeadBlocks) { 2602 for (Instruction &Inst : *BB) { 2603 unsigned ValNum = VN.lookupOrAdd(&Inst); 2604 addToLeaderTable(ValNum, &Inst, BB); 2605 } 2606 } 2607 } 2608 2609 class llvm::gvn::GVNLegacyPass : public FunctionPass { 2610 public: 2611 static char ID; // Pass identification, replacement for typeid 2612 2613 explicit GVNLegacyPass(bool NoLoads = false) 2614 : FunctionPass(ID), NoLoads(NoLoads) { 2615 initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry()); 2616 } 2617 2618 bool runOnFunction(Function &F) override { 2619 if (skipFunction(F)) 2620 return false; 2621 2622 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>(); 2623 2624 return Impl.runImpl( 2625 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 2626 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 2627 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 2628 getAnalysis<AAResultsWrapperPass>().getAAResults(), 2629 NoLoads ? nullptr 2630 : &getAnalysis<MemoryDependenceWrapperPass>().getMemDep(), 2631 LIWP ? &LIWP->getLoopInfo() : nullptr, 2632 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE()); 2633 } 2634 2635 void getAnalysisUsage(AnalysisUsage &AU) const override { 2636 AU.addRequired<AssumptionCacheTracker>(); 2637 AU.addRequired<DominatorTreeWrapperPass>(); 2638 AU.addRequired<TargetLibraryInfoWrapperPass>(); 2639 if (!NoLoads) 2640 AU.addRequired<MemoryDependenceWrapperPass>(); 2641 AU.addRequired<AAResultsWrapperPass>(); 2642 2643 AU.addPreserved<DominatorTreeWrapperPass>(); 2644 AU.addPreserved<GlobalsAAWrapperPass>(); 2645 AU.addPreserved<TargetLibraryInfoWrapperPass>(); 2646 AU.addRequired<OptimizationRemarkEmitterWrapperPass>(); 2647 } 2648 2649 private: 2650 bool NoLoads; 2651 GVN Impl; 2652 }; 2653 2654 char GVNLegacyPass::ID = 0; 2655 2656 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false) 2657 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 2658 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) 2659 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2660 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 2661 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2662 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 2663 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass) 2664 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false) 2665 2666 // The public interface to this file... 2667 FunctionPass *llvm::createGVNPass(bool NoLoads) { 2668 return new GVNLegacyPass(NoLoads); 2669 } 2670
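// A minimal usage sketch of the public entry point above, assuming a client with
// a legacy pass-manager pipeline (illustrative only; "M" is the client's
// llvm::Module instance):
//
//   llvm::legacy::PassManager PM;
//   PM.add(llvm::createGVNPass(/*NoLoads=*/false));
//   PM.run(M);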