//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <vector>
using namespace llvm;
using namespace llvm::gvn;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
                cl::desc("Max recurse depth (default = 1000)"));

struct llvm::GVN::Expression {
  uint32_t opcode;
  Type *type;
  SmallVector<uint32_t, 4> varargs;

  Expression(uint32_t o = ~2U) : opcode(o) {}

  bool operator==(const Expression &other) const {
    if (opcode != other.opcode)
      return false;
    if (opcode == ~0U || opcode == ~1U)
      return true;
    if (type != other.type)
      return false;
    if (varargs != other.varargs)
      return false;
    return true;
  }

  friend hash_code hash_value(const Expression &Value) {
    return hash_combine(
        Value.opcode, Value.type,
        hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
  }
};

namespace llvm {
template <> struct DenseMapInfo<GVN::Expression> {
  static inline GVN::Expression getEmptyKey() { return ~0U; }

  static inline GVN::Expression getTombstoneKey() { return ~1U; }

  static unsigned getHashValue(const GVN::Expression &e) {
    using llvm::hash_value;
    return static_cast<unsigned>(hash_value(e));
  }
  static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
    return LHS == RHS;
  }
};
} // End llvm namespace.

/// Represents a particular available value that we know how to materialize.
/// Materialization of an AvailableValue never fails. An AvailableValue is
/// implicitly associated with a rematerialization point which is the
/// location of the instruction from which it was formed.
struct llvm::gvn::AvailableValue {
  enum ValType {
    SimpleVal, // A simple offsetted value that is accessed.
    LoadVal,   // A value produced by a load.
    MemIntrin, // A memory intrinsic which is loaded from.
    UndefVal   // An UndefValue representing a value from a dead block (which
               // is not yet physically removed from the CFG).
  };

  /// V - The value that is live out of the block.
  PointerIntPair<Value *, 2, ValType> Val;

  /// Offset - The byte offset in Val that is interesting for the load query.
  unsigned Offset;

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
                                  GVN &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }
  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
    return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers.
    // Since all commutative instructions have two operands, it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
                                               CmpInst::Predicate Predicate,
                                               Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  return e;
}

GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
  if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI might be an extract from one of our recognised intrinsics. If it
    // is, we'll synthesize a semantically equivalent expression instead of
    // an extract value expression.
    switch (I->getIntrinsicID()) {
    case Intrinsic::sadd_with_overflow:
    case Intrinsic::uadd_with_overflow:
      e.opcode = Instruction::Add;
      break;
    case Intrinsic::ssub_with_overflow:
    case Intrinsic::usub_with_overflow:
      e.opcode = Instruction::Sub;
      break;
    case Intrinsic::smul_with_overflow:
    case Intrinsic::umul_with_overflow:
      e.opcode = Instruction::Mul;
      break;
    default:
      break;
    }

    if (e.opcode != 0) {
      // Intrinsic recognized. Grab its args to finish building the expression.
      assert(I->getNumArgOperands() == 2 &&
             "Expect two args for recognised intrinsics.");
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(0)));
      e.varargs.push_back(lookupOrAdd(I->getArgOperand(1)));
      return e;
    }
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
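  // The generic encoding records the value numbers of the operands followed
  // by the constant extraction indices.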
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVN::ValueTable::ValueTable() : nextValueNumber(1) {}
GVN::ValueTable::ValueTable(const ValueTable &) = default;
GVN::ValueTable::ValueTable(ValueTable &&) = default;
GVN::ValueTable::~ValueTable() = default;

/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst *local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: Move the checking logic to MemDep!
    CallInst *cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
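      // The dependent call must properly dominate C's block for its value to
      // be reusable here; otherwise give up on the non-local search.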
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// Returns true if a value number exists for the specified value.
bool GVN::ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }

/// lookupOrAdd - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction *I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t &e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVN::ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                         CmpInst::Predicate Predicate,
                                         Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  uint32_t &e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  return e;
}

/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVN::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &MemDep = AM.getResult<MemoryDependenceAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, &MemDep);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  return PA;
}

LLVM_DUMP_METHOD
void GVN::dump(DenseMap<uint32_t, Value*> &d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
         E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a four-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
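/// State 3 tells the SpeculationFailure path below that the optimistic guess
/// may already have been used to mark other blocks, so those entries must be
/// walked and reset.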
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                              DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
                              uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
      FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks, RecurseDepth+1))
      goto SpeculationFailure;

  return true;

// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue;  // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}


/// Return true if CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            Type *LoadTy,
                                            const DataLayout &DL) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (DL.getTypeSizeInBits(StoredVal->getType()) <
      DL.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}

/// If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace.
/// IRB is the IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
                                             IRBuilder<> &IRB,
                                             const DataLayout &DL) {
  assert(CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL) &&
         "precondition violation - materialization can't fail");

  if (auto *C = dyn_cast<Constant>(StoredVal))
    if (auto *FoldedStoredVal = ConstantFoldConstant(C, DL))
      StoredVal = FoldedStoredVal;

  // If this is already the right type, just return it.
  Type *StoredValTy = StoredVal->getType();

  uint64_t StoredValSize = DL.getTypeSizeInBits(StoredValTy);
  uint64_t LoadedValSize = DL.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoredValSize == LoadedValSize) {
    // Pointer to Pointer -> use bitcast.
    if (StoredValTy->getScalarType()->isPointerTy() &&
        LoadedTy->getScalarType()->isPointerTy()) {
      StoredVal = IRB.CreateBitCast(StoredVal, LoadedTy);
    } else {
      // Convert source pointers to integers, which can be bitcast.
      if (StoredValTy->getScalarType()->isPointerTy()) {
        StoredValTy = DL.getIntPtrType(StoredValTy);
        StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
      }

      Type *TypeToCastTo = LoadedTy;
      if (TypeToCastTo->getScalarType()->isPointerTy())
        TypeToCastTo = DL.getIntPtrType(TypeToCastTo);

      if (StoredValTy != TypeToCastTo)
        StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo);

      // Cast to pointer if the load needs a pointer type.
      if (LoadedTy->getScalarType()->isPointerTy())
        StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy);
    }

    if (auto *C = dyn_cast<ConstantExpr>(StoredVal))
      if (auto *FoldedStoredVal = ConstantFoldConstant(C, DL))
        StoredVal = FoldedStoredVal;

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoredValSize >= LoadedValSize &&
         "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->getScalarType()->isPointerTy()) {
    StoredValTy = DL.getIntPtrType(StoredValTy);
    StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoredValSize);
    StoredVal = IRB.CreateBitCast(StoredVal, StoredValTy);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (DL.isBigEndian()) {
    uint64_t ShiftAmt = DL.getTypeStoreSizeInBits(StoredValTy) -
                        DL.getTypeStoreSizeInBits(LoadedTy);
    StoredVal = IRB.CreateLShr(StoredVal, ShiftAmt, "tmp");
  }

  // Truncate the integer to the right size now.
  Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadedValSize);
  StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc");

  if (LoadedTy != NewIntTy) {
    // If the result is a pointer, inttoptr.
    if (LoadedTy->getScalarType()->isPointerTy())
      StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy, "inttoptr");
    else
      // Otherwise, bitcast.
      StoredVal = IRB.CreateBitCast(StoredVal, LoadedTy, "bitcast");
  }

  if (auto *C = dyn_cast<Constant>(StoredVal))
    if (auto *FoldedStoredVal = ConstantFoldConstant(C, DL))
      StoredVal = FoldedStoredVal;

  return StoredVal;
}

/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't must-alias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const DataLayout &DL) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase =
      GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits / 8;  // Convert to bytes.
  LoadSize /= 8;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset + int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset + int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure)
    return -1;

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset + StoreSize < LoadOffset + LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset - StoreOffset;
}

/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  const DataLayout &DL = DepSI->getModule()->getDataLayout();
  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize =
      DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, DL);
}

/// This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
                                         LoadInst *DepLI,
                                         const DataLayout &DL) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
    return -1;

  Value *DepPtr = DepLI->getPointerOperand();
  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
  if (R != -1) return R;

  // If we have a load/load clobber and DepLI can be widened to cover this load,
  // then we should widen it!
  int64_t LoadOffs = 0;
  const Value *LoadBase =
      GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);

  unsigned Size = MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
      LoadBase, LoadOffs, LoadSize, DepLI);
  if (Size == 0) return -1;

  // Check non-obvious conditions enforced by MDA which we rely on for being
  // able to materialize this potentially available value.
  assert(DepLI->isSimple() && "Cannot widen volatile/atomic load!");
  assert(DepLI->getType()->isIntegerTy() && "Can't widen non-integer load");

  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
}



static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const DataLayout &DL) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (!SizeCst) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, DL);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (!Src) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
  if (!GV || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, DL);
  if (Offset == -1)
    return Offset;

  unsigned AS = Src->getType()->getPointerAddressSpace();
  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
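  // Index Offset bytes into the global through an i8* GEP, cast the result to
  // a pointer to LoadTy, and check that a load of it folds to a constant.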
  Src = ConstantExpr::getBitCast(Src,
                                 Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
      ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
                                       OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  if (ConstantFoldLoadFromConstPtr(Src, LoadTy, DL))
    return Offset;
  return -1;
}


/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store provides bits used by the load but the pointers don't
/// must-alias. Check this case to see if there is anything more we can do
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   Type *LoadTy,
                                   Instruction *InsertPt,
                                   const DataLayout &DL) {
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->getScalarType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal,
                                    DL.getIntPtrType(SrcVal->getType()));
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (DL.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL);
}

/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering load. This means
/// that the clobbering load *may* provide bits used by the load being analyzed
/// but we can't be sure because the pointers don't must-alias. Check this case
/// to see if there is anything more we can do before we give up.
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                  Type *LoadTy, Instruction *InsertPt,
                                  GVN &gvn) {
  const DataLayout &DL = SrcVal->getModule()->getDataLayout();
  // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
  // widen SrcVal out to a larger load.
  unsigned SrcValStoreSize = DL.getTypeStoreSize(SrcVal->getType());
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
  if (Offset+LoadSize > SrcValStoreSize) {
    assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
    assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
    // If we have a load/load clobber and DepLI can be widened to cover this
    // load, then we should widen it to the next power of 2 size big enough!
    unsigned NewLoadSize = Offset+LoadSize;
    if (!isPowerOf2_32(NewLoadSize))
      NewLoadSize = NextPowerOf2(NewLoadSize);

    Value *PtrVal = SrcVal->getPointerOperand();

    // Insert the new load after the old load. This ensures that subsequent
    // memdep queries will find the new load.
    // We can't easily remove the old load completely because it is already in
    // the value numbering table.
    IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
    Type *DestPTy =
        IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
    DestPTy = PointerType::get(DestPTy,
                               PtrVal->getType()->getPointerAddressSpace());
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlignment());

    DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load. On a big endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (DL.isBigEndian())
      RV = Builder.CreateLShr(RV, (NewLoadSize - SrcValStoreSize) * 8);
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    // We would like to use gvn.markInstructionForDeletion here, but we can't
    // because the load is already memoized into the leader map table that GVN
    // tracks. It is potentially possible to remove the load from the table,
    // but then all of the operations based on it would need to be rehashed.
    // Just leave the dead load around.
    gvn.getMemDep().removeInstruction(SrcVal);
    SrcVal = NewLoad;
  }

  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
}


/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     Type *LoadTy, Instruction *InsertPt,
                                     const DataLayout &DL) {
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());
  unsigned AS = Src->getType()->getPointerAddressSpace();

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
      ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
                                       OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  return ConstantFoldLoadFromConstPtr(Src, LoadTy, DL);
}


/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                         GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
                                                Instruction *InsertPt,
                                                GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = GetStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                   << *getSimpleValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = GetLoadValueForLoad(Load, Offset, LoadTy, InsertPt, gvn);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
                   << *getCoercedLoadValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                 << " " << *getMemIntrinValue() << '\n'
                 << *Res << '\n' << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
                                  Value *Address,
                                  AvailableValue &Res) {

  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(LI->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = LI->getModule()->getDataLayout();

  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
      // Can't forward from non-atomic to atomic without violating memory model.
      if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the latter with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory model.
      if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
        int Offset =
            AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);

        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLI, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
      if (Address && !LI->isAtomic()) {
        int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }
    // Nothing known about this clobber, have to be conservative.
    DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load ";
      LI->printAsOperand(dbgs());
      Instruction *I = DepInfo.getInst();
      dbgs() << " is clobbered by " << *I << '\n';
    );
    return false;
  }
  assert(DepInfo.isDef() && "follows from above");

  Instruction *DepInst = DepInfo.getInst();

  // Loading the allocation -> undef.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
      // Loading immediately after lifetime begin -> undef.
      isLifetimeStart(DepInst)) {
    Res = AvailableValue::get(UndefValue::get(LI->getType()));
    return true;
  }

  // Loading from calloc (which zero initializes memory) -> zero.
  if (isCallocLikeFn(DepInst, TLI)) {
    Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
    return true;
  }

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is larger or equal to
    // the loaded value, we can reuse it.
    if (S->getValueOperand()->getType() != LI->getType() &&
        !CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                         LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
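    // (isAtomic() is a bool here, so this comparison only rejects the case of
    // a non-atomic store feeding an atomic load.)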
    if (S->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::get(S->getValueOperand());
    return true;
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (LD->getType() != LI->getType() &&
        !CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::getLoad(LD);
    return true;
  }

  // Unknown def - must be conservative.
  DEBUG(
    // fast print dep, using operator<< on instruction is too slow.
    dbgs() << "GVN: load ";
    LI->printAsOperand(dbgs());
    dbgs() << " has unknown def " << *DepInst << '\n';
  );
  return false;
}

void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op is treated as a load evaluating the same
      // value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
    Value *Address = Deps[i].getAddress();

    AvailableValue AV;
    if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
      // subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
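
  // Blocks in which the load is known to be unavailable; the backwards walk
  // below refuses to hoist the load past any of them.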
  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = true;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      DEBUG(dbgs()
            << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
            << Pred->getName() << "': " << *LI << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                     << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        DEBUG(dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
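  // splitCriticalEdges inserts a new block on each such edge; that new block
  // is recorded as the predecessor into which the load may be inserted.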
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                 << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = LI->getModule()->getDataLayout();
  SmallVector<Instruction*, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), DL, AC);
    Value *LoadPtr = nullptr;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
    if (!LoadPtr) {
      DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                   << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      if (MD) MD->removeInstruction(I);
      I->eraseFromParent();
    }
    // HINT: Don't revert the edge-splitting as the following transformation
    // may also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  DEBUG(if (!NewInsts.empty())
          dbgs() << "INSERTED " << NewInsts.size() << " INSTS: "
                 << *NewInsts.back() << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  for (const auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;
    Value *LoadPtr = PredLoad.second;

    auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
                                 LI->isVolatile(), LI->getAlignment(),
                                 LI->getOrdering(), LI->getSynchScope(),
                                 UnavailablePred->getTerminator());

    // Transfer the old load's AA tags to the new load.
1553 AAMDNodes Tags; 1554 LI->getAAMetadata(Tags); 1555 if (Tags) 1556 NewLoad->setAAMetadata(Tags); 1557 1558 if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load)) 1559 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD); 1560 if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group)) 1561 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD); 1562 if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) 1563 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD); 1564 1565 // Transfer DebugLoc. 1566 NewLoad->setDebugLoc(LI->getDebugLoc()); 1567 1568 // Add the newly created load. 1569 ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred, 1570 NewLoad)); 1571 MD->invalidateCachedPointerInfo(LoadPtr); 1572 DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n'); 1573 } 1574 1575 // Perform PHI construction. 1576 Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this); 1577 LI->replaceAllUsesWith(V); 1578 if (isa<PHINode>(V)) 1579 V->takeName(LI); 1580 if (Instruction *I = dyn_cast<Instruction>(V)) 1581 I->setDebugLoc(LI->getDebugLoc()); 1582 if (V->getType()->getScalarType()->isPointerTy()) 1583 MD->invalidateCachedPointerInfo(V); 1584 markInstructionForDeletion(LI); 1585 ++NumPRELoad; 1586 return true; 1587 } 1588 1589 /// Attempt to eliminate a load whose dependencies are 1590 /// non-local by performing PHI construction. 1591 bool GVN::processNonLocalLoad(LoadInst *LI) { 1592 // non-local speculations are not allowed under asan. 1593 if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeAddress)) 1594 return false; 1595 1596 // Step 1: Find the non-local dependencies of the load. 1597 LoadDepVect Deps; 1598 MD->getNonLocalPointerDependency(LI, Deps); 1599 1600 // If we had to process more than one hundred blocks to find the 1601 // dependencies, this load isn't worth worrying about. Optimizing 1602 // it will be too expensive. 1603 unsigned NumDeps = Deps.size(); 1604 if (NumDeps > 100) 1605 return false; 1606 1607 // If we had a phi translation failure, we'll have a single entry which is a 1608 // clobber in the current block. Reject this early. 1609 if (NumDeps == 1 && 1610 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) { 1611 DEBUG( 1612 dbgs() << "GVN: non-local load "; 1613 LI->printAsOperand(dbgs()); 1614 dbgs() << " has unknown dependencies\n"; 1615 ); 1616 return false; 1617 } 1618 1619 // If this load follows a GEP, see if we can PRE the indices before analyzing. 1620 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) { 1621 for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(), 1622 OE = GEP->idx_end(); 1623 OI != OE; ++OI) 1624 if (Instruction *I = dyn_cast<Instruction>(OI->get())) 1625 performScalarPRE(I); 1626 } 1627 1628 // Step 2: Analyze the availability of the load 1629 AvailValInBlkVect ValuesPerBlock; 1630 UnavailBlkVect UnavailableBlocks; 1631 AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks); 1632 1633 // If we have no predecessors that produce a known value for this load, exit 1634 // early. 1635 if (ValuesPerBlock.empty()) 1636 return false; 1637 1638 // Step 3: Eliminate fully redundancy. 1639 // 1640 // If all of the instructions we depend on produce a known value for this 1641 // load, then it is fully redundant and we can use PHI insertion to compute 1642 // its value. Insert PHIs and remove the fully redundant value now. 
1643 if (UnavailableBlocks.empty()) { 1644 DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n'); 1645 1646 // Perform PHI construction. 1647 Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this); 1648 LI->replaceAllUsesWith(V); 1649 1650 if (isa<PHINode>(V)) 1651 V->takeName(LI); 1652 if (Instruction *I = dyn_cast<Instruction>(V)) 1653 if (LI->getDebugLoc()) 1654 I->setDebugLoc(LI->getDebugLoc()); 1655 if (V->getType()->getScalarType()->isPointerTy()) 1656 MD->invalidateCachedPointerInfo(V); 1657 markInstructionForDeletion(LI); 1658 ++NumGVNLoad; 1659 return true; 1660 } 1661 1662 // Step 4: Eliminate partial redundancy. 1663 if (!EnablePRE || !EnableLoadPRE) 1664 return false; 1665 1666 return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks); 1667 } 1668 1669 bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) { 1670 assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume && 1671 "This function can only be called with llvm.assume intrinsic"); 1672 Value *V = IntrinsicI->getArgOperand(0); 1673 1674 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) { 1675 if (Cond->isZero()) { 1676 Type *Int8Ty = Type::getInt8Ty(V->getContext()); 1677 // Insert a new store to null instruction before the load to indicate that 1678 // this code is not reachable. FIXME: We could insert unreachable 1679 // instruction directly because we can modify the CFG. 1680 new StoreInst(UndefValue::get(Int8Ty), 1681 Constant::getNullValue(Int8Ty->getPointerTo()), 1682 IntrinsicI); 1683 } 1684 markInstructionForDeletion(IntrinsicI); 1685 return false; 1686 } 1687 1688 Constant *True = ConstantInt::getTrue(V->getContext()); 1689 bool Changed = false; 1690 1691 for (BasicBlock *Successor : successors(IntrinsicI->getParent())) { 1692 BasicBlockEdge Edge(IntrinsicI->getParent(), Successor); 1693 1694 // This property is only true in dominated successors, propagateEquality 1695 // will check dominance for us. 1696 Changed |= propagateEquality(V, True, Edge, false); 1697 } 1698 1699 // We can replace assume value with true, which covers cases like this: 1700 // call void @llvm.assume(i1 %cmp) 1701 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true 1702 ReplaceWithConstMap[V] = True; 1703 1704 // If one of *cmp *eq operand is const, adding it to map will cover this: 1705 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen 1706 // call void @llvm.assume(i1 %cmp) 1707 // ret float %0 ; will change it to ret float 3.000000e+00 1708 if (auto *CmpI = dyn_cast<CmpInst>(V)) { 1709 if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ || 1710 CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ || 1711 (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ && 1712 CmpI->getFastMathFlags().noNaNs())) { 1713 Value *CmpLHS = CmpI->getOperand(0); 1714 Value *CmpRHS = CmpI->getOperand(1); 1715 if (isa<Constant>(CmpLHS)) 1716 std::swap(CmpLHS, CmpRHS); 1717 auto *RHSConst = dyn_cast<Constant>(CmpRHS); 1718 1719 // If only one operand is constant. 1720 if (RHSConst != nullptr && !isa<Constant>(CmpLHS)) 1721 ReplaceWithConstMap[CmpLHS] = RHSConst; 1722 } 1723 } 1724 return Changed; 1725 } 1726 1727 static void patchReplacementInstruction(Instruction *I, Value *Repl) { 1728 auto *ReplInst = dyn_cast<Instruction>(Repl); 1729 if (!ReplInst) 1730 return; 1731 1732 // Patch the replacement so that it is not more restrictive than the value 1733 // being replaced. 
1734 ReplInst->andIRFlags(I); 1735 1736 // FIXME: If both the original and replacement value are part of the 1737 // same control-flow region (meaning that the execution of one 1738 // guarantees the execution of the other), then we can combine the 1739 // noalias scopes here and do better than the general conservative 1740 // answer used in combineMetadata(). 1741 1742 // In general, GVN unifies expressions over different control-flow 1743 // regions, and so we need a conservative combination of the noalias 1744 // scopes. 1745 static const unsigned KnownIDs[] = { 1746 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 1747 LLVMContext::MD_noalias, LLVMContext::MD_range, 1748 LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, 1749 LLVMContext::MD_invariant_group}; 1750 combineMetadata(ReplInst, I, KnownIDs); 1751 } 1752 1753 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { 1754 patchReplacementInstruction(I, Repl); 1755 I->replaceAllUsesWith(Repl); 1756 } 1757 1758 /// Attempt to eliminate a load, first by eliminating it 1759 /// locally, and then attempting non-local elimination if that fails. 1760 bool GVN::processLoad(LoadInst *L) { 1761 if (!MD) 1762 return false; 1763 1764 // This code hasn't been audited for ordered or volatile memory access 1765 if (!L->isUnordered()) 1766 return false; 1767 1768 if (L->use_empty()) { 1769 markInstructionForDeletion(L); 1770 return true; 1771 } 1772 1773 // ... to a pointer that has been loaded from before... 1774 MemDepResult Dep = MD->getDependency(L); 1775 1776 // If it is defined in another block, try harder. 1777 if (Dep.isNonLocal()) 1778 return processNonLocalLoad(L); 1779 1780 // Only handle the local case below 1781 if (!Dep.isDef() && !Dep.isClobber()) { 1782 // This might be a NonFuncLocal or an Unknown 1783 DEBUG( 1784 // fast print dep, using operator<< on instruction is too slow. 1785 dbgs() << "GVN: load "; 1786 L->printAsOperand(dbgs()); 1787 dbgs() << " has unknown dependence\n"; 1788 ); 1789 return false; 1790 } 1791 1792 AvailableValue AV; 1793 if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) { 1794 Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this); 1795 1796 // Replace the load! 1797 patchAndReplaceAllUsesWith(L, AvailableValue); 1798 markInstructionForDeletion(L); 1799 ++NumGVNLoad; 1800 // Tell MDA to rexamine the reused pointer since we might have more 1801 // information after forwarding it. 1802 if (MD && AvailableValue->getType()->getScalarType()->isPointerTy()) 1803 MD->invalidateCachedPointerInfo(AvailableValue); 1804 return true; 1805 } 1806 1807 return false; 1808 } 1809 1810 // In order to find a leader for a given value number at a 1811 // specific basic block, we first obtain the list of all Values for that number, 1812 // and then scan the list to find one whose block dominates the block in 1813 // question. This is fast because dominator tree queries consist of only 1814 // a few comparisons of DFS numbers. 
1815 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) { 1816 LeaderTableEntry Vals = LeaderTable[num]; 1817 if (!Vals.Val) return nullptr; 1818 1819 Value *Val = nullptr; 1820 if (DT->dominates(Vals.BB, BB)) { 1821 Val = Vals.Val; 1822 if (isa<Constant>(Val)) return Val; 1823 } 1824 1825 LeaderTableEntry* Next = Vals.Next; 1826 while (Next) { 1827 if (DT->dominates(Next->BB, BB)) { 1828 if (isa<Constant>(Next->Val)) return Next->Val; 1829 if (!Val) Val = Next->Val; 1830 } 1831 1832 Next = Next->Next; 1833 } 1834 1835 return Val; 1836 } 1837 1838 /// There is an edge from 'Src' to 'Dst'. Return 1839 /// true if every path from the entry block to 'Dst' passes via this edge. In 1840 /// particular 'Dst' must not be reachable via another edge from 'Src'. 1841 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, 1842 DominatorTree *DT) { 1843 // While in theory it is interesting to consider the case in which Dst has 1844 // more than one predecessor, because Dst might be part of a loop which is 1845 // only reachable from Src, in practice it is pointless since at the time 1846 // GVN runs all such loops have preheaders, which means that Dst will have 1847 // been changed to have only one predecessor, namely Src. 1848 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor(); 1849 assert((!Pred || Pred == E.getStart()) && 1850 "No edge between these basic blocks!"); 1851 return Pred != nullptr; 1852 } 1853 1854 // Tries to replace instruction with const, using information from 1855 // ReplaceWithConstMap. 1856 bool GVN::replaceOperandsWithConsts(Instruction *Instr) const { 1857 bool Changed = false; 1858 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) { 1859 Value *Operand = Instr->getOperand(OpNum); 1860 auto it = ReplaceWithConstMap.find(Operand); 1861 if (it != ReplaceWithConstMap.end()) { 1862 assert(!isa<Constant>(Operand) && 1863 "Replacing constants with constants is invalid"); 1864 DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " << *it->second 1865 << " in instruction " << *Instr << '\n'); 1866 Instr->setOperand(OpNum, it->second); 1867 Changed = true; 1868 } 1869 } 1870 return Changed; 1871 } 1872 1873 /// The given values are known to be equal in every block 1874 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with 1875 /// 'RHS' everywhere in the scope. Returns whether a change was made. 1876 /// If DominatesByEdge is false, then it means that we will propagate the RHS 1877 /// value starting from the end of Root.Start. 1878 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root, 1879 bool DominatesByEdge) { 1880 SmallVector<std::pair<Value*, Value*>, 4> Worklist; 1881 Worklist.push_back(std::make_pair(LHS, RHS)); 1882 bool Changed = false; 1883 // For speed, compute a conservative fast approximation to 1884 // DT->dominates(Root, Root.getEnd()); 1885 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT); 1886 1887 while (!Worklist.empty()) { 1888 std::pair<Value*, Value*> Item = Worklist.pop_back_val(); 1889 LHS = Item.first; RHS = Item.second; 1890 1891 if (LHS == RHS) 1892 continue; 1893 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!"); 1894 1895 // Don't try to propagate equalities between constants. 1896 if (isa<Constant>(LHS) && isa<Constant>(RHS)) 1897 continue; 1898 1899 // Prefer a constant on the right-hand side, or an Argument if no constants. 
1900 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS))) 1901 std::swap(LHS, RHS); 1902 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!"); 1903 1904 // If there is no obvious reason to prefer the left-hand side over the 1905 // right-hand side, ensure the longest lived term is on the right-hand side, 1906 // so the shortest lived term will be replaced by the longest lived. 1907 // This tends to expose more simplifications. 1908 uint32_t LVN = VN.lookupOrAdd(LHS); 1909 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) || 1910 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) { 1911 // Move the 'oldest' value to the right-hand side, using the value number 1912 // as a proxy for age. 1913 uint32_t RVN = VN.lookupOrAdd(RHS); 1914 if (LVN < RVN) { 1915 std::swap(LHS, RHS); 1916 LVN = RVN; 1917 } 1918 } 1919 1920 // If value numbering later sees that an instruction in the scope is equal 1921 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve 1922 // the invariant that instructions only occur in the leader table for their 1923 // own value number (this is used by removeFromLeaderTable), do not do this 1924 // if RHS is an instruction (if an instruction in the scope is morphed into 1925 // LHS then it will be turned into RHS by the next GVN iteration anyway, so 1926 // using the leader table is about compiling faster, not optimizing better). 1927 // The leader table only tracks basic blocks, not edges. Only add to if we 1928 // have the simple case where the edge dominates the end. 1929 if (RootDominatesEnd && !isa<Instruction>(RHS)) 1930 addToLeaderTable(LVN, RHS, Root.getEnd()); 1931 1932 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As 1933 // LHS always has at least one use that is not dominated by Root, this will 1934 // never do anything if LHS has only one use. 1935 if (!LHS->hasOneUse()) { 1936 unsigned NumReplacements = 1937 DominatesByEdge 1938 ? replaceDominatedUsesWith(LHS, RHS, *DT, Root) 1939 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart()); 1940 1941 Changed |= NumReplacements > 0; 1942 NumGVNEqProp += NumReplacements; 1943 } 1944 1945 // Now try to deduce additional equalities from this one. For example, if 1946 // the known equality was "(A != B)" == "false" then it follows that A and B 1947 // are equal in the scope. Only boolean equalities with an explicit true or 1948 // false RHS are currently supported. 1949 if (!RHS->getType()->isIntegerTy(1)) 1950 // Not a boolean equality - bail out. 1951 continue; 1952 ConstantInt *CI = dyn_cast<ConstantInt>(RHS); 1953 if (!CI) 1954 // RHS neither 'true' nor 'false' - bail out. 1955 continue; 1956 // Whether RHS equals 'true'. Otherwise it equals 'false'. 1957 bool isKnownTrue = CI->isAllOnesValue(); 1958 bool isKnownFalse = !isKnownTrue; 1959 1960 // If "A && B" is known true then both A and B are known true. If "A || B" 1961 // is known false then both A and B are known false. 1962 Value *A, *B; 1963 if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) || 1964 (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) { 1965 Worklist.push_back(std::make_pair(A, RHS)); 1966 Worklist.push_back(std::make_pair(B, RHS)); 1967 continue; 1968 } 1969 1970 // If we are propagating an equality like "(A == B)" == "true" then also 1971 // propagate the equality A == B. When propagating a comparison such as 1972 // "(A >= B)" == "true", replace all instances of "A < B" with "false". 
1973 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) { 1974 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1); 1975 1976 // If "A == B" is known true, or "A != B" is known false, then replace 1977 // A with B everywhere in the scope. 1978 if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) || 1979 (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE)) 1980 Worklist.push_back(std::make_pair(Op0, Op1)); 1981 1982 // Handle the floating point versions of equality comparisons too. 1983 if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) || 1984 (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) { 1985 1986 // Floating point -0.0 and 0.0 compare equal, so we can only 1987 // propagate values if we know that we have a constant and that 1988 // its value is non-zero. 1989 1990 // FIXME: We should do this optimization if 'no signed zeros' is 1991 // applicable via an instruction-level fast-math-flag or some other 1992 // indicator that relaxed FP semantics are being used. 1993 1994 if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero()) 1995 Worklist.push_back(std::make_pair(Op0, Op1)); 1996 } 1997 1998 // If "A >= B" is known true, replace "A < B" with false everywhere. 1999 CmpInst::Predicate NotPred = Cmp->getInversePredicate(); 2000 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse); 2001 // Since we don't have the instruction "A < B" immediately to hand, work 2002 // out the value number that it would have and use that to find an 2003 // appropriate instruction (if any). 2004 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2005 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1); 2006 // If the number we were assigned was brand new then there is no point in 2007 // looking for an instruction realizing it: there cannot be one! 2008 if (Num < NextNum) { 2009 Value *NotCmp = findLeader(Root.getEnd(), Num); 2010 if (NotCmp && isa<Instruction>(NotCmp)) { 2011 unsigned NumReplacements = 2012 DominatesByEdge 2013 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root) 2014 : replaceDominatedUsesWith(NotCmp, NotVal, *DT, 2015 Root.getStart()); 2016 Changed |= NumReplacements > 0; 2017 NumGVNEqProp += NumReplacements; 2018 } 2019 } 2020 // Ensure that any instruction in scope that gets the "A < B" value number 2021 // is replaced with false. 2022 // The leader table only tracks basic blocks, not edges. Only add to if we 2023 // have the simple case where the edge dominates the end. 2024 if (RootDominatesEnd) 2025 addToLeaderTable(Num, NotVal, Root.getEnd()); 2026 2027 continue; 2028 } 2029 } 2030 2031 return Changed; 2032 } 2033 2034 /// When calculating availability, handle an instruction 2035 /// by inserting it into the appropriate sets 2036 bool GVN::processInstruction(Instruction *I) { 2037 // Ignore dbg info intrinsics. 2038 if (isa<DbgInfoIntrinsic>(I)) 2039 return false; 2040 2041 // If the instruction can be easily simplified then do so now in preference 2042 // to value numbering it. Value numbering often exposes redundancies, for 2043 // example if it determines that %y is equal to %x then the instruction 2044 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. 
2045 const DataLayout &DL = I->getModule()->getDataLayout(); 2046 if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) { 2047 bool Changed = false; 2048 if (!I->use_empty()) { 2049 I->replaceAllUsesWith(V); 2050 Changed = true; 2051 } 2052 if (isInstructionTriviallyDead(I, TLI)) { 2053 markInstructionForDeletion(I); 2054 Changed = true; 2055 } 2056 if (Changed) { 2057 if (MD && V->getType()->getScalarType()->isPointerTy()) 2058 MD->invalidateCachedPointerInfo(V); 2059 ++NumGVNSimpl; 2060 return true; 2061 } 2062 } 2063 2064 if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I)) 2065 if (IntrinsicI->getIntrinsicID() == Intrinsic::assume) 2066 return processAssumeIntrinsic(IntrinsicI); 2067 2068 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 2069 if (processLoad(LI)) 2070 return true; 2071 2072 unsigned Num = VN.lookupOrAdd(LI); 2073 addToLeaderTable(Num, LI, LI->getParent()); 2074 return false; 2075 } 2076 2077 // For conditional branches, we can perform simple conditional propagation on 2078 // the condition value itself. 2079 if (BranchInst *BI = dyn_cast<BranchInst>(I)) { 2080 if (!BI->isConditional()) 2081 return false; 2082 2083 if (isa<Constant>(BI->getCondition())) 2084 return processFoldableCondBr(BI); 2085 2086 Value *BranchCond = BI->getCondition(); 2087 BasicBlock *TrueSucc = BI->getSuccessor(0); 2088 BasicBlock *FalseSucc = BI->getSuccessor(1); 2089 // Avoid multiple edges early. 2090 if (TrueSucc == FalseSucc) 2091 return false; 2092 2093 BasicBlock *Parent = BI->getParent(); 2094 bool Changed = false; 2095 2096 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext()); 2097 BasicBlockEdge TrueE(Parent, TrueSucc); 2098 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true); 2099 2100 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext()); 2101 BasicBlockEdge FalseE(Parent, FalseSucc); 2102 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true); 2103 2104 return Changed; 2105 } 2106 2107 // For switches, propagate the case values into the case destinations. 2108 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) { 2109 Value *SwitchCond = SI->getCondition(); 2110 BasicBlock *Parent = SI->getParent(); 2111 bool Changed = false; 2112 2113 // Remember how many outgoing edges there are to every successor. 2114 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges; 2115 for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i) 2116 ++SwitchEdges[SI->getSuccessor(i)]; 2117 2118 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); 2119 i != e; ++i) { 2120 BasicBlock *Dst = i.getCaseSuccessor(); 2121 // If there is only a single edge, propagate the case value into it. 2122 if (SwitchEdges.lookup(Dst) == 1) { 2123 BasicBlockEdge E(Parent, Dst); 2124 Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E, true); 2125 } 2126 } 2127 return Changed; 2128 } 2129 2130 // Instructions with void type don't return a value, so there's 2131 // no point in trying to find redundancies in them. 2132 if (I->getType()->isVoidTy()) 2133 return false; 2134 2135 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2136 unsigned Num = VN.lookupOrAdd(I); 2137 2138 // Allocations are always uniquely numbered, so we can save time and memory 2139 // by fast failing them. 
2140 if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) { 2141 addToLeaderTable(Num, I, I->getParent()); 2142 return false; 2143 } 2144 2145 // If the number we were assigned was a brand new VN, then we don't 2146 // need to do a lookup to see if the number already exists 2147 // somewhere in the domtree: it can't! 2148 if (Num >= NextNum) { 2149 addToLeaderTable(Num, I, I->getParent()); 2150 return false; 2151 } 2152 2153 // Perform fast-path value-number based elimination of values inherited from 2154 // dominators. 2155 Value *Repl = findLeader(I->getParent(), Num); 2156 if (!Repl) { 2157 // Failure, just remember this instance for future use. 2158 addToLeaderTable(Num, I, I->getParent()); 2159 return false; 2160 } else if (Repl == I) { 2161 // If I was the result of a shortcut PRE, it might already be in the table 2162 // and the best replacement for itself. Nothing to do. 2163 return false; 2164 } 2165 2166 // Remove it! 2167 patchAndReplaceAllUsesWith(I, Repl); 2168 if (MD && Repl->getType()->getScalarType()->isPointerTy()) 2169 MD->invalidateCachedPointerInfo(Repl); 2170 markInstructionForDeletion(I); 2171 return true; 2172 } 2173 2174 /// runOnFunction - This is the main transformation entry point for a function. 2175 bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT, 2176 const TargetLibraryInfo &RunTLI, AAResults &RunAA, 2177 MemoryDependenceResults *RunMD) { 2178 AC = &RunAC; 2179 DT = &RunDT; 2180 VN.setDomTree(DT); 2181 TLI = &RunTLI; 2182 VN.setAliasAnalysis(&RunAA); 2183 MD = RunMD; 2184 VN.setMemDep(MD); 2185 2186 bool Changed = false; 2187 bool ShouldContinue = true; 2188 2189 // Merge unconditional branches, allowing PRE to catch more 2190 // optimization opportunities. 2191 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { 2192 BasicBlock *BB = &*FI++; 2193 2194 bool removedBlock = 2195 MergeBlockIntoPredecessor(BB, DT, /* LoopInfo */ nullptr, MD); 2196 if (removedBlock) ++NumGVNBlocks; 2197 2198 Changed |= removedBlock; 2199 } 2200 2201 unsigned Iteration = 0; 2202 while (ShouldContinue) { 2203 DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); 2204 ShouldContinue = iterateOnFunction(F); 2205 Changed |= ShouldContinue; 2206 ++Iteration; 2207 } 2208 2209 if (EnablePRE) { 2210 // Fabricate val-num for dead-code in order to suppress assertion in 2211 // performPRE(). 2212 assignValNumForDeadCode(); 2213 bool PREChanged = true; 2214 while (PREChanged) { 2215 PREChanged = performPRE(F); 2216 Changed |= PREChanged; 2217 } 2218 } 2219 2220 // FIXME: Should perform GVN again after PRE does something. PRE can move 2221 // computations into blocks where they become fully redundant. Note that 2222 // we can't do this until PRE's critical edge splitting updates memdep. 2223 // Actually, when this happens, we should just fully integrate PRE into GVN. 2224 2225 cleanupGlobalSets(); 2226 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each 2227 // iteration. 2228 DeadBlocks.clear(); 2229 2230 return Changed; 2231 } 2232 2233 bool GVN::processBlock(BasicBlock *BB) { 2234 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function 2235 // (and incrementing BI before processing an instruction). 2236 assert(InstrsToErase.empty() && 2237 "We expect InstrsToErase to be empty across iterations"); 2238 if (DeadBlocks.count(BB)) 2239 return false; 2240 2241 // Clearing map before every BB because it can be used only for single BB. 
2242 ReplaceWithConstMap.clear(); 2243 bool ChangedFunction = false; 2244 2245 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); 2246 BI != BE;) { 2247 if (!ReplaceWithConstMap.empty()) 2248 ChangedFunction |= replaceOperandsWithConsts(&*BI); 2249 ChangedFunction |= processInstruction(&*BI); 2250 2251 if (InstrsToErase.empty()) { 2252 ++BI; 2253 continue; 2254 } 2255 2256 // If we need some instructions deleted, do it now. 2257 NumGVNInstr += InstrsToErase.size(); 2258 2259 // Avoid iterator invalidation. 2260 bool AtStart = BI == BB->begin(); 2261 if (!AtStart) 2262 --BI; 2263 2264 for (SmallVectorImpl<Instruction *>::iterator I = InstrsToErase.begin(), 2265 E = InstrsToErase.end(); I != E; ++I) { 2266 DEBUG(dbgs() << "GVN removed: " << **I << '\n'); 2267 if (MD) MD->removeInstruction(*I); 2268 DEBUG(verifyRemoved(*I)); 2269 (*I)->eraseFromParent(); 2270 } 2271 InstrsToErase.clear(); 2272 2273 if (AtStart) 2274 BI = BB->begin(); 2275 else 2276 ++BI; 2277 } 2278 2279 return ChangedFunction; 2280 } 2281 2282 // Instantiate an expression in a predecessor that lacked it. 2283 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, 2284 unsigned int ValNo) { 2285 // Because we are going top-down through the block, all value numbers 2286 // will be available in the predecessor by the time we need them. Any 2287 // that weren't originally present will have been instantiated earlier 2288 // in this loop. 2289 bool success = true; 2290 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) { 2291 Value *Op = Instr->getOperand(i); 2292 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) 2293 continue; 2294 // This could be a newly inserted instruction, in which case, we won't 2295 // find a value number, and should give up before we hurt ourselves. 2296 // FIXME: Rewrite the infrastructure to let it easier to value number 2297 // and process newly inserted instructions. 2298 if (!VN.exists(Op)) { 2299 success = false; 2300 break; 2301 } 2302 if (Value *V = findLeader(Pred, VN.lookup(Op))) { 2303 Instr->setOperand(i, V); 2304 } else { 2305 success = false; 2306 break; 2307 } 2308 } 2309 2310 // Fail out if we encounter an operand that is not available in 2311 // the PRE predecessor. This is typically because of loads which 2312 // are not value numbered precisely. 2313 if (!success) 2314 return false; 2315 2316 Instr->insertBefore(Pred->getTerminator()); 2317 Instr->setName(Instr->getName() + ".pre"); 2318 Instr->setDebugLoc(Instr->getDebugLoc()); 2319 VN.add(Instr, ValNo); 2320 2321 // Update the availability map to include the new instruction. 2322 addToLeaderTable(ValNo, Instr, Pred); 2323 return true; 2324 } 2325 2326 bool GVN::performScalarPRE(Instruction *CurInst) { 2327 if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) || 2328 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() || 2329 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || 2330 isa<DbgInfoIntrinsic>(CurInst)) 2331 return false; 2332 2333 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from 2334 // sinking the compare again, and it would force the code generator to 2335 // move the i1 from processor flags or predicate registers into a general 2336 // purpose register. 2337 if (isa<CmpInst>(CurInst)) 2338 return false; 2339 2340 // We don't currently value number ANY inline asm calls. 
2341 if (CallInst *CallI = dyn_cast<CallInst>(CurInst)) 2342 if (CallI->isInlineAsm()) 2343 return false; 2344 2345 uint32_t ValNo = VN.lookup(CurInst); 2346 2347 // Look for the predecessors for PRE opportunities. We're 2348 // only trying to solve the basic diamond case, where 2349 // a value is computed in the successor and one predecessor, 2350 // but not the other. We also explicitly disallow cases 2351 // where the successor is its own predecessor, because they're 2352 // more complicated to get right. 2353 unsigned NumWith = 0; 2354 unsigned NumWithout = 0; 2355 BasicBlock *PREPred = nullptr; 2356 BasicBlock *CurrentBlock = CurInst->getParent(); 2357 2358 SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap; 2359 for (BasicBlock *P : predecessors(CurrentBlock)) { 2360 // We're not interested in PRE where the block is its 2361 // own predecessor, or in blocks with predecessors 2362 // that are not reachable. 2363 if (P == CurrentBlock) { 2364 NumWithout = 2; 2365 break; 2366 } else if (!DT->isReachableFromEntry(P)) { 2367 NumWithout = 2; 2368 break; 2369 } 2370 2371 Value *predV = findLeader(P, ValNo); 2372 if (!predV) { 2373 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P)); 2374 PREPred = P; 2375 ++NumWithout; 2376 } else if (predV == CurInst) { 2377 /* CurInst dominates this predecessor. */ 2378 NumWithout = 2; 2379 break; 2380 } else { 2381 predMap.push_back(std::make_pair(predV, P)); 2382 ++NumWith; 2383 } 2384 } 2385 2386 // Don't do PRE when it might increase code size, i.e. when 2387 // we would need to insert instructions in more than one pred. 2388 if (NumWithout > 1 || NumWith == 0) 2389 return false; 2390 2391 // We may have a case where all predecessors have the instruction, 2392 // and we just need to insert a phi node. Otherwise, perform 2393 // insertion. 2394 Instruction *PREInstr = nullptr; 2395 2396 if (NumWithout != 0) { 2397 // Don't do PRE across indirect branch. 2398 if (isa<IndirectBrInst>(PREPred->getTerminator())) 2399 return false; 2400 2401 // We can't do PRE safely on a critical edge, so instead we schedule 2402 // the edge to be split and perform the PRE the next time we iterate 2403 // on the function. 2404 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); 2405 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { 2406 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); 2407 return false; 2408 } 2409 // We need to insert somewhere, so let's give it a shot 2410 PREInstr = CurInst->clone(); 2411 if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) { 2412 // If we failed insertion, make sure we remove the instruction. 2413 DEBUG(verifyRemoved(PREInstr)); 2414 delete PREInstr; 2415 return false; 2416 } 2417 } 2418 2419 // Either we should have filled in the PRE instruction, or we should 2420 // not have needed insertions. 2421 assert (PREInstr != nullptr || NumWithout == 0); 2422 2423 ++NumGVNPRE; 2424 2425 // Create a PHI to make the value available in this block. 
2426 PHINode *Phi = 2427 PHINode::Create(CurInst->getType(), predMap.size(), 2428 CurInst->getName() + ".pre-phi", &CurrentBlock->front()); 2429 for (unsigned i = 0, e = predMap.size(); i != e; ++i) { 2430 if (Value *V = predMap[i].first) 2431 Phi->addIncoming(V, predMap[i].second); 2432 else 2433 Phi->addIncoming(PREInstr, PREPred); 2434 } 2435 2436 VN.add(Phi, ValNo); 2437 addToLeaderTable(ValNo, Phi, CurrentBlock); 2438 Phi->setDebugLoc(CurInst->getDebugLoc()); 2439 CurInst->replaceAllUsesWith(Phi); 2440 if (MD && Phi->getType()->getScalarType()->isPointerTy()) 2441 MD->invalidateCachedPointerInfo(Phi); 2442 VN.erase(CurInst); 2443 removeFromLeaderTable(ValNo, CurInst, CurrentBlock); 2444 2445 DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); 2446 if (MD) 2447 MD->removeInstruction(CurInst); 2448 DEBUG(verifyRemoved(CurInst)); 2449 CurInst->eraseFromParent(); 2450 ++NumGVNInstr; 2451 2452 return true; 2453 } 2454 2455 /// Perform a purely local form of PRE that looks for diamond 2456 /// control flow patterns and attempts to perform simple PRE at the join point. 2457 bool GVN::performPRE(Function &F) { 2458 bool Changed = false; 2459 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) { 2460 // Nothing to PRE in the entry block. 2461 if (CurrentBlock == &F.getEntryBlock()) 2462 continue; 2463 2464 // Don't perform PRE on an EH pad. 2465 if (CurrentBlock->isEHPad()) 2466 continue; 2467 2468 for (BasicBlock::iterator BI = CurrentBlock->begin(), 2469 BE = CurrentBlock->end(); 2470 BI != BE;) { 2471 Instruction *CurInst = &*BI++; 2472 Changed |= performScalarPRE(CurInst); 2473 } 2474 } 2475 2476 if (splitCriticalEdges()) 2477 Changed = true; 2478 2479 return Changed; 2480 } 2481 2482 /// Split the critical edge connecting the given two blocks, and return 2483 /// the block inserted to the critical edge. 2484 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) { 2485 BasicBlock *BB = 2486 SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT)); 2487 if (MD) 2488 MD->invalidateCachedPredecessors(); 2489 return BB; 2490 } 2491 2492 /// Split critical edges found during the previous 2493 /// iteration that may enable further optimization. 2494 bool GVN::splitCriticalEdges() { 2495 if (toSplit.empty()) 2496 return false; 2497 do { 2498 std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val(); 2499 SplitCriticalEdge(Edge.first, Edge.second, 2500 CriticalEdgeSplittingOptions(DT)); 2501 } while (!toSplit.empty()); 2502 if (MD) MD->invalidateCachedPredecessors(); 2503 return true; 2504 } 2505 2506 /// Executes one iteration of GVN 2507 bool GVN::iterateOnFunction(Function &F) { 2508 cleanupGlobalSets(); 2509 2510 // Top-down walk of the dominator tree 2511 bool Changed = false; 2512 // Save the blocks this function have before transformation begins. GVN may 2513 // split critical edge, and hence may invalidate the RPO/DT iterator. 2514 // 2515 std::vector<BasicBlock *> BBVect; 2516 BBVect.reserve(256); 2517 // Needed for value numbering with phi construction to work. 
2518 ReversePostOrderTraversal<Function *> RPOT(&F); 2519 for (ReversePostOrderTraversal<Function *>::rpo_iterator RI = RPOT.begin(), 2520 RE = RPOT.end(); 2521 RI != RE; ++RI) 2522 BBVect.push_back(*RI); 2523 2524 for (std::vector<BasicBlock *>::iterator I = BBVect.begin(), E = BBVect.end(); 2525 I != E; I++) 2526 Changed |= processBlock(*I); 2527 2528 return Changed; 2529 } 2530 2531 void GVN::cleanupGlobalSets() { 2532 VN.clear(); 2533 LeaderTable.clear(); 2534 TableAllocator.Reset(); 2535 } 2536 2537 /// Verify that the specified instruction does not occur in our 2538 /// internal data structures. 2539 void GVN::verifyRemoved(const Instruction *Inst) const { 2540 VN.verifyRemoved(Inst); 2541 2542 // Walk through the value number scope to make sure the instruction isn't 2543 // ferreted away in it. 2544 for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator 2545 I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) { 2546 const LeaderTableEntry *Node = &I->second; 2547 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 2548 2549 while (Node->Next) { 2550 Node = Node->Next; 2551 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 2552 } 2553 } 2554 } 2555 2556 /// BB is declared dead, which implied other blocks become dead as well. This 2557 /// function is to add all these blocks to "DeadBlocks". For the dead blocks' 2558 /// live successors, update their phi nodes by replacing the operands 2559 /// corresponding to dead blocks with UndefVal. 2560 void GVN::addDeadBlock(BasicBlock *BB) { 2561 SmallVector<BasicBlock *, 4> NewDead; 2562 SmallSetVector<BasicBlock *, 4> DF; 2563 2564 NewDead.push_back(BB); 2565 while (!NewDead.empty()) { 2566 BasicBlock *D = NewDead.pop_back_val(); 2567 if (DeadBlocks.count(D)) 2568 continue; 2569 2570 // All blocks dominated by D are dead. 2571 SmallVector<BasicBlock *, 8> Dom; 2572 DT->getDescendants(D, Dom); 2573 DeadBlocks.insert(Dom.begin(), Dom.end()); 2574 2575 // Figure out the dominance-frontier(D). 2576 for (BasicBlock *B : Dom) { 2577 for (BasicBlock *S : successors(B)) { 2578 if (DeadBlocks.count(S)) 2579 continue; 2580 2581 bool AllPredDead = true; 2582 for (BasicBlock *P : predecessors(S)) 2583 if (!DeadBlocks.count(P)) { 2584 AllPredDead = false; 2585 break; 2586 } 2587 2588 if (!AllPredDead) { 2589 // S could be proved dead later on. That is why we don't update phi 2590 // operands at this moment. 2591 DF.insert(S); 2592 } else { 2593 // While S is not dominated by D, it is dead by now. This could take 2594 // place if S already have a dead predecessor before D is declared 2595 // dead. 2596 NewDead.push_back(S); 2597 } 2598 } 2599 } 2600 } 2601 2602 // For the dead blocks' live successors, update their phi nodes by replacing 2603 // the operands corresponding to dead blocks with UndefVal. 
2604 for(SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end(); 2605 I != E; I++) { 2606 BasicBlock *B = *I; 2607 if (DeadBlocks.count(B)) 2608 continue; 2609 2610 SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B)); 2611 for (BasicBlock *P : Preds) { 2612 if (!DeadBlocks.count(P)) 2613 continue; 2614 2615 if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) { 2616 if (BasicBlock *S = splitCriticalEdges(P, B)) 2617 DeadBlocks.insert(P = S); 2618 } 2619 2620 for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) { 2621 PHINode &Phi = cast<PHINode>(*II); 2622 Phi.setIncomingValue(Phi.getBasicBlockIndex(P), 2623 UndefValue::get(Phi.getType())); 2624 } 2625 } 2626 } 2627 } 2628 2629 // If the given branch is recognized as a foldable branch (i.e. conditional 2630 // branch with constant condition), it will perform following analyses and 2631 // transformation. 2632 // 1) If the dead out-coming edge is a critical-edge, split it. Let 2633 // R be the target of the dead out-coming edge. 2634 // 1) Identify the set of dead blocks implied by the branch's dead outcoming 2635 // edge. The result of this step will be {X| X is dominated by R} 2636 // 2) Identify those blocks which haves at least one dead predecessor. The 2637 // result of this step will be dominance-frontier(R). 2638 // 3) Update the PHIs in DF(R) by replacing the operands corresponding to 2639 // dead blocks with "UndefVal" in an hope these PHIs will optimized away. 2640 // 2641 // Return true iff *NEW* dead code are found. 2642 bool GVN::processFoldableCondBr(BranchInst *BI) { 2643 if (!BI || BI->isUnconditional()) 2644 return false; 2645 2646 // If a branch has two identical successors, we cannot declare either dead. 2647 if (BI->getSuccessor(0) == BI->getSuccessor(1)) 2648 return false; 2649 2650 ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition()); 2651 if (!Cond) 2652 return false; 2653 2654 BasicBlock *DeadRoot = 2655 Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0); 2656 if (DeadBlocks.count(DeadRoot)) 2657 return false; 2658 2659 if (!DeadRoot->getSinglePredecessor()) 2660 DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot); 2661 2662 addDeadBlock(DeadRoot); 2663 return true; 2664 } 2665 2666 // performPRE() will trigger assert if it comes across an instruction without 2667 // associated val-num. As it normally has far more live instructions than dead 2668 // instructions, it makes more sense just to "fabricate" a val-number for the 2669 // dead code than checking if instruction involved is dead or not. 2670 void GVN::assignValNumForDeadCode() { 2671 for (BasicBlock *BB : DeadBlocks) { 2672 for (Instruction &Inst : *BB) { 2673 unsigned ValNum = VN.lookupOrAdd(&Inst); 2674 addToLeaderTable(ValNum, &Inst, BB); 2675 } 2676 } 2677 } 2678 2679 class llvm::gvn::GVNLegacyPass : public FunctionPass { 2680 public: 2681 static char ID; // Pass identification, replacement for typeid 2682 explicit GVNLegacyPass(bool NoLoads = false) 2683 : FunctionPass(ID), NoLoads(NoLoads) { 2684 initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry()); 2685 } 2686 2687 bool runOnFunction(Function &F) override { 2688 if (skipFunction(F)) 2689 return false; 2690 2691 return Impl.runImpl( 2692 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), 2693 getAnalysis<DominatorTreeWrapperPass>().getDomTree(), 2694 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), 2695 getAnalysis<AAResultsWrapperPass>().getAAResults(), 2696 NoLoads ? 
nullptr 2697 : &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()); 2698 } 2699 2700 void getAnalysisUsage(AnalysisUsage &AU) const override { 2701 AU.addRequired<AssumptionCacheTracker>(); 2702 AU.addRequired<DominatorTreeWrapperPass>(); 2703 AU.addRequired<TargetLibraryInfoWrapperPass>(); 2704 if (!NoLoads) 2705 AU.addRequired<MemoryDependenceWrapperPass>(); 2706 AU.addRequired<AAResultsWrapperPass>(); 2707 2708 AU.addPreserved<DominatorTreeWrapperPass>(); 2709 AU.addPreserved<GlobalsAAWrapperPass>(); 2710 } 2711 2712 private: 2713 bool NoLoads; 2714 GVN Impl; 2715 }; 2716 2717 char GVNLegacyPass::ID = 0; 2718 2719 // The public interface to this file... 2720 FunctionPass *llvm::createGVNPass(bool NoLoads) { 2721 return new GVNLegacyPass(NoLoads); 2722 } 2723 2724 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false) 2725 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 2726 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass) 2727 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 2728 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 2729 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 2730 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 2731 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false) 2732