//===- GVN.cpp - Eliminate redundant values and loads --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <vector>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr,  "Number of instructions deleted");
STATISTIC(NumGVNLoad,   "Number of loads deleted");
STATISTIC(NumGVNPRE,    "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
STATISTIC(NumGVNSimpl,  "Number of instructions simplified");
STATISTIC(NumGVNEqProp, "Number of equalities propagated");
STATISTIC(NumPRELoad,   "Number of loads PRE'd");

static cl::opt<bool> EnablePRE("enable-pre",
                               cl::init(true), cl::Hidden);
static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true));

// Maximum allowed recursion depth.
static cl::opt<uint32_t>
MaxRecurseDepth("max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore,
                cl::desc("Max recurse depth (default = 1000)"));
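// Illustrative note (not part of the pass): the three knobs above are ordinary
// cl::opt flags, so a typical way to exercise them would be something along
// the lines of
//
//   opt -gvn -enable-pre=false -enable-load-pre=false -max-recurse-depth=500 \
//       -S input.ll
//
// The exact driver invocation is an assumption for illustration; only the flag
// names are taken from the definitions in this file.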
//===----------------------------------------------------------------------===//
//                         ValueTable Class
//===----------------------------------------------------------------------===//

/// This class holds the mapping between values and value numbers. It is used
/// as an efficient mechanism to determine the expression-wise equivalence of
/// two values.
namespace {
  struct Expression {
    uint32_t opcode;
    Type *type;
    SmallVector<uint32_t, 4> varargs;

    Expression(uint32_t o = ~2U) : opcode(o) { }

    bool operator==(const Expression &other) const {
      if (opcode != other.opcode)
        return false;
      if (opcode == ~0U || opcode == ~1U)
        return true;
      if (type != other.type)
        return false;
      if (varargs != other.varargs)
        return false;
      return true;
    }

    friend hash_code hash_value(const Expression &Value) {
      return hash_combine(Value.opcode, Value.type,
                          hash_combine_range(Value.varargs.begin(),
                                             Value.varargs.end()));
    }
  };

  class ValueTable {
    DenseMap<Value*, uint32_t> valueNumbering;
    DenseMap<Expression, uint32_t> expressionNumbering;
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;

    uint32_t nextValueNumber;

    Expression create_expression(Instruction* I);
    Expression create_cmp_expression(unsigned Opcode,
                                     CmpInst::Predicate Predicate,
                                     Value *LHS, Value *RHS);
    Expression create_extractvalue_expression(ExtractValueInst* EI);
    uint32_t lookup_or_add_call(CallInst* C);
  public:
    ValueTable() : nextValueNumber(1) { }
    uint32_t lookup_or_add(Value *V);
    uint32_t lookup(Value *V) const;
    uint32_t lookup_or_add_cmp(unsigned Opcode, CmpInst::Predicate Pred,
                               Value *LHS, Value *RHS);
    void add(Value *V, uint32_t num);
    void clear();
    void erase(Value *v);
    void setAliasAnalysis(AliasAnalysis* A) { AA = A; }
    AliasAnalysis *getAliasAnalysis() const { return AA; }
    void setMemDep(MemoryDependenceAnalysis* M) { MD = M; }
    void setDomTree(DominatorTree* D) { DT = D; }
    uint32_t getNextUnusedValueNumber() { return nextValueNumber; }
    void verifyRemoved(const Value *) const;
  };
}

namespace llvm {
template <> struct DenseMapInfo<Expression> {
  static inline Expression getEmptyKey() {
    return ~0U;
  }

  static inline Expression getTombstoneKey() {
    return ~1U;
  }

  static unsigned getHashValue(const Expression e) {
    using llvm::hash_value;
    return static_cast<unsigned>(hash_value(e));
  }
  static bool isEqual(const Expression &LHS, const Expression &RHS) {
    return LHS == RHS;
  }
};

}

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

Expression ValueTable::create_expression(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

Expression ValueTable::create_cmp_expression(unsigned Opcode,
                                             CmpInst::Predicate Predicate,
                                             Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookup_or_add(LHS));
  e.varargs.push_back(lookup_or_add(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  return e;
}

Expression ValueTable::create_extractvalue_expression(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  IntrinsicInst *I = dyn_cast<IntrinsicInst>(EI->getAggregateOperand());
  if (I != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI might be an extract from one of our recognised intrinsics. If it
    // is we'll synthesize a semantically equivalent expression instead of
    // an extractvalue expression.
    switch (I->getIntrinsicID()) {
      case Intrinsic::sadd_with_overflow:
      case Intrinsic::uadd_with_overflow:
        e.opcode = Instruction::Add;
        break;
      case Intrinsic::ssub_with_overflow:
      case Intrinsic::usub_with_overflow:
        e.opcode = Instruction::Sub;
        break;
      case Intrinsic::smul_with_overflow:
      case Intrinsic::umul_with_overflow:
        e.opcode = Instruction::Mul;
        break;
      default:
        break;
    }

    if (e.opcode != 0) {
      // Intrinsic recognized. Grab its args to finish building the expression.
      assert(I->getNumArgOperands() == 2 &&
             "Expect two args for recognised intrinsics.");
      e.varargs.push_back(lookup_or_add(I->getArgOperand(0)));
      e.varargs.push_back(lookup_or_add(I->getArgOperand(1)));
      return e;
    }
  }

  // Not a recognised intrinsic. Fall back to producing an extractvalue
  // expression.
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookup_or_add(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}
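// Illustration (a sketch, not compiled code): given IR like
//
//   %a = icmp slt i32 %x, %y
//   %b = icmp sgt i32 %y, %x
//
// create_expression/create_cmp_expression sort the operand value numbers and
// swap the predicate when needed, so both compares canonicalize to the same
// (opcode << 8) | predicate key over the same sorted operand pair and receive
// the same value number. The same canonicalization lets commutative operations
// such as "add %x, %y" and "add %y, %x" share a number.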
//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

/// add - Insert a value into the table with a specified value number.
void ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
}

uint32_t ValueTable::lookup_or_add_call(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) e = nextValueNumber++;
    valueNumbering[C] = e;
    return e;
  } else if (AA->onlyReadsMemory(C)) {
    Expression exp = create_expression(C);
    uint32_t &e = expressionNumbering[exp];
    if (!e) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }
    if (!MD) {
      e = nextValueNumber++;
      valueNumbering[C] = e;
      return e;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst* local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
        uint32_t cd_vn = lookup_or_add(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookup_or_add(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceAnalysis::NonLocalDepInfo &deps =
      MD->getNonLocalCallDependency(CallSite(C));
    // FIXME: Move the checking logic to MemDep!
    CallInst* cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookup_or_add(C->getArgOperand(i));
      uint32_t cd_vn = lookup_or_add(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookup_or_add(cdep);
    valueNumbering[C] = v;
    return v;

  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t ValueTable::lookup_or_add(Value *V) {
  DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction* I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookup_or_add_call(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = create_expression(I);
      break;
    case Instruction::ExtractValue:
      exp = create_extractvalue_expression(cast<ExtractValueInst>(I));
      break;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t ValueTable::lookup(Value *V) const {
  DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
  assert(VI != valueNumbering.end() && "Value not numbered?");
  return VI->second;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t ValueTable::lookup_or_add_cmp(unsigned Opcode,
                                       CmpInst::Predicate Predicate,
                                       Value *LHS, Value *RHS) {
  Expression exp = create_cmp_expression(Opcode, Predicate, LHS, RHS);
  uint32_t& e = expressionNumbering[exp];
  if (!e) e = nextValueNumber++;
  return e;
}

/// Remove all entries from the ValueTable.
void ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  nextValueNumber = 1;
}

/// Remove a value from the value numbering.
void ValueTable::erase(Value *V) {
  valueNumbering.erase(V);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value*, uint32_t>::const_iterator
         I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

namespace {
  class GVN;
  struct AvailableValueInBlock {
    /// BB - The basic block in question.
    BasicBlock *BB;
    enum ValType {
      SimpleVal,  // A simple offsetted value that is accessed.
      LoadVal,    // A value produced by a load.
      MemIntrin,  // A memory intrinsic which is loaded from.
      UndefVal    // A UndefValue representing a value from a dead block (which
                  // is not yet physically removed from the CFG).
    };

    /// V - The value that is live out of the block.
    PointerIntPair<Value *, 2, ValType> Val;

    /// Offset - The byte offset in Val that is interesting for the load query.
    unsigned Offset;

    static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                     unsigned Offset = 0) {
      AvailableValueInBlock Res;
      Res.BB = BB;
      Res.Val.setPointer(V);
      Res.Val.setInt(SimpleVal);
      Res.Offset = Offset;
      return Res;
    }

    static AvailableValueInBlock getMI(BasicBlock *BB, MemIntrinsic *MI,
                                       unsigned Offset = 0) {
      AvailableValueInBlock Res;
      Res.BB = BB;
      Res.Val.setPointer(MI);
      Res.Val.setInt(MemIntrin);
      Res.Offset = Offset;
      return Res;
    }

    static AvailableValueInBlock getLoad(BasicBlock *BB, LoadInst *LI,
                                         unsigned Offset = 0) {
      AvailableValueInBlock Res;
      Res.BB = BB;
      Res.Val.setPointer(LI);
      Res.Val.setInt(LoadVal);
      Res.Offset = Offset;
      return Res;
    }

    static AvailableValueInBlock getUndef(BasicBlock *BB) {
      AvailableValueInBlock Res;
      Res.BB = BB;
      Res.Val.setPointer(nullptr);
      Res.Val.setInt(UndefVal);
      Res.Offset = 0;
      return Res;
    }

    bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
    bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
    bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
    bool isUndefValue() const { return Val.getInt() == UndefVal; }

    Value *getSimpleValue() const {
      assert(isSimpleValue() && "Wrong accessor");
      return Val.getPointer();
    }

    LoadInst *getCoercedLoadValue() const {
      assert(isCoercedLoadValue() && "Wrong accessor");
      return cast<LoadInst>(Val.getPointer());
    }

    MemIntrinsic *getMemIntrinValue() const {
      assert(isMemIntrinValue() && "Wrong accessor");
      return cast<MemIntrinsic>(Val.getPointer());
    }

    /// Emit code into this block to adjust the value defined here to the
    /// specified type. This handles various coercion cases.
    Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const;
  };

  class GVN : public FunctionPass {
    bool NoLoads;
    MemoryDependenceAnalysis *MD;
    DominatorTree *DT;
    const TargetLibraryInfo *TLI;
    AssumptionCache *AC;
    SetVector<BasicBlock *> DeadBlocks;

    ValueTable VN;

    /// A mapping from value numbers to lists of Value*'s that
    /// have that value number. Use findLeader to query it.
    struct LeaderTableEntry {
      Value *Val;
      const BasicBlock *BB;
      LeaderTableEntry *Next;
    };
    DenseMap<uint32_t, LeaderTableEntry> LeaderTable;
    BumpPtrAllocator TableAllocator;

    // Block-local map of equivalent values to their leader, does not
    // propagate to any successors. Entries added mid-block are applied
    // to the remaining instructions in the block.
    SmallMapVector<llvm::Value *, llvm::Constant *, 4> ReplaceWithConstMap;
    SmallVector<Instruction*, 8> InstrsToErase;

    typedef SmallVector<NonLocalDepResult, 64> LoadDepVect;
    typedef SmallVector<AvailableValueInBlock, 64> AvailValInBlkVect;
    typedef SmallVector<BasicBlock*, 64> UnavailBlkVect;

  public:
    static char ID; // Pass identification, replacement for typeid
    explicit GVN(bool noloads = false)
        : FunctionPass(ID), NoLoads(noloads), MD(nullptr) {
      initializeGVNPass(*PassRegistry::getPassRegistry());
    }

    bool runOnFunction(Function &F) override;

    /// This removes the specified instruction from
    /// our various maps and marks it for deletion.
    void markInstructionForDeletion(Instruction *I) {
      VN.erase(I);
      InstrsToErase.push_back(I);
    }

    DominatorTree &getDominatorTree() const { return *DT; }
    AliasAnalysis *getAliasAnalysis() const { return VN.getAliasAnalysis(); }
    MemoryDependenceAnalysis &getMemDep() const { return *MD; }
  private:
    /// Push a new Value to the LeaderTable onto the list for its value number.
    void addToLeaderTable(uint32_t N, Value *V, const BasicBlock *BB) {
      LeaderTableEntry &Curr = LeaderTable[N];
      if (!Curr.Val) {
        Curr.Val = V;
        Curr.BB = BB;
        return;
      }

      LeaderTableEntry *Node = TableAllocator.Allocate<LeaderTableEntry>();
      Node->Val = V;
      Node->BB = BB;
      Node->Next = Curr.Next;
      Curr.Next = Node;
    }

    /// Scan the list of values corresponding to a given
    /// value number, and remove the given instruction if encountered.
    void removeFromLeaderTable(uint32_t N, Instruction *I, BasicBlock *BB) {
      LeaderTableEntry* Prev = nullptr;
      LeaderTableEntry* Curr = &LeaderTable[N];

      while (Curr && (Curr->Val != I || Curr->BB != BB)) {
        Prev = Curr;
        Curr = Curr->Next;
      }

      if (!Curr)
        return;

      if (Prev) {
        Prev->Next = Curr->Next;
      } else {
        if (!Curr->Next) {
          Curr->Val = nullptr;
          Curr->BB = nullptr;
        } else {
          LeaderTableEntry* Next = Curr->Next;
          Curr->Val = Next->Val;
          Curr->BB = Next->BB;
          Curr->Next = Next->Next;
        }
      }
    }

    // List of critical edges to be split between iterations.
    SmallVector<std::pair<TerminatorInst*, unsigned>, 4> toSplit;

    // This transformation requires dominator info.
    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.addRequired<AssumptionCacheTracker>();
      AU.addRequired<DominatorTreeWrapperPass>();
      AU.addRequired<TargetLibraryInfoWrapperPass>();
      if (!NoLoads)
        AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AAResultsWrapperPass>();

      AU.addPreserved<DominatorTreeWrapperPass>();
      AU.addPreserved<GlobalsAAWrapperPass>();
    }


    // Helper functions of redundant load elimination
    bool processLoad(LoadInst *L);
    bool processNonLocalLoad(LoadInst *L);
    bool processAssumeIntrinsic(IntrinsicInst *II);
    void AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                 AvailValInBlkVect &ValuesPerBlock,
                                 UnavailBlkVect &UnavailableBlocks);
    bool PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                        UnavailBlkVect &UnavailableBlocks);

    // Other helper routines
    bool processInstruction(Instruction *I);
    bool processBlock(BasicBlock *BB);
    void dump(DenseMap<uint32_t, Value*> &d);
    bool iterateOnFunction(Function &F);
    bool performPRE(Function &F);
    bool performScalarPRE(Instruction *I);
    bool performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                   unsigned int ValNo);
    Value *findLeader(const BasicBlock *BB, uint32_t num);
    void cleanupGlobalSets();
    void verifyRemoved(const Instruction *I) const;
    bool splitCriticalEdges();
    BasicBlock *splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ);
    bool replaceOperandsWithConsts(Instruction *I) const;
    bool propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
                           bool DominatesByEdge);
    bool processFoldableCondBr(BranchInst *BI);
    void addDeadBlock(BasicBlock *BB);
    void assignValNumForDeadCode();
  };

  char GVN::ID = 0;
}

// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoLoads) {
  return new GVN(NoLoads);
}

INITIALIZE_PASS_BEGIN(GVN, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(GVN, "gvn", "Global Value Numbering", false, false)

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void GVN::dump(DenseMap<uint32_t, Value*>& d) {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value*>::iterator I = d.begin(),
         E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                            DenseMap<BasicBlock*, char> &FullyAvailableBlocks,
                            uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock*, char>::iterator, char> IV =
    FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,RecurseDepth+1))
      goto SpeculationFailure;

  return true;

// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock*, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}


/// Return true if CoerceAvailableValueToLoadType will succeed.
static bool CanCoerceMustAliasedValueToLoad(Value *StoredVal,
                                            Type *LoadTy,
                                            const DataLayout &DL) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy() ||
      StoredVal->getType()->isStructTy() ||
      StoredVal->getType()->isArrayTy())
    return false;

  // The store has to be at least as big as the load.
  if (DL.getTypeSizeInBits(StoredVal->getType()) <
        DL.getTypeSizeInBits(LoadTy))
    return false;

  return true;
}
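// For illustration (assumed-typical cases, not compiled code): with a 64-bit
// DataLayout, a stored i64 can later satisfy a load of i32, float or i8* at
// the same address, because the store is at least as wide as the load and
// neither side is an aggregate; CoerceAvailableValueToLoadType below then
// bitcasts/truncates as needed. A stored i32 cannot satisfy an i64 load (the
// store is narrower), and struct or array types are always rejected here.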
/// If we saw a store of a value to memory, and
/// then a load from a must-aliased pointer of a different type, try to coerce
/// the stored value. LoadedTy is the type of the load we want to replace.
/// IRB is IRBuilder used to insert new instructions.
///
/// If we can't do it, return null.
static Value *CoerceAvailableValueToLoadType(Value *StoredVal, Type *LoadedTy,
                                             IRBuilder<> &IRB,
                                             const DataLayout &DL) {
  if (!CanCoerceMustAliasedValueToLoad(StoredVal, LoadedTy, DL))
    return nullptr;

  // If this is already the right type, just return it.
  Type *StoredValTy = StoredVal->getType();

  uint64_t StoreSize = DL.getTypeSizeInBits(StoredValTy);
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadedTy);

  // If the store and reload are the same size, we can always reuse it.
  if (StoreSize == LoadSize) {
    // Pointer to Pointer -> use bitcast.
    if (StoredValTy->getScalarType()->isPointerTy() &&
        LoadedTy->getScalarType()->isPointerTy())
      return IRB.CreateBitCast(StoredVal, LoadedTy);

    // Convert source pointers to integers, which can be bitcast.
    if (StoredValTy->getScalarType()->isPointerTy()) {
      StoredValTy = DL.getIntPtrType(StoredValTy);
      StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
    }

    Type *TypeToCastTo = LoadedTy;
    if (TypeToCastTo->getScalarType()->isPointerTy())
      TypeToCastTo = DL.getIntPtrType(TypeToCastTo);

    if (StoredValTy != TypeToCastTo)
      StoredVal = IRB.CreateBitCast(StoredVal, TypeToCastTo);

    // Cast to pointer if the load needs a pointer type.
    if (LoadedTy->getScalarType()->isPointerTy())
      StoredVal = IRB.CreateIntToPtr(StoredVal, LoadedTy);

    return StoredVal;
  }

  // If the loaded value is smaller than the available value, then we can
  // extract out a piece from it. If the available value is too small, then we
  // can't do anything.
  assert(StoreSize >= LoadSize && "CanCoerceMustAliasedValueToLoad fail");

  // Convert source pointers to integers, which can be manipulated.
  if (StoredValTy->getScalarType()->isPointerTy()) {
    StoredValTy = DL.getIntPtrType(StoredValTy);
    StoredVal = IRB.CreatePtrToInt(StoredVal, StoredValTy);
  }

  // Convert vectors and fp to integer, which can be manipulated.
  if (!StoredValTy->isIntegerTy()) {
    StoredValTy = IntegerType::get(StoredValTy->getContext(), StoreSize);
    StoredVal = IRB.CreateBitCast(StoredVal, StoredValTy);
  }

  // If this is a big-endian system, we need to shift the value down to the low
  // bits so that a truncate will work.
  if (DL.isBigEndian()) {
    StoredVal = IRB.CreateLShr(StoredVal, StoreSize - LoadSize, "tmp");
  }

  // Truncate the integer to the right size now.
  Type *NewIntTy = IntegerType::get(StoredValTy->getContext(), LoadSize);
  StoredVal = IRB.CreateTrunc(StoredVal, NewIntTy, "trunc");

  if (LoadedTy == NewIntTy)
    return StoredVal;

  // If the result is a pointer, inttoptr.
  if (LoadedTy->getScalarType()->isPointerTy())
    return IRB.CreateIntToPtr(StoredVal, LoadedTy, "inttoptr");

  // Otherwise, bitcast.
  return IRB.CreateBitCast(StoredVal, LoadedTy, "bitcast");
}

/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering memory write (store,
/// memset, memcpy, memmove). This means that the write *may* provide bits used
/// by the load but we can't be sure because the pointers don't mustalias.
///
/// Check this case to see if there is anything more we can do before we give
/// up. This returns -1 if we have to give up, or a byte number in the stored
/// value of the piece that feeds the load.
static int AnalyzeLoadFromClobberingWrite(Type *LoadTy, Value *LoadPtr,
                                          Value *WritePtr,
                                          uint64_t WriteSizeInBits,
                                          const DataLayout &DL) {
  // If the loaded or stored value is a first class array or struct, don't try
  // to transform them. We need to be able to bitcast to integer.
  if (LoadTy->isStructTy() || LoadTy->isArrayTy())
    return -1;

  int64_t StoreOffset = 0, LoadOffset = 0;
  Value *StoreBase =
      GetPointerBaseWithConstantOffset(WritePtr, StoreOffset, DL);
  Value *LoadBase = GetPointerBaseWithConstantOffset(LoadPtr, LoadOffset, DL);
  if (StoreBase != LoadBase)
    return -1;

  // If the load and store are to the exact same address, they should have been
  // a must alias. AA must have gotten confused.
  // FIXME: Study to see if/when this happens. One case is forwarding a memset
  // to a load from the base of the memset.
#if 0
  if (LoadOffset == StoreOffset) {
    dbgs() << "STORE/LOAD DEP WITH COMMON POINTER MISSED:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
  }
#endif

  // If the load and store don't overlap at all, the store doesn't provide
  // anything to the load. In this case, they really don't alias at all, AA
  // must have gotten confused.
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy);

  if ((WriteSizeInBits & 7) | (LoadSize & 7))
    return -1;
  uint64_t StoreSize = WriteSizeInBits >> 3; // Convert to bytes.
  LoadSize >>= 3;


  bool isAAFailure = false;
  if (StoreOffset < LoadOffset)
    isAAFailure = StoreOffset+int64_t(StoreSize) <= LoadOffset;
  else
    isAAFailure = LoadOffset+int64_t(LoadSize) <= StoreOffset;

  if (isAAFailure) {
#if 0
    dbgs() << "STORE LOAD DEP WITH COMMON BASE:\n"
           << "Base       = " << *StoreBase << "\n"
           << "Store Ptr  = " << *WritePtr << "\n"
           << "Store Offs = " << StoreOffset << "\n"
           << "Load Ptr   = " << *LoadPtr << "\n";
    abort();
#endif
    return -1;
  }

  // If the Load isn't completely contained within the stored bits, we don't
  // have all the bits to feed it. We could do something crazy in the future
  // (issue a smaller load then merge the bits in) but this seems unlikely to be
  // valuable.
  if (StoreOffset > LoadOffset ||
      StoreOffset+StoreSize < LoadOffset+LoadSize)
    return -1;

  // Okay, we can do this transformation. Return the number of bytes into the
  // store that the load is.
  return LoadOffset-StoreOffset;
}

/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store.
static int AnalyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
                                          StoreInst *DepSI) {
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepSI->getValueOperand()->getType()->isStructTy() ||
      DepSI->getValueOperand()->getType()->isArrayTy())
    return -1;

  const DataLayout &DL = DepSI->getModule()->getDataLayout();
  Value *StorePtr = DepSI->getPointerOperand();
  uint64_t StoreSize = DL.getTypeSizeInBits(DepSI->getValueOperand()->getType());
  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                        StorePtr, StoreSize, DL);
}
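// Worked example (illustrative assumption: both accesses share the same base
// pointer P): a clobbering "store i64 %v, i64* P" covers bytes [0, 8) and a
// later "load i32" from P+4 reads bytes [4, 8). The load is fully contained in
// the stored bits, so AnalyzeLoadFromClobberingWrite returns
// LoadOffset - StoreOffset = 4, and GetStoreValueForLoad later extracts those
// bytes (a 32-bit right shift on a little-endian target) instead of re-loading
// from memory.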
/// This function is called when we have a
/// memdep query of a load that ends up being clobbered by another load. See if
/// the other load can feed into the second load.
static int AnalyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr,
                                         LoadInst *DepLI, const DataLayout &DL){
  // Cannot handle reading from store of first-class aggregate yet.
  if (DepLI->getType()->isStructTy() || DepLI->getType()->isArrayTy())
    return -1;

  Value *DepPtr = DepLI->getPointerOperand();
  uint64_t DepSize = DL.getTypeSizeInBits(DepLI->getType());
  int R = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, DepSize, DL);
  if (R != -1) return R;

  // If we have a load/load clobber and DepLI can be widened to cover this
  // load, then we should widen it!
  int64_t LoadOffs = 0;
  const Value *LoadBase =
      GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);

  unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
      LoadBase, LoadOffs, LoadSize, DepLI);
  if (Size == 0) return -1;

  return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, DepPtr, Size*8, DL);
}



static int AnalyzeLoadFromClobberingMemInst(Type *LoadTy, Value *LoadPtr,
                                            MemIntrinsic *MI,
                                            const DataLayout &DL) {
  // If the mem operation is a non-constant size, we can't handle it.
  ConstantInt *SizeCst = dyn_cast<ConstantInt>(MI->getLength());
  if (!SizeCst) return -1;
  uint64_t MemSizeInBits = SizeCst->getZExtValue()*8;

  // If this is memset, we just need to see if the offset is valid in the size
  // of the memset.
  if (MI->getIntrinsicID() == Intrinsic::memset)
    return AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr, MI->getDest(),
                                          MemSizeInBits, DL);

  // If we have a memcpy/memmove, the only case we can handle is if this is a
  // copy from constant memory. In that case, we can read directly from the
  // constant memory.
  MemTransferInst *MTI = cast<MemTransferInst>(MI);

  Constant *Src = dyn_cast<Constant>(MTI->getSource());
  if (!Src) return -1;

  GlobalVariable *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(Src, DL));
  if (!GV || !GV->isConstant()) return -1;

  // See if the access is within the bounds of the transfer.
  int Offset = AnalyzeLoadFromClobberingWrite(LoadTy, LoadPtr,
                                              MI->getDest(), MemSizeInBits, DL);
  if (Offset == -1)
    return Offset;

  unsigned AS = Src->getType()->getPointerAddressSpace();
  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
      ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
                                       OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  if (ConstantFoldLoadFromConstPtr(Src, DL))
    return Offset;
  return -1;
}


/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering store. This means
/// that the store provides bits used by the load but the pointers don't
/// mustalias. Check this case to see if there is anything more we can do
/// before we give up.
static Value *GetStoreValueForLoad(Value *SrcVal, unsigned Offset,
                                   Type *LoadTy,
                                   Instruction *InsertPt, const DataLayout &DL){
  LLVMContext &Ctx = SrcVal->getType()->getContext();

  uint64_t StoreSize = (DL.getTypeSizeInBits(SrcVal->getType()) + 7) / 8;
  uint64_t LoadSize = (DL.getTypeSizeInBits(LoadTy) + 7) / 8;

  IRBuilder<> Builder(InsertPt);

  // Compute which bits of the stored value are being used by the load. Convert
  // to an integer type to start with.
  if (SrcVal->getType()->getScalarType()->isPointerTy())
    SrcVal = Builder.CreatePtrToInt(SrcVal,
                                    DL.getIntPtrType(SrcVal->getType()));
  if (!SrcVal->getType()->isIntegerTy())
    SrcVal = Builder.CreateBitCast(SrcVal, IntegerType::get(Ctx, StoreSize*8));

  // Shift the bits to the least significant depending on endianness.
  unsigned ShiftAmt;
  if (DL.isLittleEndian())
    ShiftAmt = Offset*8;
  else
    ShiftAmt = (StoreSize-LoadSize-Offset)*8;

  if (ShiftAmt)
    SrcVal = Builder.CreateLShr(SrcVal, ShiftAmt);

  if (LoadSize != StoreSize)
    SrcVal = Builder.CreateTrunc(SrcVal, IntegerType::get(Ctx, LoadSize*8));

  return CoerceAvailableValueToLoadType(SrcVal, LoadTy, Builder, DL);
}

/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering load. This means
/// that the load *may* provide bits used by the load but we can't be sure
/// because the pointers don't mustalias. Check this case to see if there is
/// anything more we can do before we give up.
static Value *GetLoadValueForLoad(LoadInst *SrcVal, unsigned Offset,
                                  Type *LoadTy, Instruction *InsertPt,
                                  GVN &gvn) {
  const DataLayout &DL = SrcVal->getModule()->getDataLayout();
  // If Offset+LoadTy exceeds the size of SrcVal, then we must be wanting to
  // widen SrcVal out to a larger load.
  unsigned SrcValSize = DL.getTypeStoreSize(SrcVal->getType());
  unsigned LoadSize = DL.getTypeStoreSize(LoadTy);
  if (Offset+LoadSize > SrcValSize) {
    assert(SrcVal->isSimple() && "Cannot widen volatile/atomic load!");
    assert(SrcVal->getType()->isIntegerTy() && "Can't widen non-integer load");
    // If we have a load/load clobber and DepLI can be widened to cover this
    // load, then we should widen it to the next power of 2 size big enough!
    unsigned NewLoadSize = Offset+LoadSize;
    if (!isPowerOf2_32(NewLoadSize))
      NewLoadSize = NextPowerOf2(NewLoadSize);

    Value *PtrVal = SrcVal->getPointerOperand();

    // Insert the new load after the old load. This ensures that subsequent
    // memdep queries will find the new load. We can't easily remove the old
    // load completely because it is already in the value numbering table.
    IRBuilder<> Builder(SrcVal->getParent(), ++BasicBlock::iterator(SrcVal));
    Type *DestPTy =
        IntegerType::get(LoadTy->getContext(), NewLoadSize*8);
    DestPTy = PointerType::get(DestPTy,
                               PtrVal->getType()->getPointerAddressSpace());
    Builder.SetCurrentDebugLocation(SrcVal->getDebugLoc());
    PtrVal = Builder.CreateBitCast(PtrVal, DestPTy);
    LoadInst *NewLoad = Builder.CreateLoad(PtrVal);
    NewLoad->takeName(SrcVal);
    NewLoad->setAlignment(SrcVal->getAlignment());

    DEBUG(dbgs() << "GVN WIDENED LOAD: " << *SrcVal << "\n");
    DEBUG(dbgs() << "TO: " << *NewLoad << "\n");

    // Replace uses of the original load with the wider load. On a big endian
    // system, we need to shift down to get the relevant bits.
    Value *RV = NewLoad;
    if (DL.isBigEndian())
      RV = Builder.CreateLShr(RV,
                    NewLoadSize*8-SrcVal->getType()->getPrimitiveSizeInBits());
    RV = Builder.CreateTrunc(RV, SrcVal->getType());
    SrcVal->replaceAllUsesWith(RV);

    // We would like to use gvn.markInstructionForDeletion here, but we can't
    // because the load is already memoized into the leader map table that GVN
    // tracks. It is potentially possible to remove the load from the table,
    // but then all of the operations based on it would need to be rehashed.
    // Just leave the dead load around.
    gvn.getMemDep().removeInstruction(SrcVal);
    SrcVal = NewLoad;
  }

  return GetStoreValueForLoad(SrcVal, Offset, LoadTy, InsertPt, DL);
}


/// This function is called when we have a
/// memdep query of a load that ends up being a clobbering mem intrinsic.
static Value *GetMemInstValueForLoad(MemIntrinsic *SrcInst, unsigned Offset,
                                     Type *LoadTy, Instruction *InsertPt,
                                     const DataLayout &DL){
  LLVMContext &Ctx = LoadTy->getContext();
  uint64_t LoadSize = DL.getTypeSizeInBits(LoadTy)/8;

  IRBuilder<> Builder(InsertPt);

  // We know that this method is only called when the mem transfer fully
  // provides the bits for the load.
  if (MemSetInst *MSI = dyn_cast<MemSetInst>(SrcInst)) {
    // memset(P, 'x', 1234) -> splat('x'), even if x is a variable, and
    // independently of what the offset is.
    Value *Val = MSI->getValue();
    if (LoadSize != 1)
      Val = Builder.CreateZExt(Val, IntegerType::get(Ctx, LoadSize*8));

    Value *OneElt = Val;

    // Splat the value out to the right number of bits.
    for (unsigned NumBytesSet = 1; NumBytesSet != LoadSize; ) {
      // If we can double the number of bytes set, do it.
      if (NumBytesSet*2 <= LoadSize) {
        Value *ShVal = Builder.CreateShl(Val, NumBytesSet*8);
        Val = Builder.CreateOr(Val, ShVal);
        NumBytesSet <<= 1;
        continue;
      }

      // Otherwise insert one byte at a time.
      Value *ShVal = Builder.CreateShl(Val, 1*8);
      Val = Builder.CreateOr(OneElt, ShVal);
      ++NumBytesSet;
    }

    return CoerceAvailableValueToLoadType(Val, LoadTy, Builder, DL);
  }

  // Otherwise, this is a memcpy/memmove from a constant global.
  MemTransferInst *MTI = cast<MemTransferInst>(SrcInst);
  Constant *Src = cast<Constant>(MTI->getSource());
  unsigned AS = Src->getType()->getPointerAddressSpace();

  // Otherwise, see if we can constant fold a load from the constant with the
  // offset applied as appropriate.
  Src = ConstantExpr::getBitCast(Src,
                                 Type::getInt8PtrTy(Src->getContext(), AS));
  Constant *OffsetCst =
      ConstantInt::get(Type::getInt64Ty(Src->getContext()), (unsigned)Offset);
  Src = ConstantExpr::getGetElementPtr(Type::getInt8Ty(Src->getContext()), Src,
                                       OffsetCst);
  Src = ConstantExpr::getBitCast(Src, PointerType::get(LoadTy, AS));
  return ConstantFoldLoadFromConstPtr(Src, DL);
}
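// Illustration of the memset path above (a sketch, not compiled code; the
// pointer name and sizes are made up for the example): if the clobbering
// intrinsic is "memset(P, i8 0xAB, 16)" and the query is a "load i32" whose
// bytes lie inside those 16 bytes, the loop above zero-extends the byte and
// repeatedly doubles it (0xAB -> 0xABAB -> 0xABABABAB), so the load is
// replaced by that splatted value without touching memory.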
/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                         GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].isUndefValue() && "Dead BB dominate this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i) {
    const AvailableValueInBlock &AV = ValuesPerBlock[i];
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}

Value *AvailableValueInBlock::MaterializeAdjustedValue(LoadInst *LI,
                                                       GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = GetStoreValueForLoad(Res, Offset, LoadTy, BB->getTerminator(), DL);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset << " "
                   << *getSimpleValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = GetLoadValueForLoad(Load, Offset, LoadTy, BB->getTerminator(),
                                gvn);

      DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset << " "
                   << *getCoercedLoadValue() << '\n'
                   << *Res << '\n' << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = GetMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 BB->getTerminator(), DL);
    DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                 << " " << *getMemIntrinValue() << '\n'
                 << *Res << '\n' << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {

  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available (ValuesPerBlock), and also keep track of
  // whether we see dependencies that produce an unknown value for the load
  // (such as a call that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op is disguised as a load evaluating the same
      // value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    if (DepInfo.isClobber()) {
      // The address being loaded in this non-local block may not be the same as
      // the pointer operand of the load if PHI translation occurs. Make sure
      // to consider the right address.
      Value *Address = Deps[i].getAddress();

      // If the dependence is to a store that writes to a superset of the bits
      // read by the load, we can extract the bits we need for the load from the
      // stored value.
      if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInfo.getInst())) {
        if (Address) {
          int Offset =
              AnalyzeLoadFromClobberingStore(LI->getType(), Address, DepSI);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                      DepSI->getValueOperand(),
                                                                Offset));
            continue;
          }
        }
      }

      // Check to see if we have something like this:
      //    load i32* P
      //    load i8* (P+1)
      // if we have this, replace the later with an extraction from the former.
      if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInfo.getInst())) {
        // If this is a clobber and L is the first instruction in its block, then
        // we have the first instruction in the entry block.
        if (DepLI != LI && Address) {
          int Offset =
              AnalyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);

          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, DepLI,
                                                                    Offset));
            continue;
          }
        }
      }

      // If the clobbering value is a memset/memcpy/memmove, see if we can
      // forward a value on from it.
      if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInfo.getInst())) {
        if (Address) {
          int Offset = AnalyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                        DepMI, DL);
          if (Offset != -1) {
            ValuesPerBlock.push_back(AvailableValueInBlock::getMI(DepBB, DepMI,
                                                                  Offset));
            continue;
          }
        }
      }

      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // DepInfo.isDef() here

    Instruction *DepInst = DepInfo.getInst();

    // Loading the allocation -> undef.
    if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
        // Loading immediately after lifetime begin -> undef.
        isLifetimeStart(DepInst)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                             UndefValue::get(LI->getType())));
      continue;
    }

    // Loading from calloc (which zero initializes memory) -> zero
    if (isCallocLikeFn(DepInst, TLI)) {
      ValuesPerBlock.push_back(AvailableValueInBlock::get(
          DepBB, Constant::getNullValue(LI->getType())));
      continue;
    }

    if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
      // Reject loads and stores that are to the same address but are of
      // different types if we have to.
      if (S->getValueOperand()->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (!CanCoerceMustAliasedValueToLoad(S->getValueOperand(),
                                             LI->getType(), DL)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }

      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                        S->getValueOperand()));
      continue;
    }

    if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
      // If the types mismatch and we can't handle it, reject reuse of the load.
      if (LD->getType() != LI->getType()) {
        // If the stored value is larger or equal to the loaded value, we can
        // reuse it.
        if (!CanCoerceMustAliasedValueToLoad(LD, LI->getType(), DL)) {
          UnavailableBlocks.push_back(DepBB);
          continue;
        }
      }
      ValuesPerBlock.push_back(AvailableValueInBlock::getLoad(DepBB, LD));
      continue;
    }

    UnavailableBlocks.push_back(DepBB);
  }
}

bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).

  SmallPtrSet<BasicBlock *, 4> Blockers;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    Blockers.insert(UnavailableBlocks[i]);

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;

  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock*, char> FullyAvailableBlocks;
  for (unsigned i = 0, e = ValuesPerBlock.size(); i != e; ++i)
    FullyAvailableBlocks[ValuesPerBlock[i].BB] = true;
  for (unsigned i = 0, e = UnavailableBlocks.size(); i != e; ++i)
    FullyAvailableBlocks[UnavailableBlocks[i]] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (pred_iterator PI = pred_begin(LoadBB), E = pred_end(LoadBB);
       PI != E; ++PI) {
    BasicBlock *Pred = *PI;
    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        DEBUG(dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        DEBUG(dbgs()
              << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
              << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                 << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = LI->getModule()->getDataLayout();
  SmallVector<Instruction*, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), DL, AC);
    Value *LoadPtr = nullptr;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi translated value,
    // we fail PRE.
1620 if (!LoadPtr) { 1621 DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: " 1622 << *LI->getPointerOperand() << "\n"); 1623 CanDoPRE = false; 1624 break; 1625 } 1626 1627 PredLoad.second = LoadPtr; 1628 } 1629 1630 if (!CanDoPRE) { 1631 while (!NewInsts.empty()) { 1632 Instruction *I = NewInsts.pop_back_val(); 1633 if (MD) MD->removeInstruction(I); 1634 I->eraseFromParent(); 1635 } 1636 // HINT: Don't revert the edge-splitting as following transformation may 1637 // also need to split these critical edges. 1638 return !CriticalEdgePred.empty(); 1639 } 1640 1641 // Okay, we can eliminate this load by inserting a reload in the predecessor 1642 // and using PHI construction to get the value in the other predecessors, do 1643 // it. 1644 DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n'); 1645 DEBUG(if (!NewInsts.empty()) 1646 dbgs() << "INSERTED " << NewInsts.size() << " INSTS: " 1647 << *NewInsts.back() << '\n'); 1648 1649 // Assign value numbers to the new instructions. 1650 for (unsigned i = 0, e = NewInsts.size(); i != e; ++i) { 1651 // FIXME: We really _ought_ to insert these value numbers into their 1652 // parent's availability map. However, in doing so, we risk getting into 1653 // ordering issues. If a block hasn't been processed yet, we would be 1654 // marking a value as AVAIL-IN, which isn't what we intend. 1655 VN.lookup_or_add(NewInsts[i]); 1656 } 1657 1658 for (const auto &PredLoad : PredLoads) { 1659 BasicBlock *UnavailablePred = PredLoad.first; 1660 Value *LoadPtr = PredLoad.second; 1661 1662 Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false, 1663 LI->getAlignment(), 1664 UnavailablePred->getTerminator()); 1665 1666 // Transfer the old load's AA tags to the new load. 1667 AAMDNodes Tags; 1668 LI->getAAMetadata(Tags); 1669 if (Tags) 1670 NewLoad->setAAMetadata(Tags); 1671 1672 if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group)) 1673 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD); 1674 1675 // Transfer DebugLoc. 1676 NewLoad->setDebugLoc(LI->getDebugLoc()); 1677 1678 // Add the newly created load. 1679 ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred, 1680 NewLoad)); 1681 MD->invalidateCachedPointerInfo(LoadPtr); 1682 DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n'); 1683 } 1684 1685 // Perform PHI construction. 1686 Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this); 1687 LI->replaceAllUsesWith(V); 1688 if (isa<PHINode>(V)) 1689 V->takeName(LI); 1690 if (Instruction *I = dyn_cast<Instruction>(V)) 1691 I->setDebugLoc(LI->getDebugLoc()); 1692 if (V->getType()->getScalarType()->isPointerTy()) 1693 MD->invalidateCachedPointerInfo(V); 1694 markInstructionForDeletion(LI); 1695 ++NumPRELoad; 1696 return true; 1697 } 1698 1699 /// Attempt to eliminate a load whose dependencies are 1700 /// non-local by performing PHI construction. 1701 bool GVN::processNonLocalLoad(LoadInst *LI) { 1702 // Step 1: Find the non-local dependencies of the load. 1703 LoadDepVect Deps; 1704 MD->getNonLocalPointerDependency(LI, Deps); 1705 1706 // If we had to process more than one hundred blocks to find the 1707 // dependencies, this load isn't worth worrying about. Optimizing 1708 // it will be too expensive. 1709 unsigned NumDeps = Deps.size(); 1710 if (NumDeps > 100) 1711 return false; 1712 1713 // If we had a phi translation failure, we'll have a single entry which is a 1714 // clobber in the current block. Reject this early. 
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    DEBUG(
      dbgs() << "GVN: non-local load ";
      LI->printAsOperand(dbgs());
      dbgs() << " has unknown dependencies\n";
    );
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (Instruction *I = dyn_cast<Instruction>(V))
      if (LI->getDebugLoc())
        I->setDebugLoc(LI->getDebugLoc());
    if (V->getType()->getScalarType()->isPointerTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}

bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
  assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
         "This function can only be called with llvm.assume intrinsic");
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      // Insert a new store-to-null instruction before the assume to indicate
      // that this code is not reachable.  FIXME: We could insert an
      // unreachable instruction directly because we can modify the CFG.
      new StoreInst(UndefValue::get(Int8Ty),
                    Constant::getNullValue(Int8Ty->getPointerTo()),
                    IntrinsicI);
    }
    markInstructionForDeletion(IntrinsicI);
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);

    // This property only holds in dominated successors; propagateEquality
    // will check dominance for us.
1802 Changed |= propagateEquality(V, True, Edge, false); 1803 } 1804 1805 // We can replace assume value with true, which covers cases like this: 1806 // call void @llvm.assume(i1 %cmp) 1807 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true 1808 ReplaceWithConstMap[V] = True; 1809 1810 // If one of *cmp *eq operand is const, adding it to map will cover this: 1811 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen 1812 // call void @llvm.assume(i1 %cmp) 1813 // ret float %0 ; will change it to ret float 3.000000e+00 1814 if (auto *CmpI = dyn_cast<CmpInst>(V)) { 1815 if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ || 1816 CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ || 1817 (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ && 1818 CmpI->getFastMathFlags().noNaNs())) { 1819 Value *CmpLHS = CmpI->getOperand(0); 1820 Value *CmpRHS = CmpI->getOperand(1); 1821 if (isa<Constant>(CmpLHS)) 1822 std::swap(CmpLHS, CmpRHS); 1823 auto *RHSConst = dyn_cast<Constant>(CmpRHS); 1824 1825 // If only one operand is constant. 1826 if (RHSConst != nullptr && !isa<Constant>(CmpLHS)) 1827 ReplaceWithConstMap[CmpLHS] = RHSConst; 1828 } 1829 } 1830 return Changed; 1831 } 1832 1833 static void patchReplacementInstruction(Instruction *I, Value *Repl) { 1834 // Patch the replacement so that it is not more restrictive than the value 1835 // being replaced. 1836 BinaryOperator *Op = dyn_cast<BinaryOperator>(I); 1837 BinaryOperator *ReplOp = dyn_cast<BinaryOperator>(Repl); 1838 if (Op && ReplOp) 1839 ReplOp->andIRFlags(Op); 1840 1841 if (Instruction *ReplInst = dyn_cast<Instruction>(Repl)) { 1842 // FIXME: If both the original and replacement value are part of the 1843 // same control-flow region (meaning that the execution of one 1844 // guarantees the execution of the other), then we can combine the 1845 // noalias scopes here and do better than the general conservative 1846 // answer used in combineMetadata(). 1847 1848 // In general, GVN unifies expressions over different control-flow 1849 // regions, and so we need a conservative combination of the noalias 1850 // scopes. 1851 static const unsigned KnownIDs[] = { 1852 LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope, 1853 LLVMContext::MD_noalias, LLVMContext::MD_range, 1854 LLVMContext::MD_fpmath, LLVMContext::MD_invariant_load, 1855 LLVMContext::MD_invariant_group}; 1856 combineMetadata(ReplInst, I, KnownIDs); 1857 } 1858 } 1859 1860 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { 1861 patchReplacementInstruction(I, Repl); 1862 I->replaceAllUsesWith(Repl); 1863 } 1864 1865 /// Attempt to eliminate a load, first by eliminating it 1866 /// locally, and then attempting non-local elimination if that fails. 1867 bool GVN::processLoad(LoadInst *L) { 1868 if (!MD) 1869 return false; 1870 1871 if (!L->isSimple()) 1872 return false; 1873 1874 if (L->use_empty()) { 1875 markInstructionForDeletion(L); 1876 return true; 1877 } 1878 1879 // ... to a pointer that has been loaded from before... 1880 MemDepResult Dep = MD->getDependency(L); 1881 const DataLayout &DL = L->getModule()->getDataLayout(); 1882 1883 // If we have a clobber and target data is around, see if this is a clobber 1884 // that we can fix up through code synthesis. 
1885 if (Dep.isClobber()) { 1886 // Check to see if we have something like this: 1887 // store i32 123, i32* %P 1888 // %A = bitcast i32* %P to i8* 1889 // %B = gep i8* %A, i32 1 1890 // %C = load i8* %B 1891 // 1892 // We could do that by recognizing if the clobber instructions are obviously 1893 // a common base + constant offset, and if the previous store (or memset) 1894 // completely covers this load. This sort of thing can happen in bitfield 1895 // access code. 1896 Value *AvailVal = nullptr; 1897 if (StoreInst *DepSI = dyn_cast<StoreInst>(Dep.getInst())) { 1898 int Offset = AnalyzeLoadFromClobberingStore( 1899 L->getType(), L->getPointerOperand(), DepSI); 1900 if (Offset != -1) 1901 AvailVal = GetStoreValueForLoad(DepSI->getValueOperand(), Offset, 1902 L->getType(), L, DL); 1903 } 1904 1905 // Check to see if we have something like this: 1906 // load i32* P 1907 // load i8* (P+1) 1908 // if we have this, replace the later with an extraction from the former. 1909 if (LoadInst *DepLI = dyn_cast<LoadInst>(Dep.getInst())) { 1910 // If this is a clobber and L is the first instruction in its block, then 1911 // we have the first instruction in the entry block. 1912 if (DepLI == L) 1913 return false; 1914 1915 int Offset = AnalyzeLoadFromClobberingLoad( 1916 L->getType(), L->getPointerOperand(), DepLI, DL); 1917 if (Offset != -1) 1918 AvailVal = GetLoadValueForLoad(DepLI, Offset, L->getType(), L, *this); 1919 } 1920 1921 // If the clobbering value is a memset/memcpy/memmove, see if we can forward 1922 // a value on from it. 1923 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(Dep.getInst())) { 1924 int Offset = AnalyzeLoadFromClobberingMemInst( 1925 L->getType(), L->getPointerOperand(), DepMI, DL); 1926 if (Offset != -1) 1927 AvailVal = GetMemInstValueForLoad(DepMI, Offset, L->getType(), L, DL); 1928 } 1929 1930 if (AvailVal) { 1931 DEBUG(dbgs() << "GVN COERCED INST:\n" << *Dep.getInst() << '\n' 1932 << *AvailVal << '\n' << *L << "\n\n\n"); 1933 1934 // Replace the load! 1935 L->replaceAllUsesWith(AvailVal); 1936 if (AvailVal->getType()->getScalarType()->isPointerTy()) 1937 MD->invalidateCachedPointerInfo(AvailVal); 1938 markInstructionForDeletion(L); 1939 ++NumGVNLoad; 1940 return true; 1941 } 1942 1943 // If the value isn't available, don't do anything! 1944 DEBUG( 1945 // fast print dep, using operator<< on instruction is too slow. 1946 dbgs() << "GVN: load "; 1947 L->printAsOperand(dbgs()); 1948 Instruction *I = Dep.getInst(); 1949 dbgs() << " is clobbered by " << *I << '\n'; 1950 ); 1951 return false; 1952 } 1953 1954 // If it is defined in another block, try harder. 1955 if (Dep.isNonLocal()) 1956 return processNonLocalLoad(L); 1957 1958 if (!Dep.isDef()) { 1959 DEBUG( 1960 // fast print dep, using operator<< on instruction is too slow. 1961 dbgs() << "GVN: load "; 1962 L->printAsOperand(dbgs()); 1963 dbgs() << " has unknown dependence\n"; 1964 ); 1965 return false; 1966 } 1967 1968 Instruction *DepInst = Dep.getInst(); 1969 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) { 1970 Value *StoredVal = DepSI->getValueOperand(); 1971 1972 // The store and load are to a must-aliased pointer, but they may not 1973 // actually have the same type. See if we know how to reuse the stored 1974 // value (depending on its type). 
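    // For example (illustrative only, names hypothetical):
    //   store i32 %x, i32* %P
    //   %f = load float* %Q        ; %Q must-aliases %P
    // can reuse %x by rewriting the use as "bitcast i32 %x to float" rather
    // than reloading from memory.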
1975 if (StoredVal->getType() != L->getType()) { 1976 IRBuilder<> Builder(L); 1977 StoredVal = 1978 CoerceAvailableValueToLoadType(StoredVal, L->getType(), Builder, DL); 1979 if (!StoredVal) 1980 return false; 1981 1982 DEBUG(dbgs() << "GVN COERCED STORE:\n" << *DepSI << '\n' << *StoredVal 1983 << '\n' << *L << "\n\n\n"); 1984 } 1985 1986 // Remove it! 1987 L->replaceAllUsesWith(StoredVal); 1988 if (StoredVal->getType()->getScalarType()->isPointerTy()) 1989 MD->invalidateCachedPointerInfo(StoredVal); 1990 markInstructionForDeletion(L); 1991 ++NumGVNLoad; 1992 return true; 1993 } 1994 1995 if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) { 1996 Value *AvailableVal = DepLI; 1997 1998 // The loads are of a must-aliased pointer, but they may not actually have 1999 // the same type. See if we know how to reuse the previously loaded value 2000 // (depending on its type). 2001 if (DepLI->getType() != L->getType()) { 2002 IRBuilder<> Builder(L); 2003 AvailableVal = 2004 CoerceAvailableValueToLoadType(DepLI, L->getType(), Builder, DL); 2005 if (!AvailableVal) 2006 return false; 2007 2008 DEBUG(dbgs() << "GVN COERCED LOAD:\n" << *DepLI << "\n" << *AvailableVal 2009 << "\n" << *L << "\n\n\n"); 2010 } 2011 2012 // Remove it! 2013 patchAndReplaceAllUsesWith(L, AvailableVal); 2014 if (DepLI->getType()->getScalarType()->isPointerTy()) 2015 MD->invalidateCachedPointerInfo(DepLI); 2016 markInstructionForDeletion(L); 2017 ++NumGVNLoad; 2018 return true; 2019 } 2020 2021 // If this load really doesn't depend on anything, then we must be loading an 2022 // undef value. This can happen when loading for a fresh allocation with no 2023 // intervening stores, for example. 2024 if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) { 2025 L->replaceAllUsesWith(UndefValue::get(L->getType())); 2026 markInstructionForDeletion(L); 2027 ++NumGVNLoad; 2028 return true; 2029 } 2030 2031 // If this load occurs either right after a lifetime begin, 2032 // then the loaded value is undefined. 2033 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(DepInst)) { 2034 if (II->getIntrinsicID() == Intrinsic::lifetime_start) { 2035 L->replaceAllUsesWith(UndefValue::get(L->getType())); 2036 markInstructionForDeletion(L); 2037 ++NumGVNLoad; 2038 return true; 2039 } 2040 } 2041 2042 // If this load follows a calloc (which zero initializes memory), 2043 // then the loaded value is zero 2044 if (isCallocLikeFn(DepInst, TLI)) { 2045 L->replaceAllUsesWith(Constant::getNullValue(L->getType())); 2046 markInstructionForDeletion(L); 2047 ++NumGVNLoad; 2048 return true; 2049 } 2050 2051 return false; 2052 } 2053 2054 // In order to find a leader for a given value number at a 2055 // specific basic block, we first obtain the list of all Values for that number, 2056 // and then scan the list to find one whose block dominates the block in 2057 // question. This is fast because dominator tree queries consist of only 2058 // a few comparisons of DFS numbers. 
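// For instance (illustrative only), if value number 7 has the leaders
// {%a in BB1, i32 42 in BB3}, then a query from a block dominated by BB3
// returns the constant 42 (constants are preferred), while a query from a
// block dominated only by BB1 returns %a.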
2059 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) { 2060 LeaderTableEntry Vals = LeaderTable[num]; 2061 if (!Vals.Val) return nullptr; 2062 2063 Value *Val = nullptr; 2064 if (DT->dominates(Vals.BB, BB)) { 2065 Val = Vals.Val; 2066 if (isa<Constant>(Val)) return Val; 2067 } 2068 2069 LeaderTableEntry* Next = Vals.Next; 2070 while (Next) { 2071 if (DT->dominates(Next->BB, BB)) { 2072 if (isa<Constant>(Next->Val)) return Next->Val; 2073 if (!Val) Val = Next->Val; 2074 } 2075 2076 Next = Next->Next; 2077 } 2078 2079 return Val; 2080 } 2081 2082 /// There is an edge from 'Src' to 'Dst'. Return 2083 /// true if every path from the entry block to 'Dst' passes via this edge. In 2084 /// particular 'Dst' must not be reachable via another edge from 'Src'. 2085 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E, 2086 DominatorTree *DT) { 2087 // While in theory it is interesting to consider the case in which Dst has 2088 // more than one predecessor, because Dst might be part of a loop which is 2089 // only reachable from Src, in practice it is pointless since at the time 2090 // GVN runs all such loops have preheaders, which means that Dst will have 2091 // been changed to have only one predecessor, namely Src. 2092 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor(); 2093 const BasicBlock *Src = E.getStart(); 2094 assert((!Pred || Pred == Src) && "No edge between these basic blocks!"); 2095 (void)Src; 2096 return Pred != nullptr; 2097 } 2098 2099 // Tries to replace instruction with const, using information from 2100 // ReplaceWithConstMap. 2101 bool GVN::replaceOperandsWithConsts(Instruction *Instr) const { 2102 bool Changed = false; 2103 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) { 2104 Value *Operand = Instr->getOperand(OpNum); 2105 auto it = ReplaceWithConstMap.find(Operand); 2106 if (it != ReplaceWithConstMap.end()) { 2107 assert(!isa<Constant>(Operand) && 2108 "Replacing constants with constants is invalid"); 2109 DEBUG(dbgs() << "GVN replacing: " << *Operand << " with " << *it->second 2110 << " in instruction " << *Instr << '\n'); 2111 Instr->setOperand(OpNum, it->second); 2112 Changed = true; 2113 } 2114 } 2115 return Changed; 2116 } 2117 2118 /// The given values are known to be equal in every block 2119 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with 2120 /// 'RHS' everywhere in the scope. Returns whether a change was made. 2121 /// If DominatesByEdge is false, then it means that it is dominated by Root.End. 2122 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root, 2123 bool DominatesByEdge) { 2124 SmallVector<std::pair<Value*, Value*>, 4> Worklist; 2125 Worklist.push_back(std::make_pair(LHS, RHS)); 2126 bool Changed = false; 2127 // For speed, compute a conservative fast approximation to 2128 // DT->dominates(Root, Root.getEnd()); 2129 bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT); 2130 2131 while (!Worklist.empty()) { 2132 std::pair<Value*, Value*> Item = Worklist.pop_back_val(); 2133 LHS = Item.first; RHS = Item.second; 2134 2135 if (LHS == RHS) 2136 continue; 2137 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!"); 2138 2139 // Don't try to propagate equalities between constants. 2140 if (isa<Constant>(LHS) && isa<Constant>(RHS)) 2141 continue; 2142 2143 // Prefer a constant on the right-hand side, or an Argument if no constants. 
2144 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS))) 2145 std::swap(LHS, RHS); 2146 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!"); 2147 2148 // If there is no obvious reason to prefer the left-hand side over the 2149 // right-hand side, ensure the longest lived term is on the right-hand side, 2150 // so the shortest lived term will be replaced by the longest lived. 2151 // This tends to expose more simplifications. 2152 uint32_t LVN = VN.lookup_or_add(LHS); 2153 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) || 2154 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) { 2155 // Move the 'oldest' value to the right-hand side, using the value number 2156 // as a proxy for age. 2157 uint32_t RVN = VN.lookup_or_add(RHS); 2158 if (LVN < RVN) { 2159 std::swap(LHS, RHS); 2160 LVN = RVN; 2161 } 2162 } 2163 2164 // If value numbering later sees that an instruction in the scope is equal 2165 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve 2166 // the invariant that instructions only occur in the leader table for their 2167 // own value number (this is used by removeFromLeaderTable), do not do this 2168 // if RHS is an instruction (if an instruction in the scope is morphed into 2169 // LHS then it will be turned into RHS by the next GVN iteration anyway, so 2170 // using the leader table is about compiling faster, not optimizing better). 2171 // The leader table only tracks basic blocks, not edges. Only add to if we 2172 // have the simple case where the edge dominates the end. 2173 if (RootDominatesEnd && !isa<Instruction>(RHS)) 2174 addToLeaderTable(LVN, RHS, Root.getEnd()); 2175 2176 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As 2177 // LHS always has at least one use that is not dominated by Root, this will 2178 // never do anything if LHS has only one use. 2179 if (!LHS->hasOneUse()) { 2180 unsigned NumReplacements = 2181 DominatesByEdge 2182 ? replaceDominatedUsesWith(LHS, RHS, *DT, Root) 2183 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getEnd()); 2184 2185 Changed |= NumReplacements > 0; 2186 NumGVNEqProp += NumReplacements; 2187 } 2188 2189 // Now try to deduce additional equalities from this one. For example, if 2190 // the known equality was "(A != B)" == "false" then it follows that A and B 2191 // are equal in the scope. Only boolean equalities with an explicit true or 2192 // false RHS are currently supported. 2193 if (!RHS->getType()->isIntegerTy(1)) 2194 // Not a boolean equality - bail out. 2195 continue; 2196 ConstantInt *CI = dyn_cast<ConstantInt>(RHS); 2197 if (!CI) 2198 // RHS neither 'true' nor 'false' - bail out. 2199 continue; 2200 // Whether RHS equals 'true'. Otherwise it equals 'false'. 2201 bool isKnownTrue = CI->isAllOnesValue(); 2202 bool isKnownFalse = !isKnownTrue; 2203 2204 // If "A && B" is known true then both A and B are known true. If "A || B" 2205 // is known false then both A and B are known false. 2206 Value *A, *B; 2207 if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) || 2208 (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) { 2209 Worklist.push_back(std::make_pair(A, RHS)); 2210 Worklist.push_back(std::make_pair(B, RHS)); 2211 continue; 2212 } 2213 2214 // If we are propagating an equality like "(A == B)" == "true" then also 2215 // propagate the equality A == B. When propagating a comparison such as 2216 // "(A >= B)" == "true", replace all instances of "A < B" with "false". 
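    // For example (illustrative only), after
    //   %c = icmp sge i32 %a, %b
    //   br i1 %c, label %taken, label %other
    // any "icmp slt i32 %a, %b" in blocks dominated by the true edge can be
    // replaced by false.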
2217 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) { 2218 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1); 2219 2220 // If "A == B" is known true, or "A != B" is known false, then replace 2221 // A with B everywhere in the scope. 2222 if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) || 2223 (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE)) 2224 Worklist.push_back(std::make_pair(Op0, Op1)); 2225 2226 // Handle the floating point versions of equality comparisons too. 2227 if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) || 2228 (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) { 2229 2230 // Floating point -0.0 and 0.0 compare equal, so we can only 2231 // propagate values if we know that we have a constant and that 2232 // its value is non-zero. 2233 2234 // FIXME: We should do this optimization if 'no signed zeros' is 2235 // applicable via an instruction-level fast-math-flag or some other 2236 // indicator that relaxed FP semantics are being used. 2237 2238 if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero()) 2239 Worklist.push_back(std::make_pair(Op0, Op1)); 2240 } 2241 2242 // If "A >= B" is known true, replace "A < B" with false everywhere. 2243 CmpInst::Predicate NotPred = Cmp->getInversePredicate(); 2244 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse); 2245 // Since we don't have the instruction "A < B" immediately to hand, work 2246 // out the value number that it would have and use that to find an 2247 // appropriate instruction (if any). 2248 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2249 uint32_t Num = VN.lookup_or_add_cmp(Cmp->getOpcode(), NotPred, Op0, Op1); 2250 // If the number we were assigned was brand new then there is no point in 2251 // looking for an instruction realizing it: there cannot be one! 2252 if (Num < NextNum) { 2253 Value *NotCmp = findLeader(Root.getEnd(), Num); 2254 if (NotCmp && isa<Instruction>(NotCmp)) { 2255 unsigned NumReplacements = 2256 DominatesByEdge 2257 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root) 2258 : replaceDominatedUsesWith(NotCmp, NotVal, *DT, 2259 Root.getEnd()); 2260 Changed |= NumReplacements > 0; 2261 NumGVNEqProp += NumReplacements; 2262 } 2263 } 2264 // Ensure that any instruction in scope that gets the "A < B" value number 2265 // is replaced with false. 2266 // The leader table only tracks basic blocks, not edges. Only add to if we 2267 // have the simple case where the edge dominates the end. 2268 if (RootDominatesEnd) 2269 addToLeaderTable(Num, NotVal, Root.getEnd()); 2270 2271 continue; 2272 } 2273 } 2274 2275 return Changed; 2276 } 2277 2278 /// When calculating availability, handle an instruction 2279 /// by inserting it into the appropriate sets 2280 bool GVN::processInstruction(Instruction *I) { 2281 // Ignore dbg info intrinsics. 2282 if (isa<DbgInfoIntrinsic>(I)) 2283 return false; 2284 2285 // If the instruction can be easily simplified then do so now in preference 2286 // to value numbering it. Value numbering often exposes redundancies, for 2287 // example if it determines that %y is equal to %x then the instruction 2288 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify. 
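  // (Illustrative continuation of the example above: SimplifyInstruction
  // folds "and i32 %x, %x" to plain %x, so %z's uses are rewritten to %x and
  // %z is marked for deletion below.)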
2289 const DataLayout &DL = I->getModule()->getDataLayout(); 2290 if (Value *V = SimplifyInstruction(I, DL, TLI, DT, AC)) { 2291 I->replaceAllUsesWith(V); 2292 if (MD && V->getType()->getScalarType()->isPointerTy()) 2293 MD->invalidateCachedPointerInfo(V); 2294 markInstructionForDeletion(I); 2295 ++NumGVNSimpl; 2296 return true; 2297 } 2298 2299 if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I)) 2300 if (IntrinsicI->getIntrinsicID() == Intrinsic::assume) 2301 return processAssumeIntrinsic(IntrinsicI); 2302 2303 if (LoadInst *LI = dyn_cast<LoadInst>(I)) { 2304 if (processLoad(LI)) 2305 return true; 2306 2307 unsigned Num = VN.lookup_or_add(LI); 2308 addToLeaderTable(Num, LI, LI->getParent()); 2309 return false; 2310 } 2311 2312 // For conditional branches, we can perform simple conditional propagation on 2313 // the condition value itself. 2314 if (BranchInst *BI = dyn_cast<BranchInst>(I)) { 2315 if (!BI->isConditional()) 2316 return false; 2317 2318 if (isa<Constant>(BI->getCondition())) 2319 return processFoldableCondBr(BI); 2320 2321 Value *BranchCond = BI->getCondition(); 2322 BasicBlock *TrueSucc = BI->getSuccessor(0); 2323 BasicBlock *FalseSucc = BI->getSuccessor(1); 2324 // Avoid multiple edges early. 2325 if (TrueSucc == FalseSucc) 2326 return false; 2327 2328 BasicBlock *Parent = BI->getParent(); 2329 bool Changed = false; 2330 2331 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext()); 2332 BasicBlockEdge TrueE(Parent, TrueSucc); 2333 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true); 2334 2335 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext()); 2336 BasicBlockEdge FalseE(Parent, FalseSucc); 2337 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true); 2338 2339 return Changed; 2340 } 2341 2342 // For switches, propagate the case values into the case destinations. 2343 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) { 2344 Value *SwitchCond = SI->getCondition(); 2345 BasicBlock *Parent = SI->getParent(); 2346 bool Changed = false; 2347 2348 // Remember how many outgoing edges there are to every successor. 2349 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges; 2350 for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i) 2351 ++SwitchEdges[SI->getSuccessor(i)]; 2352 2353 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end(); 2354 i != e; ++i) { 2355 BasicBlock *Dst = i.getCaseSuccessor(); 2356 // If there is only a single edge, propagate the case value into it. 2357 if (SwitchEdges.lookup(Dst) == 1) { 2358 BasicBlockEdge E(Parent, Dst); 2359 Changed |= propagateEquality(SwitchCond, i.getCaseValue(), E, true); 2360 } 2361 } 2362 return Changed; 2363 } 2364 2365 // Instructions with void type don't return a value, so there's 2366 // no point in trying to find redundancies in them. 2367 if (I->getType()->isVoidTy()) 2368 return false; 2369 2370 uint32_t NextNum = VN.getNextUnusedValueNumber(); 2371 unsigned Num = VN.lookup_or_add(I); 2372 2373 // Allocations are always uniquely numbered, so we can save time and memory 2374 // by fast failing them. 2375 if (isa<AllocaInst>(I) || isa<TerminatorInst>(I) || isa<PHINode>(I)) { 2376 addToLeaderTable(Num, I, I->getParent()); 2377 return false; 2378 } 2379 2380 // If the number we were assigned was a brand new VN, then we don't 2381 // need to do a lookup to see if the number already exists 2382 // somewhere in the domtree: it can't! 
2383 if (Num >= NextNum) { 2384 addToLeaderTable(Num, I, I->getParent()); 2385 return false; 2386 } 2387 2388 // Perform fast-path value-number based elimination of values inherited from 2389 // dominators. 2390 Value *repl = findLeader(I->getParent(), Num); 2391 if (!repl) { 2392 // Failure, just remember this instance for future use. 2393 addToLeaderTable(Num, I, I->getParent()); 2394 return false; 2395 } 2396 2397 // Remove it! 2398 patchAndReplaceAllUsesWith(I, repl); 2399 if (MD && repl->getType()->getScalarType()->isPointerTy()) 2400 MD->invalidateCachedPointerInfo(repl); 2401 markInstructionForDeletion(I); 2402 return true; 2403 } 2404 2405 /// runOnFunction - This is the main transformation entry point for a function. 2406 bool GVN::runOnFunction(Function& F) { 2407 if (skipOptnoneFunction(F)) 2408 return false; 2409 2410 if (!NoLoads) 2411 MD = &getAnalysis<MemoryDependenceAnalysis>(); 2412 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2413 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 2414 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 2415 VN.setAliasAnalysis(&getAnalysis<AAResultsWrapperPass>().getAAResults()); 2416 VN.setMemDep(MD); 2417 VN.setDomTree(DT); 2418 2419 bool Changed = false; 2420 bool ShouldContinue = true; 2421 2422 // Merge unconditional branches, allowing PRE to catch more 2423 // optimization opportunities. 2424 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) { 2425 BasicBlock *BB = &*FI++; 2426 2427 bool removedBlock = 2428 MergeBlockIntoPredecessor(BB, DT, /* LoopInfo */ nullptr, MD); 2429 if (removedBlock) ++NumGVNBlocks; 2430 2431 Changed |= removedBlock; 2432 } 2433 2434 unsigned Iteration = 0; 2435 while (ShouldContinue) { 2436 DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n"); 2437 ShouldContinue = iterateOnFunction(F); 2438 Changed |= ShouldContinue; 2439 ++Iteration; 2440 } 2441 2442 if (EnablePRE) { 2443 // Fabricate val-num for dead-code in order to suppress assertion in 2444 // performPRE(). 2445 assignValNumForDeadCode(); 2446 bool PREChanged = true; 2447 while (PREChanged) { 2448 PREChanged = performPRE(F); 2449 Changed |= PREChanged; 2450 } 2451 } 2452 2453 // FIXME: Should perform GVN again after PRE does something. PRE can move 2454 // computations into blocks where they become fully redundant. Note that 2455 // we can't do this until PRE's critical edge splitting updates memdep. 2456 // Actually, when this happens, we should just fully integrate PRE into GVN. 2457 2458 cleanupGlobalSets(); 2459 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each 2460 // iteration. 2461 DeadBlocks.clear(); 2462 2463 return Changed; 2464 } 2465 2466 bool GVN::processBlock(BasicBlock *BB) { 2467 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function 2468 // (and incrementing BI before processing an instruction). 2469 assert(InstrsToErase.empty() && 2470 "We expect InstrsToErase to be empty across iterations"); 2471 if (DeadBlocks.count(BB)) 2472 return false; 2473 2474 // Clearing map before every BB because it can be used only for single BB. 
2475 ReplaceWithConstMap.clear(); 2476 bool ChangedFunction = false; 2477 2478 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); 2479 BI != BE;) { 2480 if (!ReplaceWithConstMap.empty()) 2481 ChangedFunction |= replaceOperandsWithConsts(&*BI); 2482 ChangedFunction |= processInstruction(&*BI); 2483 2484 if (InstrsToErase.empty()) { 2485 ++BI; 2486 continue; 2487 } 2488 2489 // If we need some instructions deleted, do it now. 2490 NumGVNInstr += InstrsToErase.size(); 2491 2492 // Avoid iterator invalidation. 2493 bool AtStart = BI == BB->begin(); 2494 if (!AtStart) 2495 --BI; 2496 2497 for (SmallVectorImpl<Instruction *>::iterator I = InstrsToErase.begin(), 2498 E = InstrsToErase.end(); I != E; ++I) { 2499 DEBUG(dbgs() << "GVN removed: " << **I << '\n'); 2500 if (MD) MD->removeInstruction(*I); 2501 DEBUG(verifyRemoved(*I)); 2502 (*I)->eraseFromParent(); 2503 } 2504 InstrsToErase.clear(); 2505 2506 if (AtStart) 2507 BI = BB->begin(); 2508 else 2509 ++BI; 2510 } 2511 2512 return ChangedFunction; 2513 } 2514 2515 // Instantiate an expression in a predecessor that lacked it. 2516 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred, 2517 unsigned int ValNo) { 2518 // Because we are going top-down through the block, all value numbers 2519 // will be available in the predecessor by the time we need them. Any 2520 // that weren't originally present will have been instantiated earlier 2521 // in this loop. 2522 bool success = true; 2523 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) { 2524 Value *Op = Instr->getOperand(i); 2525 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op)) 2526 continue; 2527 2528 if (Value *V = findLeader(Pred, VN.lookup(Op))) { 2529 Instr->setOperand(i, V); 2530 } else { 2531 success = false; 2532 break; 2533 } 2534 } 2535 2536 // Fail out if we encounter an operand that is not available in 2537 // the PRE predecessor. This is typically because of loads which 2538 // are not value numbered precisely. 2539 if (!success) 2540 return false; 2541 2542 Instr->insertBefore(Pred->getTerminator()); 2543 Instr->setName(Instr->getName() + ".pre"); 2544 Instr->setDebugLoc(Instr->getDebugLoc()); 2545 VN.add(Instr, ValNo); 2546 2547 // Update the availability map to include the new instruction. 2548 addToLeaderTable(ValNo, Instr, Pred); 2549 return true; 2550 } 2551 2552 bool GVN::performScalarPRE(Instruction *CurInst) { 2553 SmallVector<std::pair<Value*, BasicBlock*>, 8> predMap; 2554 2555 if (isa<AllocaInst>(CurInst) || isa<TerminatorInst>(CurInst) || 2556 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() || 2557 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() || 2558 isa<DbgInfoIntrinsic>(CurInst)) 2559 return false; 2560 2561 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from 2562 // sinking the compare again, and it would force the code generator to 2563 // move the i1 from processor flags or predicate registers into a general 2564 // purpose register. 2565 if (isa<CmpInst>(CurInst)) 2566 return false; 2567 2568 // We don't currently value number ANY inline asm calls. 2569 if (CallInst *CallI = dyn_cast<CallInst>(CurInst)) 2570 if (CallI->isInlineAsm()) 2571 return false; 2572 2573 uint32_t ValNo = VN.lookup(CurInst); 2574 2575 // Look for the predecessors for PRE opportunities. We're 2576 // only trying to solve the basic diamond case, where 2577 // a value is computed in the successor and one predecessor, 2578 // but not the other. 
We also explicitly disallow cases 2579 // where the successor is its own predecessor, because they're 2580 // more complicated to get right. 2581 unsigned NumWith = 0; 2582 unsigned NumWithout = 0; 2583 BasicBlock *PREPred = nullptr; 2584 BasicBlock *CurrentBlock = CurInst->getParent(); 2585 predMap.clear(); 2586 2587 for (pred_iterator PI = pred_begin(CurrentBlock), PE = pred_end(CurrentBlock); 2588 PI != PE; ++PI) { 2589 BasicBlock *P = *PI; 2590 // We're not interested in PRE where the block is its 2591 // own predecessor, or in blocks with predecessors 2592 // that are not reachable. 2593 if (P == CurrentBlock) { 2594 NumWithout = 2; 2595 break; 2596 } else if (!DT->isReachableFromEntry(P)) { 2597 NumWithout = 2; 2598 break; 2599 } 2600 2601 Value *predV = findLeader(P, ValNo); 2602 if (!predV) { 2603 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P)); 2604 PREPred = P; 2605 ++NumWithout; 2606 } else if (predV == CurInst) { 2607 /* CurInst dominates this predecessor. */ 2608 NumWithout = 2; 2609 break; 2610 } else { 2611 predMap.push_back(std::make_pair(predV, P)); 2612 ++NumWith; 2613 } 2614 } 2615 2616 // Don't do PRE when it might increase code size, i.e. when 2617 // we would need to insert instructions in more than one pred. 2618 if (NumWithout > 1 || NumWith == 0) 2619 return false; 2620 2621 // We may have a case where all predecessors have the instruction, 2622 // and we just need to insert a phi node. Otherwise, perform 2623 // insertion. 2624 Instruction *PREInstr = nullptr; 2625 2626 if (NumWithout != 0) { 2627 // Don't do PRE across indirect branch. 2628 if (isa<IndirectBrInst>(PREPred->getTerminator())) 2629 return false; 2630 2631 // We can't do PRE safely on a critical edge, so instead we schedule 2632 // the edge to be split and perform the PRE the next time we iterate 2633 // on the function. 2634 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock); 2635 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) { 2636 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum)); 2637 return false; 2638 } 2639 // We need to insert somewhere, so let's give it a shot 2640 PREInstr = CurInst->clone(); 2641 if (!performScalarPREInsertion(PREInstr, PREPred, ValNo)) { 2642 // If we failed insertion, make sure we remove the instruction. 2643 DEBUG(verifyRemoved(PREInstr)); 2644 delete PREInstr; 2645 return false; 2646 } 2647 } 2648 2649 // Either we should have filled in the PRE instruction, or we should 2650 // not have needed insertions. 2651 assert (PREInstr != nullptr || NumWithout == 0); 2652 2653 ++NumGVNPRE; 2654 2655 // Create a PHI to make the value available in this block. 
2656 PHINode *Phi = 2657 PHINode::Create(CurInst->getType(), predMap.size(), 2658 CurInst->getName() + ".pre-phi", &CurrentBlock->front()); 2659 for (unsigned i = 0, e = predMap.size(); i != e; ++i) { 2660 if (Value *V = predMap[i].first) 2661 Phi->addIncoming(V, predMap[i].second); 2662 else 2663 Phi->addIncoming(PREInstr, PREPred); 2664 } 2665 2666 VN.add(Phi, ValNo); 2667 addToLeaderTable(ValNo, Phi, CurrentBlock); 2668 Phi->setDebugLoc(CurInst->getDebugLoc()); 2669 CurInst->replaceAllUsesWith(Phi); 2670 if (MD && Phi->getType()->getScalarType()->isPointerTy()) 2671 MD->invalidateCachedPointerInfo(Phi); 2672 VN.erase(CurInst); 2673 removeFromLeaderTable(ValNo, CurInst, CurrentBlock); 2674 2675 DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n'); 2676 if (MD) 2677 MD->removeInstruction(CurInst); 2678 DEBUG(verifyRemoved(CurInst)); 2679 CurInst->eraseFromParent(); 2680 ++NumGVNInstr; 2681 2682 return true; 2683 } 2684 2685 /// Perform a purely local form of PRE that looks for diamond 2686 /// control flow patterns and attempts to perform simple PRE at the join point. 2687 bool GVN::performPRE(Function &F) { 2688 bool Changed = false; 2689 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) { 2690 // Nothing to PRE in the entry block. 2691 if (CurrentBlock == &F.getEntryBlock()) 2692 continue; 2693 2694 // Don't perform PRE on an EH pad. 2695 if (CurrentBlock->isEHPad()) 2696 continue; 2697 2698 for (BasicBlock::iterator BI = CurrentBlock->begin(), 2699 BE = CurrentBlock->end(); 2700 BI != BE;) { 2701 Instruction *CurInst = &*BI++; 2702 Changed = performScalarPRE(CurInst); 2703 } 2704 } 2705 2706 if (splitCriticalEdges()) 2707 Changed = true; 2708 2709 return Changed; 2710 } 2711 2712 /// Split the critical edge connecting the given two blocks, and return 2713 /// the block inserted to the critical edge. 2714 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) { 2715 BasicBlock *BB = 2716 SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT)); 2717 if (MD) 2718 MD->invalidateCachedPredecessors(); 2719 return BB; 2720 } 2721 2722 /// Split critical edges found during the previous 2723 /// iteration that may enable further optimization. 2724 bool GVN::splitCriticalEdges() { 2725 if (toSplit.empty()) 2726 return false; 2727 do { 2728 std::pair<TerminatorInst*, unsigned> Edge = toSplit.pop_back_val(); 2729 SplitCriticalEdge(Edge.first, Edge.second, 2730 CriticalEdgeSplittingOptions(DT)); 2731 } while (!toSplit.empty()); 2732 if (MD) MD->invalidateCachedPredecessors(); 2733 return true; 2734 } 2735 2736 /// Executes one iteration of GVN 2737 bool GVN::iterateOnFunction(Function &F) { 2738 cleanupGlobalSets(); 2739 2740 // Top-down walk of the dominator tree 2741 bool Changed = false; 2742 // Save the blocks this function have before transformation begins. GVN may 2743 // split critical edge, and hence may invalidate the RPO/DT iterator. 2744 // 2745 std::vector<BasicBlock *> BBVect; 2746 BBVect.reserve(256); 2747 // Needed for value numbering with phi construction to work. 
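  // (One way to see why RPO helps: every block is visited after all of its
  //  non-back-edge predecessors, and in particular after its dominators, so
  //  leaders established in dominating blocks are already in the leader
  //  table by the time a block is processed.)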
2748 ReversePostOrderTraversal<Function *> RPOT(&F); 2749 for (ReversePostOrderTraversal<Function *>::rpo_iterator RI = RPOT.begin(), 2750 RE = RPOT.end(); 2751 RI != RE; ++RI) 2752 BBVect.push_back(*RI); 2753 2754 for (std::vector<BasicBlock *>::iterator I = BBVect.begin(), E = BBVect.end(); 2755 I != E; I++) 2756 Changed |= processBlock(*I); 2757 2758 return Changed; 2759 } 2760 2761 void GVN::cleanupGlobalSets() { 2762 VN.clear(); 2763 LeaderTable.clear(); 2764 TableAllocator.Reset(); 2765 } 2766 2767 /// Verify that the specified instruction does not occur in our 2768 /// internal data structures. 2769 void GVN::verifyRemoved(const Instruction *Inst) const { 2770 VN.verifyRemoved(Inst); 2771 2772 // Walk through the value number scope to make sure the instruction isn't 2773 // ferreted away in it. 2774 for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator 2775 I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) { 2776 const LeaderTableEntry *Node = &I->second; 2777 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 2778 2779 while (Node->Next) { 2780 Node = Node->Next; 2781 assert(Node->Val != Inst && "Inst still in value numbering scope!"); 2782 } 2783 } 2784 } 2785 2786 /// BB is declared dead, which implied other blocks become dead as well. This 2787 /// function is to add all these blocks to "DeadBlocks". For the dead blocks' 2788 /// live successors, update their phi nodes by replacing the operands 2789 /// corresponding to dead blocks with UndefVal. 2790 void GVN::addDeadBlock(BasicBlock *BB) { 2791 SmallVector<BasicBlock *, 4> NewDead; 2792 SmallSetVector<BasicBlock *, 4> DF; 2793 2794 NewDead.push_back(BB); 2795 while (!NewDead.empty()) { 2796 BasicBlock *D = NewDead.pop_back_val(); 2797 if (DeadBlocks.count(D)) 2798 continue; 2799 2800 // All blocks dominated by D are dead. 2801 SmallVector<BasicBlock *, 8> Dom; 2802 DT->getDescendants(D, Dom); 2803 DeadBlocks.insert(Dom.begin(), Dom.end()); 2804 2805 // Figure out the dominance-frontier(D). 2806 for (SmallVectorImpl<BasicBlock *>::iterator I = Dom.begin(), 2807 E = Dom.end(); I != E; I++) { 2808 BasicBlock *B = *I; 2809 for (succ_iterator SI = succ_begin(B), SE = succ_end(B); SI != SE; SI++) { 2810 BasicBlock *S = *SI; 2811 if (DeadBlocks.count(S)) 2812 continue; 2813 2814 bool AllPredDead = true; 2815 for (pred_iterator PI = pred_begin(S), PE = pred_end(S); PI != PE; PI++) 2816 if (!DeadBlocks.count(*PI)) { 2817 AllPredDead = false; 2818 break; 2819 } 2820 2821 if (!AllPredDead) { 2822 // S could be proved dead later on. That is why we don't update phi 2823 // operands at this moment. 2824 DF.insert(S); 2825 } else { 2826 // While S is not dominated by D, it is dead by now. This could take 2827 // place if S already have a dead predecessor before D is declared 2828 // dead. 2829 NewDead.push_back(S); 2830 } 2831 } 2832 } 2833 } 2834 2835 // For the dead blocks' live successors, update their phi nodes by replacing 2836 // the operands corresponding to dead blocks with UndefVal. 
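  // For example (illustrative only), in a live successor a phi such as
  //   %x = phi i32 [ %a, %deadpred ], [ %b, %livepred ]
  // becomes
  //   %x = phi i32 [ undef, %deadpred ], [ %b, %livepred ]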
  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
       I != E; I++) {
    BasicBlock *B = *I;
    if (DeadBlocks.count(B))
      continue;

    SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
    for (SmallVectorImpl<BasicBlock *>::iterator PI = Preds.begin(),
         PE = Preds.end(); PI != PE; PI++) {
      BasicBlock *P = *PI;

      if (!DeadBlocks.count(P))
        continue;

      if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }

      for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
        PHINode &Phi = cast<PHINode>(*II);
        Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
                             UndefValue::get(Phi.getType()));
      }
    }
  }
}

// If the given branch is recognized as a foldable branch (i.e. a conditional
// branch with a constant condition), perform the following analyses and
// transformation.  Let R be the target of the branch's dead outgoing edge.
//  1) If the dead outgoing edge is a critical edge, split it.
//  2) Identify the set of dead blocks implied by the dead edge. The result of
//     this step is {X | X is dominated by R}.
//  3) Identify the blocks that have at least one dead predecessor. The result
//     of this step is dominance-frontier(R).
//  4) Update the PHIs in DF(R) by replacing the operands corresponding to
//     dead blocks with "UndefVal", in the hope that these PHIs will be
//     optimized away.
//
// Return true iff *NEW* dead code is found.
bool GVN::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  // If a branch has two identical successors, we cannot declare either dead.
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot = Cond->getZExtValue() ?
                         BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}

// performPRE() will trigger an assert if it comes across an instruction
// without an associated val-num. As there are normally far more live
// instructions than dead ones, it makes more sense to simply "fabricate" a
// val-num for the dead code than to check whether each instruction is dead.
void GVN::assignValNumForDeadCode() {
  for (SetVector<BasicBlock *>::iterator I = DeadBlocks.begin(),
       E = DeadBlocks.end(); I != E; I++) {
    BasicBlock *BB = *I;
    for (BasicBlock::iterator II = BB->begin(), EE = BB->end();
         II != EE; II++) {
      Instruction *Inst = &*II;
      unsigned ValNum = VN.lookup_or_add(Inst);
      addToLeaderTable(ValNum, Inst, BB);
    }
  }
}