//===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs global value numbering to eliminate fully redundant
// instructions. It also performs simple dead load elimination.
//
// Note that this pass does the value numbering itself; it does not use the
// ValueNumbering analysis passes.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::gvn;
using namespace llvm::VNCoercion;
using namespace PatternMatch;

#define DEBUG_TYPE "gvn"

STATISTIC(NumGVNInstr, "Number of instructions deleted");
STATISTIC(NumGVNLoad, "Number of loads deleted");
STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
STATISTIC(NumGVNBlocks, "Number of blocks merged");
blocks merged"); 94 STATISTIC(NumGVNSimpl, "Number of instructions simplified"); 95 STATISTIC(NumGVNEqProp, "Number of equalities propagated"); 96 STATISTIC(NumPRELoad, "Number of loads PRE'd"); 97 98 static cl::opt<bool> EnablePRE("enable-pre", 99 cl::init(true), cl::Hidden); 100 static cl::opt<bool> EnableLoadPRE("enable-load-pre", cl::init(true)); 101 static cl::opt<bool> EnableMemDep("enable-gvn-memdep", cl::init(true)); 102 103 // Maximum allowed recursion depth. 104 static cl::opt<uint32_t> 105 MaxRecurseDepth("gvn-max-recurse-depth", cl::Hidden, cl::init(1000), cl::ZeroOrMore, 106 cl::desc("Max recurse depth in GVN (default = 1000)")); 107 108 static cl::opt<uint32_t> MaxNumDeps( 109 "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore, 110 cl::desc("Max number of dependences to attempt Load PRE (default = 100)")); 111 112 struct llvm::GVN::Expression { 113 uint32_t opcode; 114 Type *type; 115 bool commutative = false; 116 SmallVector<uint32_t, 4> varargs; 117 118 Expression(uint32_t o = ~2U) : opcode(o) {} 119 120 bool operator==(const Expression &other) const { 121 if (opcode != other.opcode) 122 return false; 123 if (opcode == ~0U || opcode == ~1U) 124 return true; 125 if (type != other.type) 126 return false; 127 if (varargs != other.varargs) 128 return false; 129 return true; 130 } 131 132 friend hash_code hash_value(const Expression &Value) { 133 return hash_combine( 134 Value.opcode, Value.type, 135 hash_combine_range(Value.varargs.begin(), Value.varargs.end())); 136 } 137 }; 138 139 namespace llvm { 140 141 template <> struct DenseMapInfo<GVN::Expression> { 142 static inline GVN::Expression getEmptyKey() { return ~0U; } 143 static inline GVN::Expression getTombstoneKey() { return ~1U; } 144 145 static unsigned getHashValue(const GVN::Expression &e) { 146 using llvm::hash_value; 147 148 return static_cast<unsigned>(hash_value(e)); 149 } 150 151 static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) { 152 return LHS == RHS; 153 } 154 }; 155 156 } // end namespace llvm 157 158 /// Represents a particular available value that we know how to materialize. 159 /// Materialization of an AvailableValue never fails. An AvailableValue is 160 /// implicitly associated with a rematerialization point which is the 161 /// location of the instruction from which it was formed. 162 struct llvm::gvn::AvailableValue { 163 enum ValType { 164 SimpleVal, // A simple offsetted value that is accessed. 165 LoadVal, // A value produced by a load. 166 MemIntrin, // A memory intrinsic which is loaded from. 167 UndefVal // A UndefValue representing a value from dead block (which 168 // is not yet physically removed from the CFG). 169 }; 170 171 /// V - The value that is live out of the block. 172 PointerIntPair<Value *, 2, ValType> Val; 173 174 /// Offset - The byte offset in Val that is interesting for the load query. 

  static AvailableValue get(Value *V, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(V);
    Res.Val.setInt(SimpleVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(MI);
    Res.Val.setInt(MemIntrin);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getLoad(LoadInst *LI, unsigned Offset = 0) {
    AvailableValue Res;
    Res.Val.setPointer(LI);
    Res.Val.setInt(LoadVal);
    Res.Offset = Offset;
    return Res;
  }

  static AvailableValue getUndef() {
    AvailableValue Res;
    Res.Val.setPointer(nullptr);
    Res.Val.setInt(UndefVal);
    Res.Offset = 0;
    return Res;
  }

  bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
  bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
  bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
  bool isUndefValue() const { return Val.getInt() == UndefVal; }

  Value *getSimpleValue() const {
    assert(isSimpleValue() && "Wrong accessor");
    return Val.getPointer();
  }

  LoadInst *getCoercedLoadValue() const {
    assert(isCoercedLoadValue() && "Wrong accessor");
    return cast<LoadInst>(Val.getPointer());
  }

  MemIntrinsic *getMemIntrinValue() const {
    assert(isMemIntrinValue() && "Wrong accessor");
    return cast<MemIntrinsic>(Val.getPointer());
  }

  /// Emit code at the specified insertion point to adjust the value defined
  /// here to the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, Instruction *InsertPt,
                                  GVN &gvn) const;
};

/// Represents an AvailableValue which can be rematerialized at the end of
/// the associated BasicBlock.
struct llvm::gvn::AvailableValueInBlock {
  /// BB - The basic block in question.
  BasicBlock *BB;

  /// AV - The actual available value.
  AvailableValue AV;

  static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
    AvailableValueInBlock Res;
    Res.BB = BB;
    Res.AV = std::move(AV);
    return Res;
  }

  static AvailableValueInBlock get(BasicBlock *BB, Value *V,
                                   unsigned Offset = 0) {
    return get(BB, AvailableValue::get(V, Offset));
  }

  static AvailableValueInBlock getUndef(BasicBlock *BB) {
    return get(BB, AvailableValue::getUndef());
  }

  /// Emit code at the end of this block to adjust the value defined here to
  /// the specified type. This handles various coercion cases.
  Value *MaterializeAdjustedValue(LoadInst *LI, GVN &gvn) const {
    return AV.MaterializeAdjustedValue(LI, BB->getTerminator(), gvn);
  }
};

//===----------------------------------------------------------------------===//
//                     ValueTable Internal Functions
//===----------------------------------------------------------------------===//

GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
  Expression e;
  e.type = I->getType();
  e.opcode = I->getOpcode();
  for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));
  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
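    // For example, "%a = add i32 %x, %y" and "%b = add i32 %y, %x" both
    // produce the operand list {VN(%x), VN(%y)} after sorting, so %a and %b
    // receive the same value number.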
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (e.varargs[0] > e.varargs[1])
      std::swap(e.varargs[0], e.varargs[1]);
    e.commutative = true;
  }

  if (CmpInst *C = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value number.
    CmpInst::Predicate Predicate = C->getPredicate();
    if (e.varargs[0] > e.varargs[1]) {
      std::swap(e.varargs[0], e.varargs[1]);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    e.opcode = (C->getOpcode() << 8) | Predicate;
    e.commutative = true;
  } else if (InsertValueInst *E = dyn_cast<InsertValueInst>(I)) {
    for (InsertValueInst::idx_iterator II = E->idx_begin(), IE = E->idx_end();
         II != IE; ++II)
      e.varargs.push_back(*II);
  }

  return e;
}

GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
                                               CmpInst::Predicate Predicate,
                                               Value *LHS, Value *RHS) {
  assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
         "Not a comparison!");
  Expression e;
  e.type = CmpInst::makeCmpResultType(LHS->getType());
  e.varargs.push_back(lookupOrAdd(LHS));
  e.varargs.push_back(lookupOrAdd(RHS));

  // Sort the operand value numbers so x<y and y>x get the same value number.
  if (e.varargs[0] > e.varargs[1]) {
    std::swap(e.varargs[0], e.varargs[1]);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
  }
  e.opcode = (Opcode << 8) | Predicate;
  e.commutative = true;
  return e;
}

GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
  assert(EI && "Not an ExtractValueInst?");
  Expression e;
  e.type = EI->getType();
  e.opcode = 0;

  WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
  if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
    // EI is an extract from one of our with.overflow intrinsics. Synthesize
    // a semantically equivalent expression instead of an extract value
    // expression.
    e.opcode = WO->getBinaryOp();
    e.varargs.push_back(lookupOrAdd(WO->getLHS()));
    e.varargs.push_back(lookupOrAdd(WO->getRHS()));
    return e;
  }

  // Not a recognised intrinsic. Fall back to producing an extract value
  // expression.
  e.opcode = EI->getOpcode();
  for (Instruction::op_iterator OI = EI->op_begin(), OE = EI->op_end();
       OI != OE; ++OI)
    e.varargs.push_back(lookupOrAdd(*OI));

  for (ExtractValueInst::idx_iterator II = EI->idx_begin(), IE = EI->idx_end();
       II != IE; ++II)
    e.varargs.push_back(*II);

  return e;
}

//===----------------------------------------------------------------------===//
//                     ValueTable External Functions
//===----------------------------------------------------------------------===//

GVN::ValueTable::ValueTable() = default;
GVN::ValueTable::ValueTable(const ValueTable &) = default;
GVN::ValueTable::ValueTable(ValueTable &&) = default;
GVN::ValueTable::~ValueTable() = default;
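
// Illustrative example of the numbering scheme (the numbers themselves are
// arbitrary):
//
//   %a = add i32 %x, %y   ; expression {add, i32, VN(%x), VN(%y)} -> VN 5
//   %b = add i32 %x, %y   ; same expression                       -> VN 5
//   %c = mul i32 %x, %y   ; different opcode                      -> VN 6
//
// Two instructions that map to the same Expression receive the same value
// number, which is what later lets GVN treat %b as redundant with %a.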

/// add - Insert a value into the table with a specified value number.
void GVN::ValueTable::add(Value *V, uint32_t num) {
  valueNumbering.insert(std::make_pair(V, num));
  if (PHINode *PN = dyn_cast<PHINode>(V))
    NumberingPhi[num] = PN;
}

uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
  if (AA->doesNotAccessMemory(C)) {
    Expression exp = createExpr(C);
    uint32_t e = assignExpNewValueNum(exp).first;
    valueNumbering[C] = e;
    return e;
  } else if (MD && AA->onlyReadsMemory(C)) {
    Expression exp = createExpr(C);
    auto ValNum = assignExpNewValueNum(exp);
    if (ValNum.second) {
      valueNumbering[C] = ValNum.first;
      return ValNum.first;
    }

    MemDepResult local_dep = MD->getDependency(C);

    if (!local_dep.isDef() && !local_dep.isNonLocal()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (local_dep.isDef()) {
      CallInst *local_cdep = cast<CallInst>(local_dep.getInst());

      if (local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }

      for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
        uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
        uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
        if (c_vn != cd_vn) {
          valueNumbering[C] = nextValueNumber;
          return nextValueNumber++;
        }
      }

      uint32_t v = lookupOrAdd(local_cdep);
      valueNumbering[C] = v;
      return v;
    }

    // Non-local case.
    const MemoryDependenceResults::NonLocalDepInfo &deps =
        MD->getNonLocalCallDependency(C);
    // FIXME: Move the checking logic to MemDep!
    CallInst *cdep = nullptr;

    // Check to see if we have a single dominating call instruction that is
    // identical to C.
    for (unsigned i = 0, e = deps.size(); i != e; ++i) {
      const NonLocalDepEntry *I = &deps[i];
      if (I->getResult().isNonLocal())
        continue;

      // We don't handle non-definitions. If we already have a call, reject
      // instruction dependencies.
      if (!I->getResult().isDef() || cdep != nullptr) {
        cdep = nullptr;
        break;
      }

      CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
      // FIXME: All duplicated with non-local case.
      if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())) {
        cdep = NonLocalDepCall;
        continue;
      }

      cdep = nullptr;
      break;
    }

    if (!cdep) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }

    if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
      valueNumbering[C] = nextValueNumber;
      return nextValueNumber++;
    }
    for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
      uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
      uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
      if (c_vn != cd_vn) {
        valueNumbering[C] = nextValueNumber;
        return nextValueNumber++;
      }
    }

    uint32_t v = lookupOrAdd(cdep);
    valueNumbering[C] = v;
    return v;
  } else {
    valueNumbering[C] = nextValueNumber;
    return nextValueNumber++;
  }
}

/// Returns true if a value number exists for the specified value.
bool GVN::ValueTable::exists(Value *V) const {
  return valueNumbering.count(V) != 0;
}

/// lookup_or_add - Returns the value number for the specified value, assigning
/// it a new number if it did not have one before.
uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
  DenseMap<Value *, uint32_t>::iterator VI = valueNumbering.find(V);
  if (VI != valueNumbering.end())
    return VI->second;

  if (!isa<Instruction>(V)) {
    valueNumbering[V] = nextValueNumber;
    return nextValueNumber++;
  }

  Instruction *I = cast<Instruction>(V);
  Expression exp;
  switch (I->getOpcode()) {
    case Instruction::Call:
      return lookupOrAddCall(cast<CallInst>(I));
    case Instruction::Add:
    case Instruction::FAdd:
    case Instruction::Sub:
    case Instruction::FSub:
    case Instruction::Mul:
    case Instruction::FMul:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::FDiv:
    case Instruction::URem:
    case Instruction::SRem:
    case Instruction::FRem:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::ICmp:
    case Instruction::FCmp:
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
    case Instruction::FPToUI:
    case Instruction::FPToSI:
    case Instruction::UIToFP:
    case Instruction::SIToFP:
    case Instruction::FPTrunc:
    case Instruction::FPExt:
    case Instruction::PtrToInt:
    case Instruction::IntToPtr:
    case Instruction::AddrSpaceCast:
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::ExtractElement:
    case Instruction::InsertElement:
    case Instruction::ShuffleVector:
    case Instruction::InsertValue:
    case Instruction::GetElementPtr:
      exp = createExpr(I);
      break;
    case Instruction::ExtractValue:
      exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
      break;
    case Instruction::PHI:
      valueNumbering[V] = nextValueNumber;
      NumberingPhi[nextValueNumber] = cast<PHINode>(V);
      return nextValueNumber++;
    default:
      valueNumbering[V] = nextValueNumber;
      return nextValueNumber++;
  }

  uint32_t e = assignExpNewValueNum(exp).first;
  valueNumbering[V] = e;
  return e;
}

/// Returns the value number of the specified value. Fails if
/// the value has not yet been numbered.
uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
  DenseMap<Value *, uint32_t>::const_iterator VI = valueNumbering.find(V);
  if (Verify) {
    assert(VI != valueNumbering.end() && "Value not numbered?");
    return VI->second;
  }
  return (VI != valueNumbering.end()) ? VI->second : 0;
}

/// Returns the value number of the given comparison,
/// assigning it a new number if it did not have one before. Useful when
/// we deduced the result of a comparison, but don't immediately have an
/// instruction realizing that comparison to hand.
uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
                                         CmpInst::Predicate Predicate,
                                         Value *LHS, Value *RHS) {
  Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
  return assignExpNewValueNum(exp).first;
}

/// Remove all entries from the ValueTable.
void GVN::ValueTable::clear() {
  valueNumbering.clear();
  expressionNumbering.clear();
  NumberingPhi.clear();
  PhiTranslateTable.clear();
  nextValueNumber = 1;
  Expressions.clear();
  ExprIdx.clear();
  nextExprNumber = 0;
}

/// Remove a value from the value numbering.
void GVN::ValueTable::erase(Value *V) {
  uint32_t Num = valueNumbering.lookup(V);
  valueNumbering.erase(V);
  // If V is a PHINode, V <--> value number is a one-to-one mapping.
  if (isa<PHINode>(V))
    NumberingPhi.erase(Num);
}

/// verifyRemoved - Verify that the value is removed from all internal data
/// structures.
void GVN::ValueTable::verifyRemoved(const Value *V) const {
  for (DenseMap<Value *, uint32_t>::const_iterator
           I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
    assert(I->first != V && "Inst still occurs in value numbering map!");
  }
}

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
  // FIXME: The order of evaluation of these 'getResult' calls is very
  // significant! Re-ordering these variables will cause GVN when run alone to
  // be less effective! We should fix memdep and basic-aa to not exhibit this
  // behavior, but until then don't change the order here.
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &MemDep = AM.getResult<MemoryDependenceAnalysis>(F);
  auto *LI = AM.getCachedResult<LoopAnalysis>(F);
  auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
  bool Changed = runImpl(F, AC, DT, TLI, AA, &MemDep, LI, &ORE);
  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<GlobalsAA>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value *> &d) const {
  errs() << "{\n";
  for (DenseMap<uint32_t, Value *>::iterator I = d.begin(),
           E = d.end(); I != E; ++I) {
    errs() << I->first << "\n";
    I->second->dump();
  }
  errs() << "}\n";
}
#endif

/// Return true if we can prove that the value
/// we're analyzing is fully available in the specified block. As we go, keep
/// track of which blocks we know are fully alive in FullyAvailableBlocks. This
/// map is actually a tri-state map with the following values:
///   0) we know the block *is not* fully available.
///   1) we know the block *is* fully available.
///   2) we do not know whether the block is fully available or not, but we are
///      currently speculating that it will be.
///   3) we are speculating for this block and have used that to speculate for
///      other blocks.
static bool IsValueFullyAvailableInBlock(BasicBlock *BB,
                           DenseMap<BasicBlock *, char> &FullyAvailableBlocks,
                           uint32_t RecurseDepth) {
  if (RecurseDepth > MaxRecurseDepth)
    return false;

  // Optimistically assume that the block is fully available and check to see
  // if we already know about this block in one lookup.
  std::pair<DenseMap<BasicBlock *, char>::iterator, bool> IV =
      FullyAvailableBlocks.insert(std::make_pair(BB, 2));

  // If the entry already existed for this block, return the precomputed value.
  if (!IV.second) {
    // If this is a speculative "available" value, mark it as being used for
    // speculation of other blocks.
    if (IV.first->second == 2)
      IV.first->second = 3;
    return IV.first->second != 0;
  }

  // Otherwise, see if it is fully available in all predecessors.
  pred_iterator PI = pred_begin(BB), PE = pred_end(BB);

  // If this block has no predecessors, it isn't live-in here.
  if (PI == PE)
    goto SpeculationFailure;

  for (; PI != PE; ++PI)
    // If the value isn't fully available in one of our predecessors, then it
    // isn't fully available in this block either. Undo our previous
    // optimistic assumption and bail out.
    if (!IsValueFullyAvailableInBlock(*PI, FullyAvailableBlocks,
                                      RecurseDepth + 1))
      goto SpeculationFailure;

  return true;

// If we get here, we found out that this is not, after
// all, a fully-available block. We have a problem if we speculated on this and
// used the speculation to mark other blocks as available.
SpeculationFailure:
  char &BBVal = FullyAvailableBlocks[BB];

  // If we didn't speculate on this, just return with it set to false.
  if (BBVal == 2) {
    BBVal = 0;
    return false;
  }

  // If we did speculate on this value, we could have blocks set to 1 that are
  // incorrect. Walk the (transitive) successors of this block and mark them as
  // 0 if set to one.
  SmallVector<BasicBlock *, 32> BBWorklist;
  BBWorklist.push_back(BB);

  do {
    BasicBlock *Entry = BBWorklist.pop_back_val();
    // Note that this sets blocks to 0 (unavailable) if they happen to not
    // already be in FullyAvailableBlocks. This is safe.
    char &EntryVal = FullyAvailableBlocks[Entry];
    if (EntryVal == 0) continue; // Already unavailable.

    // Mark as unavailable.
    EntryVal = 0;

    BBWorklist.append(succ_begin(Entry), succ_end(Entry));
  } while (!BBWorklist.empty());

  return false;
}

/// Given a set of loads specified by ValuesPerBlock,
/// construct SSA form, allowing us to eliminate LI. This returns the value
/// that should be used at LI's definition site.
static Value *ConstructSSAForLoadSet(LoadInst *LI,
                         SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
                         GVN &gvn) {
  // Check for the fully redundant, dominating load case. In this case, we can
  // just use the dominating value directly.
  if (ValuesPerBlock.size() == 1 &&
      gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
                                               LI->getParent())) {
    assert(!ValuesPerBlock[0].AV.isUndefValue() &&
           "Dead BB dominates this block");
    return ValuesPerBlock[0].MaterializeAdjustedValue(LI, gvn);
  }

  // Otherwise, we have to construct SSA form.
  SmallVector<PHINode *, 8> NewPHIs;
  SSAUpdater SSAUpdate(&NewPHIs);
  SSAUpdate.Initialize(LI->getType(), LI->getName());

  for (const AvailableValueInBlock &AV : ValuesPerBlock) {
    BasicBlock *BB = AV.BB;

    if (SSAUpdate.HasValueForBlock(BB))
      continue;

    // If the value is the load that we will be eliminating, and the block it's
    // available in is the block that the load is in, then don't add it as
    // SSAUpdater will resolve the value to the relevant phi which may let it
    // avoid phi construction entirely if there's actually only one value.
    if (BB == LI->getParent() &&
        ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == LI) ||
         (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == LI)))
      continue;

    SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(LI, gvn));
  }

  // Perform PHI construction.
  return SSAUpdate.GetValueInMiddleOfBlock(LI->getParent());
}
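
// Note on coercion: when the available value's type or width differs from the
// load's, MaterializeAdjustedValue emits adjustment code at the
// rematerialization point. For instance, a load of an i32 at byte offset 4
// within a forwarded i64 store is rewritten in terms of the stored value
// (roughly a shift plus truncate, with the shift amount depending on
// endianness) rather than re-reading memory.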

Value *AvailableValue::MaterializeAdjustedValue(LoadInst *LI,
                                                Instruction *InsertPt,
                                                GVN &gvn) const {
  Value *Res;
  Type *LoadTy = LI->getType();
  const DataLayout &DL = LI->getModule()->getDataLayout();
  if (isSimpleValue()) {
    Res = getSimpleValue();
    if (Res->getType() != LoadTy) {
      Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);

      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
                        << " " << *getSimpleValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isCoercedLoadValue()) {
    LoadInst *Load = getCoercedLoadValue();
    if (Load->getType() == LoadTy && Offset == 0) {
      Res = Load;
    } else {
      Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
      // We would like to use gvn.markInstructionForDeletion here, but we can't
      // because the load is already memoized into the leader map table that
      // GVN tracks. It is potentially possible to remove the load from the
      // table, but then all of the operations based on it would need to be
      // rehashed. Just leave the dead load around.
      gvn.getMemDep().removeInstruction(Load);
      LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
                        << " " << *getCoercedLoadValue() << '\n'
                        << *Res << '\n'
                        << "\n\n\n");
    }
  } else if (isMemIntrinValue()) {
    Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
                                 InsertPt, DL);
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
                      << " " << *getMemIntrinValue() << '\n'
                      << *Res << '\n'
                      << "\n\n\n");
  } else {
    assert(isUndefValue() && "Should be UndefVal");
    LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL Undef:\n";);
    return UndefValue::get(LoadTy);
  }
  assert(Res && "failed to materialize?");
  return Res;
}

static bool isLifetimeStart(const Instruction *Inst) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
    return II->getIntrinsicID() == Intrinsic::lifetime_start;
  return false;
}

/// Try to locate the three instructions involved in a missed
/// load-elimination case that is due to an intervening store.
static void reportMayClobberedLoad(LoadInst *LI, MemDepResult DepInfo,
                                   DominatorTree *DT,
                                   OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  User *OtherAccess = nullptr;

  OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", LI);
  R << "load of type " << NV("Type", LI->getType()) << " not eliminated"
    << setExtraArgs();

  for (auto *U : LI->getPointerOperand()->users())
    if (U != LI && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
        DT->dominates(cast<Instruction>(U), LI)) {
      // FIXME: for now give up if there are multiple memory accesses that
      // dominate the load. We need further analysis to decide which one is
      // that we're forwarding from.
      if (OtherAccess)
        OtherAccess = nullptr;
      else
        OtherAccess = U;
    }

  if (OtherAccess)
    R << " in favor of " << NV("OtherAccess", OtherAccess);

  R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());

  ORE->emit(R);
}

bool GVN::AnalyzeLoadAvailability(LoadInst *LI, MemDepResult DepInfo,
                                  Value *Address, AvailableValue &Res) {
  assert((DepInfo.isDef() || DepInfo.isClobber()) &&
         "expected a local dependence");
  assert(LI->isUnordered() && "rules below are incorrect for ordered access");

  const DataLayout &DL = LI->getModule()->getDataLayout();

  Instruction *DepInst = DepInfo.getInst();
  if (DepInfo.isClobber()) {
    // If the dependence is to a store that writes to a superset of the bits
    // read by the load, we can extract the bits we need for the load from the
    // stored value.
    if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
      // Can't forward from non-atomic to atomic without violating memory
      // model.
      if (Address && LI->isAtomic() <= DepSI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingStore(LI->getType(), Address, DepSI, DL);
        if (Offset != -1) {
          Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
          return true;
        }
      }
    }

    // Check to see if we have something like this:
    //    load i32* P
    //    load i8* (P+1)
    // if we have this, replace the latter with an extraction from the former.
    if (LoadInst *DepLI = dyn_cast<LoadInst>(DepInst)) {
      // If this is a clobber and L is the first instruction in its block, then
      // we have the first instruction in the entry block.
      // Can't forward from non-atomic to atomic without violating memory
      // model.
      if (DepLI != LI && Address && LI->isAtomic() <= DepLI->isAtomic()) {
        int Offset =
            analyzeLoadFromClobberingLoad(LI->getType(), Address, DepLI, DL);

        if (Offset != -1) {
          Res = AvailableValue::getLoad(DepLI, Offset);
          return true;
        }
      }
    }

    // If the clobbering value is a memset/memcpy/memmove, see if we can
    // forward a value on from it.
    if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
      if (Address && !LI->isAtomic()) {
        int Offset = analyzeLoadFromClobberingMemInst(LI->getType(), Address,
                                                      DepMI, DL);
        if (Offset != -1) {
          Res = AvailableValue::getMI(DepMI, Offset);
          return true;
        }
      }
    }
    // Nothing known about this clobber, have to be conservative.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
        dbgs() << " is clobbered by " << *DepInst << '\n';);
    if (ORE->allowExtraAnalysis(DEBUG_TYPE))
      reportMayClobberedLoad(LI, DepInfo, DT, ORE);

    return false;
  }
  assert(DepInfo.isDef() && "follows from above");

  // Loading the allocation -> undef.
  if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
      // Loading immediately after lifetime begin -> undef.
      isLifetimeStart(DepInst)) {
    Res = AvailableValue::get(UndefValue::get(LI->getType()));
    return true;
  }

  // Loading from calloc (which zero initializes memory) -> zero.
  if (isCallocLikeFn(DepInst, TLI)) {
    Res = AvailableValue::get(Constant::getNullValue(LI->getType()));
    return true;
  }

  if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
    // Reject loads and stores that are to the same address but are of
    // different types if we have to. If the stored value is larger or equal
    // to the loaded value, we can reuse it.
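    // For example, forwarding "store i64 %v" to "load i32* %p" extracts the
    // relevant 32 bits of %v, and forwarding between equally sized types
    // (e.g. i32 and float) inserts a bitcast; forwarding from a *narrower*
    // store is rejected here.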
    if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), LI->getType(),
                                         DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (S->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::get(S->getValueOperand());
    return true;
  }

  if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
    // If the types mismatch and we can't handle it, reject reuse of the load.
    // If the stored value is larger or equal to the loaded value, we can reuse
    // it.
    if (!canCoerceMustAliasedValueToLoad(LD, LI->getType(), DL))
      return false;

    // Can't forward from non-atomic to atomic without violating memory model.
    if (LD->isAtomic() < LI->isAtomic())
      return false;

    Res = AvailableValue::getLoad(LD);
    return true;
  }

  // Unknown def - must be conservative.
  LLVM_DEBUG(
      // fast print dep, using operator<< on instruction is too slow.
      dbgs() << "GVN: load "; LI->printAsOperand(dbgs());
      dbgs() << " has unknown def " << *DepInst << '\n';);
  return false;
}

void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
                                  AvailValInBlkVect &ValuesPerBlock,
                                  UnavailBlkVect &UnavailableBlocks) {
  // Filter out useless results (non-locals, etc). Keep track of the blocks
  // where we have a value available in repl, also keep track of whether we see
  // dependencies that produce an unknown value for the load (such as a call
  // that could potentially clobber the load).
  unsigned NumDeps = Deps.size();
  for (unsigned i = 0, e = NumDeps; i != e; ++i) {
    BasicBlock *DepBB = Deps[i].getBB();
    MemDepResult DepInfo = Deps[i].getResult();

    if (DeadBlocks.count(DepBB)) {
      // A dead dependent mem-op disguises itself as a load that evaluates the
      // same value as the load in question.
      ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
      continue;
    }

    if (!DepInfo.isDef() && !DepInfo.isClobber()) {
      UnavailableBlocks.push_back(DepBB);
      continue;
    }

    // The address being loaded in this non-local block may not be the same as
    // the pointer operand of the load if PHI translation occurs. Make sure
    // to consider the right address.
    Value *Address = Deps[i].getAddress();

    AvailableValue AV;
    if (AnalyzeLoadAvailability(LI, DepInfo, Address, AV)) {
      // subtlety: because we know this was a non-local dependency, we know
      // it's safe to materialize anywhere between the instruction within
      // DepInfo and the end of its block.
      ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                                          std::move(AV)));
    } else {
      UnavailableBlocks.push_back(DepBB);
    }
  }

  assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
         "post condition violation");
}

bool GVN::PerformLoadPRE(LoadInst *LI, AvailValInBlkVect &ValuesPerBlock,
                         UnavailBlkVect &UnavailableBlocks) {
  // Okay, we have *some* definitions of the value. This means that the value
  // is available in some of our (transitive) predecessors. Let's think about
  // doing PRE of this load. This will involve inserting a new load into the
  // predecessor when it's not available. We could do this in general, but
  // prefer to not increase code size. As such, we only do this when we know
  // that we only have to insert *one* load (which means we're basically moving
  // the load, not inserting a new one).
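  //
  // For example, given the diamond
  //
  //     Pred1 (stores to %p)     Pred2 (no access to %p)
  //              \                  /
  //           LoadBB: %v = load i32, i32* %p
  //
  // the value is available from Pred1 but not Pred2; we insert a copy of the
  // load at the end of Pred2 and merge the two values with a phi in LoadBB.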

  SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
                                        UnavailableBlocks.end());

  // Let's find the first basic block with more than one predecessor. Walk
  // backwards through predecessors if needed.
  BasicBlock *LoadBB = LI->getParent();
  BasicBlock *TmpBB = LoadBB;
  bool IsSafeToSpeculativelyExecute = isSafeToSpeculativelyExecute(LI);

  // Check that there are no implicit control flow instructions above our load
  // in its block. If there is an instruction that doesn't always pass the
  // execution to the following instruction, then moving through it may become
  // invalid. For example:
  //
  // int arr[LEN];
  // int index = ???;
  // ...
  // guard(0 <= index && index < LEN);
  // use(arr[index]);
  //
  // It is illegal to move the array access to any point above the guard,
  // because if the index is out of bounds we should deoptimize rather than
  // access the array.
  // Check that there is no guard in this block above our instruction.
  if (!IsSafeToSpeculativelyExecute && ICF->isDominatedByICFIFromSameBlock(LI))
    return false;
  while (TmpBB->getSinglePredecessor()) {
    TmpBB = TmpBB->getSinglePredecessor();
    if (TmpBB == LoadBB) // Infinite (unreachable) loop.
      return false;
    if (Blockers.count(TmpBB))
      return false;

    // If any of these blocks has more than one successor (i.e. if the edge we
    // just traversed was critical), then there are other paths through this
    // block along which the load may not be anticipated. Hoisting the load
    // above this block would be adding the load to execution paths along
    // which it was not previously executed.
    if (TmpBB->getTerminator()->getNumSuccessors() != 1)
      return false;

    // Check that there is no implicit control flow in a block above.
    if (!IsSafeToSpeculativelyExecute && ICF->hasICF(TmpBB))
      return false;
  }

  assert(TmpBB);
  LoadBB = TmpBB;

  // Check to see how many predecessors have the loaded value fully
  // available.
  MapVector<BasicBlock *, Value *> PredLoads;
  DenseMap<BasicBlock *, char> FullyAvailableBlocks;
  for (const AvailableValueInBlock &AV : ValuesPerBlock)
    FullyAvailableBlocks[AV.BB] = true;
  for (BasicBlock *UnavailableBB : UnavailableBlocks)
    FullyAvailableBlocks[UnavailableBB] = false;

  SmallVector<BasicBlock *, 4> CriticalEdgePred;
  for (BasicBlock *Pred : predecessors(LoadBB)) {
    // If any predecessor block is an EH pad that does not allow non-PHI
    // instructions before the terminator, we can't PRE the load.
    if (Pred->getTerminator()->isEHPad()) {
      LLVM_DEBUG(
          dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
                 << Pred->getName() << "': " << *LI << '\n');
      return false;
    }

    if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks, 0)) {
      continue;
    }

    if (Pred->getTerminator()->getNumSuccessors() != 1) {
      if (isa<IndirectBrInst>(Pred->getTerminator())) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      // FIXME: Can we support the fallthrough edge?
      if (isa<CallBrInst>(Pred->getTerminator())) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      if (LoadBB->isEHPad()) {
        LLVM_DEBUG(
            dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
                   << Pred->getName() << "': " << *LI << '\n');
        return false;
      }

      CriticalEdgePred.push_back(Pred);
    } else {
      // Only add the predecessors that will not be split for now.
      PredLoads[Pred] = nullptr;
    }
  }

  // Decide whether PRE is profitable for this load.
  unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
  assert(NumUnavailablePreds != 0 &&
         "Fully available value should already be eliminated!");

  // If this load is unavailable in multiple predecessors, reject it.
  // FIXME: If we could restructure the CFG, we could make a common pred with
  // all the preds that don't have an available LI and insert a new load into
  // that one block.
  if (NumUnavailablePreds != 1)
    return false;

  // Split critical edges, and update the unavailable predecessors accordingly.
  for (BasicBlock *OrigPred : CriticalEdgePred) {
    BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
    assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
    PredLoads[NewPred] = nullptr;
    LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
                      << LoadBB->getName() << '\n');
  }

  // Check if the load can safely be moved to all the unavailable predecessors.
  bool CanDoPRE = true;
  const DataLayout &DL = LI->getModule()->getDataLayout();
  SmallVector<Instruction *, 8> NewInsts;
  for (auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;

    // Do PHI translation to get its value in the predecessor if necessary. The
    // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.

    // If all preds have a single successor, then we know it is safe to insert
    // the load on the pred (?!?), so we can insert code to materialize the
    // pointer if it is not available.
    PHITransAddr Address(LI->getPointerOperand(), DL, AC);
    Value *LoadPtr = nullptr;
    LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred,
                                                *DT, NewInsts);

    // If we couldn't find or insert a computation of this phi translated
    // value, we fail PRE.
    if (!LoadPtr) {
      LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
                        << *LI->getPointerOperand() << "\n");
      CanDoPRE = false;
      break;
    }

    PredLoad.second = LoadPtr;
  }

  if (!CanDoPRE) {
    while (!NewInsts.empty()) {
      Instruction *I = NewInsts.pop_back_val();
      markInstructionForDeletion(I);
    }
    // HINT: Don't revert the edge-splitting as the following transformation
    // may also need to split these critical edges.
    return !CriticalEdgePred.empty();
  }

  // Okay, we can eliminate this load by inserting a reload in the predecessor
  // and using PHI construction to get the value in the other predecessors, do
  // it.
  LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *LI << '\n');
  LLVM_DEBUG(if (!NewInsts.empty()) dbgs()
             << "INSERTED " << NewInsts.size() << " INSTS: " << *NewInsts.back()
             << '\n');

  // Assign value numbers to the new instructions.
  for (Instruction *I : NewInsts) {
    // Instructions that have been inserted in predecessor(s) to materialize
    // the load address do not retain their original debug locations. Doing
    // so could lead to confusing (but correct) source attributions.
    if (const DebugLoc &DL = I->getDebugLoc())
      I->setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));

    // FIXME: We really _ought_ to insert these value numbers into their
    // parent's availability map. However, in doing so, we risk getting into
    // ordering issues. If a block hasn't been processed yet, we would be
    // marking a value as AVAIL-IN, which isn't what we intend.
    VN.lookupOrAdd(I);
  }

  for (const auto &PredLoad : PredLoads) {
    BasicBlock *UnavailablePred = PredLoad.first;
    Value *LoadPtr = PredLoad.second;

    auto *NewLoad =
        new LoadInst(LI->getType(), LoadPtr, LI->getName() + ".pre",
                     LI->isVolatile(), LI->getAlignment(), LI->getOrdering(),
                     LI->getSyncScopeID(), UnavailablePred->getTerminator());
    NewLoad->setDebugLoc(LI->getDebugLoc());

    // Transfer the old load's AA tags to the new load.
    AAMDNodes Tags;
    LI->getAAMetadata(Tags);
    if (Tags)
      NewLoad->setAAMetadata(Tags);

    if (auto *MD = LI->getMetadata(LLVMContext::MD_invariant_load))
      NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
    if (auto *InvGroupMD = LI->getMetadata(LLVMContext::MD_invariant_group))
      NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
    if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range))
      NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);

    // We do not propagate the old load's debug location, because the new
    // load now lives in a different BB, and we want to avoid a jumpy line
    // table.
    // FIXME: How do we retain source locations without causing poor debugging
    // behavior?

    // Add the newly created load.
    ValuesPerBlock.push_back(AvailableValueInBlock::get(UnavailablePred,
                                                        NewLoad));
    MD->invalidateCachedPointerInfo(LoadPtr);
    LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
  }

  // Perform PHI construction.
  Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
  LI->replaceAllUsesWith(V);
  if (isa<PHINode>(V))
    V->takeName(LI);
  if (Instruction *I = dyn_cast<Instruction>(V))
    I->setDebugLoc(LI->getDebugLoc());
  if (V->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(V);
  markInstructionForDeletion(LI);
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadPRE", LI)
           << "load eliminated by PRE";
  });
  ++NumPRELoad;
  return true;
}

static void reportLoadElim(LoadInst *LI, Value *AvailableValue,
                           OptimizationRemarkEmitter *ORE) {
  using namespace ore;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "LoadElim", LI)
           << "load of type " << NV("Type", LI->getType()) << " eliminated"
           << setExtraArgs() << " in favor of "
           << NV("InfavorOfValue", AvailableValue);
  });
}

/// Attempt to eliminate a load whose dependencies are
/// non-local by performing PHI construction.
bool GVN::processNonLocalLoad(LoadInst *LI) {
  // Non-local speculations are not allowed under ASan.
  if (LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeAddress) ||
      LI->getParent()->getParent()->hasFnAttribute(
          Attribute::SanitizeHWAddress))
    return false;

  // Step 1: Find the non-local dependencies of the load.
  LoadDepVect Deps;
  MD->getNonLocalPointerDependency(LI, Deps);

  // If we had to process more than one hundred blocks to find the
  // dependencies, this load isn't worth worrying about. Optimizing
  // it will be too expensive.
  unsigned NumDeps = Deps.size();
  if (NumDeps > MaxNumDeps)
    return false;

  // If we had a phi translation failure, we'll have a single entry which is a
  // clobber in the current block. Reject this early.
  if (NumDeps == 1 &&
      !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
    LLVM_DEBUG(dbgs() << "GVN: non-local load "; LI->printAsOperand(dbgs());
               dbgs() << " has unknown dependencies\n";);
    return false;
  }

  // If this load follows a GEP, see if we can PRE the indices before analyzing.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) {
    for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
                                        OE = GEP->idx_end();
         OI != OE; ++OI)
      if (Instruction *I = dyn_cast<Instruction>(OI->get()))
        performScalarPRE(I);
  }

  // Step 2: Analyze the availability of the load.
  AvailValInBlkVect ValuesPerBlock;
  UnavailBlkVect UnavailableBlocks;
  AnalyzeLoadAvailability(LI, Deps, ValuesPerBlock, UnavailableBlocks);

  // If we have no predecessors that produce a known value for this load, exit
  // early.
  if (ValuesPerBlock.empty())
    return false;

  // Step 3: Eliminate full redundancy.
  //
  // If all of the instructions we depend on produce a known value for this
  // load, then it is fully redundant and we can use PHI insertion to compute
  // its value. Insert PHIs and remove the fully redundant value now.
  if (UnavailableBlocks.empty()) {
    LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *LI << '\n');

    // Perform PHI construction.
    Value *V = ConstructSSAForLoadSet(LI, ValuesPerBlock, *this);
    LI->replaceAllUsesWith(V);

    if (isa<PHINode>(V))
      V->takeName(LI);
    if (Instruction *I = dyn_cast<Instruction>(V))
      // If instruction I has debug info, then we should not update it.
      // Also, if I has a null DebugLoc, then it is still potentially incorrect
      // to propagate LI's DebugLoc because LI may not post-dominate I.
      if (LI->getDebugLoc() && LI->getParent() == I->getParent())
        I->setDebugLoc(LI->getDebugLoc());
    if (V->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(V);
    markInstructionForDeletion(LI);
    ++NumGVNLoad;
    reportLoadElim(LI, V, ORE);
    return true;
  }

  // Step 4: Eliminate partial redundancy.
  if (!EnablePRE || !EnableLoadPRE)
    return false;

  return PerformLoadPRE(LI, ValuesPerBlock, UnavailableBlocks);
}

bool GVN::processAssumeIntrinsic(IntrinsicInst *IntrinsicI) {
  assert(IntrinsicI->getIntrinsicID() == Intrinsic::assume &&
         "This function can only be called with llvm.assume intrinsic");
  Value *V = IntrinsicI->getArgOperand(0);

  if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
    if (Cond->isZero()) {
      Type *Int8Ty = Type::getInt8Ty(V->getContext());
      // Insert a new store-to-null instruction before the assume to indicate
      // that this code is not reachable. FIXME: We could insert an unreachable
      // instruction directly because we can modify the CFG.
      new StoreInst(UndefValue::get(Int8Ty),
                    Constant::getNullValue(Int8Ty->getPointerTo()),
                    IntrinsicI);
    }
    markInstructionForDeletion(IntrinsicI);
    return false;
  } else if (isa<Constant>(V)) {
    // If it's not false, and constant, it must evaluate to true. This means
    // our assume is assume(true), and thus, pointless, and we don't want to
    // do anything more here.
    return false;
  }

  Constant *True = ConstantInt::getTrue(V->getContext());
  bool Changed = false;

  for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
    BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);

    // This property is only true in dominated successors, propagateEquality
    // will check dominance for us.
    Changed |= propagateEquality(V, True, Edge, false);
  }

  // We can replace assume value with true, which covers cases like this:
  // call void @llvm.assume(i1 %cmp)
  // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
  ReplaceWithConstMap[V] = True;

  // If one operand of an *cmp *eq is a constant, adding it to the map will
  // cover this:
  // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
  // call void @llvm.assume(i1 %cmp)
  // ret float %0 ; will change it to ret float 3.000000e+00
  if (auto *CmpI = dyn_cast<CmpInst>(V)) {
    if (CmpI->getPredicate() == CmpInst::Predicate::ICMP_EQ ||
        CmpI->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
        (CmpI->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
         CmpI->getFastMathFlags().noNaNs())) {
      Value *CmpLHS = CmpI->getOperand(0);
      Value *CmpRHS = CmpI->getOperand(1);
      if (isa<Constant>(CmpLHS))
        std::swap(CmpLHS, CmpRHS);
      auto *RHSConst = dyn_cast<Constant>(CmpRHS);

      // If only one operand is constant.
      if (RHSConst != nullptr && !isa<Constant>(CmpLHS))
        ReplaceWithConstMap[CmpLHS] = RHSConst;
    }
  }
  return Changed;
}

static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
  patchReplacementInstruction(I, Repl);
  I->replaceAllUsesWith(Repl);
}

/// Attempt to eliminate a load, first by eliminating it
/// locally, and then attempting non-local elimination if that fails.
bool GVN::processLoad(LoadInst *L) {
  if (!MD)
    return false;

  // This code hasn't been audited for ordered or volatile memory access.
  if (!L->isUnordered())
    return false;

  if (L->use_empty()) {
    markInstructionForDeletion(L);
    return true;
  }

  // ... to a pointer that has been loaded from before...
  MemDepResult Dep = MD->getDependency(L);

  // If it is defined in another block, try harder.
  if (Dep.isNonLocal())
    return processNonLocalLoad(L);

  // Only handle the local case below.
  if (!Dep.isDef() && !Dep.isClobber()) {
    // This might be a NonFuncLocal or an Unknown.
    LLVM_DEBUG(
        // fast print dep, using operator<< on instruction is too slow.
        dbgs() << "GVN: load "; L->printAsOperand(dbgs());
        dbgs() << " has unknown dependence\n";);
    return false;
  }

  AvailableValue AV;
  if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
    Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);

    // Replace the load!
    patchAndReplaceAllUsesWith(L, AvailableValue);
    markInstructionForDeletion(L);
    ++NumGVNLoad;
    reportLoadElim(L, AvailableValue, ORE);
    // Tell MDA to re-examine the reused pointer since we might have more
    // information after forwarding it.
    if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
      MD->invalidateCachedPointerInfo(AvailableValue);
    return true;
  }

  return false;
}

/// Return a pair: the first field is the value number of \p Exp and the
/// second field indicates whether that value number is newly created.
std::pair<uint32_t, bool>
GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
  uint32_t &e = expressionNumbering[Exp];
  bool CreateNewValNum = !e;
  if (CreateNewValNum) {
    Expressions.push_back(Exp);
    if (ExprIdx.size() < nextValueNumber + 1)
      ExprIdx.resize(nextValueNumber * 2);
    e = nextValueNumber;
    ExprIdx[nextValueNumber++] = nextExprNumber++;
  }
  return {e, CreateNewValNum};
}

/// Return whether all the values related with the same \p Num are
/// defined in \p BB.
bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
                                     GVN &Gvn) {
  LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
  while (Vals && Vals->BB == BB)
    Vals = Vals->Next;
  return !Vals;
}

/// Wrap phiTranslateImpl to provide caching functionality.
uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
                                       const BasicBlock *PhiBlock,
                                       uint32_t Num, GVN &Gvn) {
  auto FindRes = PhiTranslateTable.find({Num, Pred});
  if (FindRes != PhiTranslateTable.end())
    return FindRes->second;
  uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
  PhiTranslateTable.insert({{Num, Pred}, NewNum});
  return NewNum;
}

/// Translate value number \p Num using phis, so that it has the values of
/// the phis in BB.
uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
                                           const BasicBlock *PhiBlock,
                                           uint32_t Num, GVN &Gvn) {
  if (PHINode *PN = NumberingPhi[Num]) {
    for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
      if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
        if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
          return TransVal;
    }
    return Num;
  }

  // If any value related to Num is defined in a BB other than PhiBlock, it
  // cannot depend on a phi in PhiBlock without going through a backedge. We
  // can do an early exit in that case to save compile time.
  if (!areAllValsInBB(Num, PhiBlock, Gvn))
    return Num;

  if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
    return Num;
  Expression Exp = Expressions[ExprIdx[Num]];

  for (unsigned i = 0; i < Exp.varargs.size(); i++) {
    // For InsertValue and ExtractValue, some varargs are index numbers
    // instead of value numbers. Those index numbers should not be
    // translated.
    if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
        (i > 0 && Exp.opcode == Instruction::ExtractValue))
      continue;
    Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
  }

  if (Exp.commutative) {
    assert(Exp.varargs.size() == 2 && "Unsupported commutative expression!");
    if (Exp.varargs[0] > Exp.varargs[1]) {
      std::swap(Exp.varargs[0], Exp.varargs[1]);
      uint32_t Opcode = Exp.opcode >> 8;
      if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
        Exp.opcode = (Opcode << 8) |
                     CmpInst::getSwappedPredicate(
                         static_cast<CmpInst::Predicate>(Exp.opcode & 255));
    }
  }

  if (uint32_t NewNum = expressionNumbering[Exp])
    return NewNum;
  return Num;
}

/// Erase stale entry from phiTranslate cache so phiTranslate can be computed
/// again.
void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
                                               const BasicBlock &CurrBlock) {
  for (const BasicBlock *Pred : predecessors(&CurrBlock)) {
    auto FindRes = PhiTranslateTable.find({Num, Pred});
    if (FindRes != PhiTranslateTable.end())
      PhiTranslateTable.erase(FindRes);
  }
}

// In order to find a leader for a given value number at a
// specific basic block, we first obtain the list of all Values for that number,
// and then scan the list to find one whose block dominates the block in
// question. This is fast because dominator tree queries consist of only
// a few comparisons of DFS numbers.
Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
  LeaderTableEntry Vals = LeaderTable[num];
  if (!Vals.Val) return nullptr;

  Value *Val = nullptr;
  if (DT->dominates(Vals.BB, BB)) {
    Val = Vals.Val;
    if (isa<Constant>(Val)) return Val;
  }

  LeaderTableEntry *Next = Vals.Next;
  while (Next) {
    if (DT->dominates(Next->BB, BB)) {
      if (isa<Constant>(Next->Val)) return Next->Val;
      if (!Val) Val = Next->Val;
    }

    Next = Next->Next;
  }

  return Val;
}

/// There is an edge from 'Src' to 'Dst'. Return
/// true if every path from the entry block to 'Dst' passes via this edge. In
/// particular 'Dst' must not be reachable via another edge from 'Src'.
static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
                                       DominatorTree *DT) {
  // While in theory it is interesting to consider the case in which Dst has
  // more than one predecessor, because Dst might be part of a loop which is
  // only reachable from Src, in practice it is pointless since at the time
  // GVN runs all such loops have preheaders, which means that Dst will have
  // been changed to have only one predecessor, namely Src.
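  // Hence it suffices to check that Dst has a single predecessor; if so, that
  // predecessor is necessarily Src.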
  const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
  assert((!Pred || Pred == E.getStart()) &&
         "No edge between these basic blocks!");
  return Pred != nullptr;
}

void GVN::assignBlockRPONumber(Function &F) {
  BlockRPONumber.clear();
  uint32_t NextBlockNumber = 1;
  ReversePostOrderTraversal<Function *> RPOT(&F);
  for (BasicBlock *BB : RPOT)
    BlockRPONumber[BB] = NextBlockNumber++;
  InvalidBlockRPONumbers = false;
}

// Try to replace an instruction's operands with constants, using information
// from ReplaceWithConstMap.
bool GVN::replaceOperandsWithConsts(Instruction *Instr) const {
  bool Changed = false;
  for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
    Value *Operand = Instr->getOperand(OpNum);
    auto it = ReplaceWithConstMap.find(Operand);
    if (it != ReplaceWithConstMap.end()) {
      assert(!isa<Constant>(Operand) &&
             "Replacing constants with constants is invalid");
      LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
                        << *it->second << " in instruction " << *Instr << '\n');
      Instr->setOperand(OpNum, it->second);
      Changed = true;
    }
  }
  return Changed;
}

/// The given values are known to be equal in every block
/// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
/// 'RHS' everywhere in the scope. Returns whether a change was made.
/// If DominatesByEdge is false, then it means that we will propagate the RHS
/// value starting from the end of Root.Start.
bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
                            bool DominatesByEdge) {
  SmallVector<std::pair<Value *, Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(LHS, RHS));
  bool Changed = false;
  // For speed, compute a conservative fast approximation to
  // DT->dominates(Root, Root.getEnd());
  const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);

  while (!Worklist.empty()) {
    std::pair<Value *, Value *> Item = Worklist.pop_back_val();
    LHS = Item.first; RHS = Item.second;

    if (LHS == RHS)
      continue;
    assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");

    // Don't try to propagate equalities between constants.
    if (isa<Constant>(LHS) && isa<Constant>(RHS))
      continue;

    // Prefer a constant on the right-hand side, or an Argument if no constants.
    if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
      std::swap(LHS, RHS);
    assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");

    // If there is no obvious reason to prefer the left-hand side over the
    // right-hand side, ensure the longest lived term is on the right-hand side,
    // so the shortest lived term will be replaced by the longest lived.
    // This tends to expose more simplifications.
    uint32_t LVN = VN.lookupOrAdd(LHS);
    if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
        (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
      // Move the 'oldest' value to the right-hand side, using the value number
      // as a proxy for age.
      uint32_t RVN = VN.lookupOrAdd(RHS);
      if (LVN < RVN) {
        std::swap(LHS, RHS);
        LVN = RVN;
      }
    }

    // If value numbering later sees that an instruction in the scope is equal
    // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
    // the invariant that instructions only occur in the leader table for their
    // own value number (this is used by removeFromLeaderTable), do not do this
    // if RHS is an instruction (if an instruction in the scope is morphed into
    // LHS then it will be turned into RHS by the next GVN iteration anyway, so
    // using the leader table is about compiling faster, not optimizing better).
    // The leader table only tracks basic blocks, not edges. Only add to the
    // leader table if we have the simple case where the edge dominates the end.
    if (RootDominatesEnd && !isa<Instruction>(RHS))
      addToLeaderTable(LVN, RHS, Root.getEnd());

    // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
    // LHS always has at least one use that is not dominated by Root, this will
    // never do anything if LHS has only one use.
    if (!LHS->hasOneUse()) {
      unsigned NumReplacements =
          DominatesByEdge
              ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
              : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());

      Changed |= NumReplacements > 0;
      NumGVNEqProp += NumReplacements;
      // Cached information for anything that uses LHS will be invalid.
      if (MD)
        MD->invalidateCachedPointerInfo(LHS);
    }

    // Now try to deduce additional equalities from this one. For example, if
    // the known equality was "(A != B)" == "false" then it follows that A and B
    // are equal in the scope. Only boolean equalities with an explicit true or
    // false RHS are currently supported.
    if (!RHS->getType()->isIntegerTy(1))
      // Not a boolean equality - bail out.
      continue;
    ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
    if (!CI)
      // RHS neither 'true' nor 'false' - bail out.
      continue;
    // Whether RHS equals 'true' (for i1, true is the all-ones value, -1).
    // Otherwise it equals 'false'.
    bool isKnownTrue = CI->isMinusOne();
    bool isKnownFalse = !isKnownTrue;

    // If "A && B" is known true then both A and B are known true. If "A || B"
    // is known false then both A and B are known false.
    Value *A, *B;
    if ((isKnownTrue && match(LHS, m_And(m_Value(A), m_Value(B)))) ||
        (isKnownFalse && match(LHS, m_Or(m_Value(A), m_Value(B))))) {
      Worklist.push_back(std::make_pair(A, RHS));
      Worklist.push_back(std::make_pair(B, RHS));
      continue;
    }

    // If we are propagating an equality like "(A == B)" == "true" then also
    // propagate the equality A == B. When propagating a comparison such as
    // "(A >= B)" == "true", replace all instances of "A < B" with "false".
    if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
      Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);

      // If "A == B" is known true, or "A != B" is known false, then replace
      // A with B everywhere in the scope.
      if ((isKnownTrue && Cmp->getPredicate() == CmpInst::ICMP_EQ) ||
          (isKnownFalse && Cmp->getPredicate() == CmpInst::ICMP_NE))
        Worklist.push_back(std::make_pair(Op0, Op1));

      // Handle the floating point versions of equality comparisons too.
      if ((isKnownTrue && Cmp->getPredicate() == CmpInst::FCMP_OEQ) ||
          (isKnownFalse && Cmp->getPredicate() == CmpInst::FCMP_UNE)) {

        // Floating point -0.0 and 0.0 compare equal, so we can only
        // propagate values if we know that we have a constant and that
        // its value is non-zero.
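        // (E.g. from "fcmp oeq float %x, 0.0" == true it would be unsound to
        // replace %x with 0.0, since %x might be -0.0.)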

        // FIXME: We should do this optimization if 'no signed zeros' is
        // applicable via an instruction-level fast-math-flag or some other
        // indicator that relaxed FP semantics are being used.

        if (isa<ConstantFP>(Op1) && !cast<ConstantFP>(Op1)->isZero())
          Worklist.push_back(std::make_pair(Op0, Op1));
      }

      // If "A >= B" is known true, replace "A < B" with false everywhere.
      CmpInst::Predicate NotPred = Cmp->getInversePredicate();
      Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
      // Since we don't have the instruction "A < B" immediately to hand, work
      // out the value number that it would have and use that to find an
      // appropriate instruction (if any).
      uint32_t NextNum = VN.getNextUnusedValueNumber();
      uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
      // If the number we were assigned was brand new then there is no point in
      // looking for an instruction realizing it: there cannot be one!
      if (Num < NextNum) {
        Value *NotCmp = findLeader(Root.getEnd(), Num);
        if (NotCmp && isa<Instruction>(NotCmp)) {
          unsigned NumReplacements =
              DominatesByEdge
                  ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
                  : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
                                             Root.getStart());
          Changed |= NumReplacements > 0;
          NumGVNEqProp += NumReplacements;
          // Cached information for anything that uses NotCmp will be invalid.
          if (MD)
            MD->invalidateCachedPointerInfo(NotCmp);
        }
      }
      // Ensure that any instruction in scope that gets the "A < B" value number
      // is replaced with false.
      // The leader table only tracks basic blocks, not edges. Only add to the
      // leader table if we have the simple case where the edge dominates the
      // end.
      if (RootDominatesEnd)
        addToLeaderTable(Num, NotVal, Root.getEnd());

      continue;
    }
  }

  return Changed;
}

/// When calculating availability, handle an instruction
/// by inserting it into the appropriate sets.
bool GVN::processInstruction(Instruction *I) {
  // Ignore dbg info intrinsics.
  if (isa<DbgInfoIntrinsic>(I))
    return false;

  // If the instruction can be easily simplified then do so now in preference
  // to value numbering it. Value numbering often exposes redundancies, for
  // example if it determines that %y is equal to %x then the instruction
  // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
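  // (The simplified "and i32 %x, %x" then folds to plain %x, so %z disappears
  // entirely.)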
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
    bool Changed = false;
    if (!I->use_empty()) {
      I->replaceAllUsesWith(V);
      Changed = true;
    }
    if (isInstructionTriviallyDead(I, TLI)) {
      markInstructionForDeletion(I);
      Changed = true;
    }
    if (Changed) {
      if (MD && V->getType()->isPtrOrPtrVectorTy())
        MD->invalidateCachedPointerInfo(V);
      ++NumGVNSimpl;
      return true;
    }
  }

  if (IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(I))
    if (IntrinsicI->getIntrinsicID() == Intrinsic::assume)
      return processAssumeIntrinsic(IntrinsicI);

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (processLoad(LI))
      return true;

    unsigned Num = VN.lookupOrAdd(LI);
    addToLeaderTable(Num, LI, LI->getParent());
    return false;
  }

  // For conditional branches, we can perform simple conditional propagation on
  // the condition value itself.
  if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!BI->isConditional())
      return false;

    if (isa<Constant>(BI->getCondition()))
      return processFoldableCondBr(BI);

    Value *BranchCond = BI->getCondition();
    BasicBlock *TrueSucc = BI->getSuccessor(0);
    BasicBlock *FalseSucc = BI->getSuccessor(1);
    // Avoid multiple edges early.
    if (TrueSucc == FalseSucc)
      return false;

    BasicBlock *Parent = BI->getParent();
    bool Changed = false;

    Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
    BasicBlockEdge TrueE(Parent, TrueSucc);
    Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);

    Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
    BasicBlockEdge FalseE(Parent, FalseSucc);
    Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);

    return Changed;
  }

  // For switches, propagate the case values into the case destinations.
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    Value *SwitchCond = SI->getCondition();
    BasicBlock *Parent = SI->getParent();
    bool Changed = false;

    // Remember how many outgoing edges there are to every successor.
    SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
    for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
      ++SwitchEdges[SI->getSuccessor(i)];

    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      BasicBlock *Dst = i->getCaseSuccessor();
      // If there is only a single edge, propagate the case value into it.
      if (SwitchEdges.lookup(Dst) == 1) {
        BasicBlockEdge E(Parent, Dst);
        Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
      }
    }
    return Changed;
  }

  // Instructions with void type don't return a value, so there's
  // no point in trying to find redundancies in them.
  if (I->getType()->isVoidTy())
    return false;

  uint32_t NextNum = VN.getNextUnusedValueNumber();
  unsigned Num = VN.lookupOrAdd(I);

  // Allocations are always uniquely numbered, so we can save time and memory
  // by fast failing them.
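  // (The same fast-fail applies to terminators and PHI nodes below; they are
  // still added to the leader table so they can act as leaders for other
  // instructions.)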
  if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // If the number we were assigned was a brand new VN, then we don't
  // need to do a lookup to see if the number already exists
  // somewhere in the domtree: it can't!
  if (Num >= NextNum) {
    addToLeaderTable(Num, I, I->getParent());
    return false;
  }

  // Perform fast-path value-number based elimination of values inherited from
  // dominators.
  Value *Repl = findLeader(I->getParent(), Num);
  if (!Repl) {
    // Failure, just remember this instance for future use.
    addToLeaderTable(Num, I, I->getParent());
    return false;
  } else if (Repl == I) {
    // If I was the result of a shortcut PRE, it might already be in the table
    // and the best replacement for itself. Nothing to do.
    return false;
  }

  // Remove it!
  patchAndReplaceAllUsesWith(I, Repl);
  if (MD && Repl->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Repl);
  markInstructionForDeletion(I);
  return true;
}

/// runImpl - This is the main transformation entry point for a function.
bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
                  const TargetLibraryInfo &RunTLI, AAResults &RunAA,
                  MemoryDependenceResults *RunMD, LoopInfo *LI,
                  OptimizationRemarkEmitter *RunORE) {
  AC = &RunAC;
  DT = &RunDT;
  VN.setDomTree(DT);
  TLI = &RunTLI;
  VN.setAliasAnalysis(&RunAA);
  MD = RunMD;
  ImplicitControlFlowTracking ImplicitCFT(DT);
  ICF = &ImplicitCFT;
  VN.setMemDep(MD);
  ORE = RunORE;
  InvalidBlockRPONumbers = true;

  bool Changed = false;
  bool ShouldContinue = true;

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  // Merge unconditional branches, allowing PRE to catch more
  // optimization opportunities.
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
    BasicBlock *BB = &*FI++;

    bool removedBlock = MergeBlockIntoPredecessor(BB, &DTU, LI, nullptr, MD);
    if (removedBlock)
      ++NumGVNBlocks;

    Changed |= removedBlock;
  }

  unsigned Iteration = 0;
  while (ShouldContinue) {
    LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
    ShouldContinue = iterateOnFunction(F);
    Changed |= ShouldContinue;
    ++Iteration;
  }

  if (EnablePRE) {
    // Fabricate val-nums for dead code in order to suppress assertions in
    // performPRE().
    assignValNumForDeadCode();
    bool PREChanged = true;
    while (PREChanged) {
      PREChanged = performPRE(F);
      Changed |= PREChanged;
    }
  }

  // FIXME: Should perform GVN again after PRE does something. PRE can move
  // computations into blocks where they become fully redundant. Note that
  // we can't do this until PRE's critical edge splitting updates memdep.
  // Actually, when this happens, we should just fully integrate PRE into GVN.

  cleanupGlobalSets();
  // Do not clean up DeadBlocks in cleanupGlobalSets() as it's called for each
  // iteration.
  DeadBlocks.clear();

  return Changed;
}

bool GVN::processBlock(BasicBlock *BB) {
  // FIXME: Kill off InstrsToErase by erasing eagerly in a helper function
  // (and incrementing BI before processing an instruction).
  assert(InstrsToErase.empty() &&
         "We expect InstrsToErase to be empty across iterations");
  if (DeadBlocks.count(BB))
    return false;

  // Clear the map before every BB because it is only valid for a single BB.
  ReplaceWithConstMap.clear();
  bool ChangedFunction = false;

  for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
       BI != BE;) {
    if (!ReplaceWithConstMap.empty())
      ChangedFunction |= replaceOperandsWithConsts(&*BI);
    ChangedFunction |= processInstruction(&*BI);

    if (InstrsToErase.empty()) {
      ++BI;
      continue;
    }

    // If we need some instructions deleted, do it now.
    NumGVNInstr += InstrsToErase.size();

    // Avoid iterator invalidation.
    bool AtStart = BI == BB->begin();
    if (!AtStart)
      --BI;

    for (auto *I : InstrsToErase) {
      assert(I->getParent() == BB && "Removing instruction from wrong block?");
      LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
      salvageDebugInfo(*I);
      if (MD) MD->removeInstruction(I);
      LLVM_DEBUG(verifyRemoved(I));
      ICF->removeInstruction(I);
      I->eraseFromParent();
    }
    InstrsToErase.clear();

    if (AtStart)
      BI = BB->begin();
    else
      ++BI;
  }

  return ChangedFunction;
}

// Instantiate an expression in a predecessor that lacked it.
bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
                                    BasicBlock *Curr, unsigned int ValNo) {
  // Because we are going top-down through the block, all value numbers
  // will be available in the predecessor by the time we need them. Any
  // that weren't originally present will have been instantiated earlier
  // in this loop.
  bool success = true;
  for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
    Value *Op = Instr->getOperand(i);
    if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
      continue;
    // This could be a newly inserted instruction, in which case we won't
    // find a value number, and should give up before we hurt ourselves.
    // FIXME: Rewrite the infrastructure to make it easier to value number
    // and process newly inserted instructions.
    if (!VN.exists(Op)) {
      success = false;
      break;
    }
    uint32_t TValNo =
        VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
    if (Value *V = findLeader(Pred, TValNo)) {
      Instr->setOperand(i, V);
    } else {
      success = false;
      break;
    }
  }

  // Fail out if we encounter an operand that is not available in
  // the PRE predecessor. This is typically because of loads which
  // are not value numbered precisely.
  if (!success)
    return false;

  Instr->insertBefore(Pred->getTerminator());
  Instr->setName(Instr->getName() + ".pre");
  Instr->setDebugLoc(Instr->getDebugLoc());

  unsigned Num = VN.lookupOrAdd(Instr);
  VN.add(Instr, Num);

  // Update the availability map to include the new instruction.
  addToLeaderTable(Num, Instr, Pred);
  return true;
}

bool GVN::performScalarPRE(Instruction *CurInst) {
  if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
      isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
      CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
      isa<DbgInfoIntrinsic>(CurInst))
    return false;

  // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
  // sinking the compare again, and it would force the code generator to
  // move the i1 from processor flags or predicate registers into a general
  // purpose register.
  if (isa<CmpInst>(CurInst))
    return false;

  // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
  // sinking the addressing mode computation back to its uses. Extending the
  // GEP's live range increases the register pressure, and therefore it can
  // introduce unnecessary spills.
  //
  // This doesn't prevent Load PRE. PHI translation will make the GEP available
  // to the load by moving it to the predecessor block if necessary.
  if (isa<GetElementPtrInst>(CurInst))
    return false;

  // We don't currently value number ANY inline asm calls.
  if (auto *CallB = dyn_cast<CallBase>(CurInst))
    if (CallB->isInlineAsm())
      return false;

  uint32_t ValNo = VN.lookup(CurInst);

  // Look at the predecessors for PRE opportunities. We're
  // only trying to solve the basic diamond case, where
  // a value is computed in the successor and one predecessor,
  // but not the other. We also explicitly disallow cases
  // where the successor is its own predecessor, because they're
  // more complicated to get right.
  unsigned NumWith = 0;
  unsigned NumWithout = 0;
  BasicBlock *PREPred = nullptr;
  BasicBlock *CurrentBlock = CurInst->getParent();

  // Update the RPO numbers for this function.
  if (InvalidBlockRPONumbers)
    assignBlockRPONumber(*CurrentBlock->getParent());

  SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
  for (BasicBlock *P : predecessors(CurrentBlock)) {
    // We're not interested in PRE when a predecessor block is not
    // reachable.
    if (!DT->isReachableFromEntry(P)) {
      NumWithout = 2;
      break;
    }
    // It is not safe to do PRE when P->CurrentBlock is a loop backedge and
    // CurInst has an operand defined in CurrentBlock (so it may be defined
    // by a phi in the loop header).
    assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
           "Invalid BlockRPONumber map.");
    if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
        llvm::any_of(CurInst->operands(), [&](const Use &U) {
          if (auto *Inst = dyn_cast<Instruction>(U.get()))
            return Inst->getParent() == CurrentBlock;
          return false;
        })) {
      NumWithout = 2;
      break;
    }

    uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
    Value *predV = findLeader(P, TValNo);
    if (!predV) {
      predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
      PREPred = P;
      ++NumWithout;
    } else if (predV == CurInst) {
      /* CurInst dominates this predecessor. */
      NumWithout = 2;
      break;
    } else {
      predMap.push_back(std::make_pair(predV, P));
      ++NumWith;
    }
  }

  // Don't do PRE when it might increase code size, i.e. when
  // we would need to insert instructions in more than one pred.
  if (NumWithout > 1 || NumWith == 0)
    return false;

  // We may have a case where all predecessors have the instruction,
  // and we just need to insert a phi node. Otherwise, perform
  // insertion.
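  // (E.g. in a diamond where "%v = add i32 %a, %b" already has a leader in
  // one predecessor but not in the other, the add is cloned into the
  // predecessor that lacks it and the copies are then merged with a phi
  // below.)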
  Instruction *PREInstr = nullptr;

  if (NumWithout != 0) {
    if (!isSafeToSpeculativelyExecute(CurInst)) {
      // It is only valid to insert a new instruction if the current instruction
      // is always executed. An instruction with implicit control flow could
      // prevent us from doing it. If we cannot speculate the execution, then
      // PRE should be prohibited.
      if (ICF->isDominatedByICFIFromSameBlock(CurInst))
        return false;
    }

    // Don't do PRE across indirect branch.
    if (isa<IndirectBrInst>(PREPred->getTerminator()))
      return false;

    // Don't do PRE across callbr.
    // FIXME: Can we do this across the fallthrough edge?
    if (isa<CallBrInst>(PREPred->getTerminator()))
      return false;

    // We can't do PRE safely on a critical edge, so instead we schedule
    // the edge to be split and perform the PRE the next time we iterate
    // on the function.
    unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
    if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
      toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
      return false;
    }
    // We need to insert somewhere, so let's give it a shot.
    PREInstr = CurInst->clone();
    if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
      // If we failed insertion, make sure we remove the instruction.
      LLVM_DEBUG(verifyRemoved(PREInstr));
      PREInstr->deleteValue();
      return false;
    }
  }

  // Either we should have filled in the PRE instruction, or we should
  // not have needed insertions.
  assert(PREInstr != nullptr || NumWithout == 0);

  ++NumGVNPRE;

  // Create a PHI to make the value available in this block.
  PHINode *Phi =
      PHINode::Create(CurInst->getType(), predMap.size(),
                      CurInst->getName() + ".pre-phi", &CurrentBlock->front());
  for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
    if (Value *V = predMap[i].first) {
      // If we use an existing value in this phi, we have to patch the original
      // value because the phi will be used to replace a later value.
      patchReplacementInstruction(CurInst, V);
      Phi->addIncoming(V, predMap[i].second);
    } else
      Phi->addIncoming(PREInstr, PREPred);
  }

  VN.add(Phi, ValNo);
  // After creating a new PHI for ValNo, the phi translate result for ValNo will
  // be changed, so erase the related stale entries in the phi translate cache.
  VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
  addToLeaderTable(ValNo, Phi, CurrentBlock);
  Phi->setDebugLoc(CurInst->getDebugLoc());
  CurInst->replaceAllUsesWith(Phi);
  if (MD && Phi->getType()->isPtrOrPtrVectorTy())
    MD->invalidateCachedPointerInfo(Phi);
  VN.erase(CurInst);
  removeFromLeaderTable(ValNo, CurInst, CurrentBlock);

  LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
  if (MD)
    MD->removeInstruction(CurInst);
  LLVM_DEBUG(verifyRemoved(CurInst));
  // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
  // some assertion failures.
  ICF->removeInstruction(CurInst);
  CurInst->eraseFromParent();
  ++NumGVNInstr;

  return true;
}

/// Perform a purely local form of PRE that looks for diamond
/// control flow patterns and attempts to perform simple PRE at the join point.
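/// Illustrative sketch (names hypothetical, not from the source): given
///   then:  %v1 = add i32 %a, %b  ; available here
///   else:  ; no leader for the add here
///   join:  %v2 = add i32 %a, %b  ; partially redundant
/// PRE clones the add into %else and replaces %v2 with
///   %v2.pre-phi = phi i32 [ %v1, %then ], [ %add.pre, %else ]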
bool GVN::performPRE(Function &F) {
  bool Changed = false;
  for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
    // Nothing to PRE in the entry block.
    if (CurrentBlock == &F.getEntryBlock())
      continue;

    // Don't perform PRE on an EH pad.
    if (CurrentBlock->isEHPad())
      continue;

    for (BasicBlock::iterator BI = CurrentBlock->begin(),
                              BE = CurrentBlock->end();
         BI != BE;) {
      Instruction *CurInst = &*BI++;
      Changed |= performScalarPRE(CurInst);
    }
  }

  if (splitCriticalEdges())
    Changed = true;

  return Changed;
}

/// Split the critical edge connecting the given two blocks, and return
/// the block inserted on the critical edge.
BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
  BasicBlock *BB =
      SplitCriticalEdge(Pred, Succ, CriticalEdgeSplittingOptions(DT));
  if (MD)
    MD->invalidateCachedPredecessors();
  InvalidBlockRPONumbers = true;
  return BB;
}

/// Split critical edges found during the previous
/// iteration that may enable further optimization.
bool GVN::splitCriticalEdges() {
  if (toSplit.empty())
    return false;
  do {
    std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
    SplitCriticalEdge(Edge.first, Edge.second,
                      CriticalEdgeSplittingOptions(DT));
  } while (!toSplit.empty());
  if (MD) MD->invalidateCachedPredecessors();
  InvalidBlockRPONumbers = true;
  return true;
}

/// Executes one iteration of GVN.
bool GVN::iterateOnFunction(Function &F) {
  cleanupGlobalSets();

  // Top-down walk of the dominator tree.
  bool Changed = false;
  // Needed for value numbering with phi construction to work.
  // RPOT walks the graph in its constructor and will not be invalidated during
  // processBlock.
  ReversePostOrderTraversal<Function *> RPOT(&F);

  for (BasicBlock *BB : RPOT)
    Changed |= processBlock(BB);

  return Changed;
}

void GVN::cleanupGlobalSets() {
  VN.clear();
  LeaderTable.clear();
  BlockRPONumber.clear();
  TableAllocator.Reset();
  ICF->clear();
  InvalidBlockRPONumbers = true;
}

/// Verify that the specified instruction does not occur in our
/// internal data structures.
void GVN::verifyRemoved(const Instruction *Inst) const {
  VN.verifyRemoved(Inst);

  // Walk through the value number scope to make sure the instruction isn't
  // ferreted away in it.
  for (DenseMap<uint32_t, LeaderTableEntry>::const_iterator
           I = LeaderTable.begin(), E = LeaderTable.end(); I != E; ++I) {
    const LeaderTableEntry *Node = &I->second;
    assert(Node->Val != Inst && "Inst still in value numbering scope!");

    while (Node->Next) {
      Node = Node->Next;
      assert(Node->Val != Inst && "Inst still in value numbering scope!");
    }
  }
}

/// BB is declared dead, which implies that other blocks become dead as well.
/// This function adds all such blocks to "DeadBlocks". For a dead block's
/// live successors, update their phi nodes by replacing the operands
/// corresponding to dead blocks with UndefVal.
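/// Worklist sketch: every block dominated by a newly dead block is dead; a
/// live successor whose predecessors all turn out to be dead is itself pushed
/// onto the worklist, while one that keeps a live predecessor lands on the
/// dominance frontier and only has its phi operands rewritten.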
void GVN::addDeadBlock(BasicBlock *BB) {
  SmallVector<BasicBlock *, 4> NewDead;
  SmallSetVector<BasicBlock *, 4> DF;

  NewDead.push_back(BB);
  while (!NewDead.empty()) {
    BasicBlock *D = NewDead.pop_back_val();
    if (DeadBlocks.count(D))
      continue;

    // All blocks dominated by D are dead.
    SmallVector<BasicBlock *, 8> Dom;
    DT->getDescendants(D, Dom);
    DeadBlocks.insert(Dom.begin(), Dom.end());

    // Figure out the dominance-frontier(D).
    for (BasicBlock *B : Dom) {
      for (BasicBlock *S : successors(B)) {
        if (DeadBlocks.count(S))
          continue;

        bool AllPredDead = true;
        for (BasicBlock *P : predecessors(S))
          if (!DeadBlocks.count(P)) {
            AllPredDead = false;
            break;
          }

        if (!AllPredDead) {
          // S could be proved dead later on. That is why we don't update phi
          // operands at this moment.
          DF.insert(S);
        } else {
          // Although S is not dominated by D, it is dead by now. This can
          // happen if S already had a dead predecessor before D was declared
          // dead.
          NewDead.push_back(S);
        }
      }
    }
  }

  // For the dead blocks' live successors, update their phi nodes by replacing
  // the operands corresponding to dead blocks with UndefVal.
  for (SmallSetVector<BasicBlock *, 4>::iterator I = DF.begin(), E = DF.end();
       I != E; I++) {
    BasicBlock *B = *I;
    if (DeadBlocks.count(B))
      continue;

    SmallVector<BasicBlock *, 4> Preds(pred_begin(B), pred_end(B));
    for (BasicBlock *P : Preds) {
      if (!DeadBlocks.count(P))
        continue;

      if (isCriticalEdge(P->getTerminator(), GetSuccessorNumber(P, B))) {
        if (BasicBlock *S = splitCriticalEdges(P, B))
          DeadBlocks.insert(P = S);
      }

      for (BasicBlock::iterator II = B->begin(); isa<PHINode>(II); ++II) {
        PHINode &Phi = cast<PHINode>(*II);
        Phi.setIncomingValue(Phi.getBasicBlockIndex(P),
                             UndefValue::get(Phi.getType()));
        if (MD)
          MD->invalidateCachedPointerInfo(&Phi);
      }
    }
  }
}

// If the given branch is recognized as a foldable branch (i.e. a conditional
// branch with a constant condition), this function performs the following
// analyses and transformation:
// 0) If the dead outgoing edge is a critical edge, split it. Let R be the
//    target of the dead outgoing edge.
// 1) Identify the set of dead blocks implied by the branch's dead outgoing
//    edge. The result of this step is {X | X is dominated by R}.
// 2) Identify those blocks that have at least one dead predecessor. The
//    result of this step is dominance-frontier(R).
// 3) Update the PHIs in DF(R) by replacing the operands corresponding to
//    dead blocks with "UndefVal", in the hope that these PHIs will be
//    optimized away.
//
// Return true iff *NEW* dead code is found.
bool GVN::processFoldableCondBr(BranchInst *BI) {
  if (!BI || BI->isUnconditional())
    return false;

  // If a branch has two identical successors, we cannot declare either dead.
  if (BI->getSuccessor(0) == BI->getSuccessor(1))
    return false;

  ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
  if (!Cond)
    return false;

  BasicBlock *DeadRoot = Cond->getZExtValue() ?
                             BI->getSuccessor(1) : BI->getSuccessor(0);
  if (DeadBlocks.count(DeadRoot))
    return false;

  if (!DeadRoot->getSinglePredecessor())
    DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);

  addDeadBlock(DeadRoot);
  return true;
}

// performPRE() will trigger an assert if it comes across an instruction
// without an associated val-num. As there are normally far more live
// instructions than dead ones, it makes more sense just to "fabricate" a
// val-num for the dead code than to check whether each instruction involved
// is dead or not.
void GVN::assignValNumForDeadCode() {
  for (BasicBlock *BB : DeadBlocks) {
    for (Instruction &Inst : *BB) {
      unsigned ValNum = VN.lookupOrAdd(&Inst);
      addToLeaderTable(ValNum, &Inst, BB);
    }
  }
}

class llvm::gvn::GVNLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  explicit GVNLegacyPass(bool NoMemDepAnalysis = !EnableMemDep)
      : FunctionPass(ID), NoMemDepAnalysis(NoMemDepAnalysis) {
    initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

    return Impl.runImpl(
        F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
        getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
        getAnalysis<AAResultsWrapperPass>().getAAResults(),
        NoMemDepAnalysis
            ? nullptr
            : &getAnalysis<MemoryDependenceWrapperPass>().getMemDep(),
        LIWP ? &LIWP->getLoopInfo() : nullptr,
        &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (!NoMemDepAnalysis)
      AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();

    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  }

private:
  bool NoMemDepAnalysis;
  GVN Impl;
};

char GVNLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)

// The public interface to this file...
FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
  return new GVNLegacyPass(NoMemDepAnalysis);
}
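// Usage sketch (illustrative, not part of this file): with the legacy pass
// manager, the pass created above can be scheduled like so, where M is an
// llvm::Module built elsewhere:
//
//   legacy::PassManager PM;
//   PM.add(createGVNPass(/*NoMemDepAnalysis=*/false));
//   PM.run(M);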