1 //===- SCCP.cpp - Sparse Conditional Constant Propagation -----------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements sparse conditional constant propagation and merging: 10 // 11 // Specifically, this: 12 // * Assumes values are constant unless proven otherwise 13 // * Assumes BasicBlocks are dead unless proven otherwise 14 // * Proves values to be constant, and replaces them with constants 15 // * Proves conditional branches to be unconditional 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/Transforms/Scalar/SCCP.h" 20 #include "llvm/ADT/ArrayRef.h" 21 #include "llvm/ADT/DenseMap.h" 22 #include "llvm/ADT/DenseSet.h" 23 #include "llvm/ADT/MapVector.h" 24 #include "llvm/ADT/PointerIntPair.h" 25 #include "llvm/ADT/STLExtras.h" 26 #include "llvm/ADT/SmallPtrSet.h" 27 #include "llvm/ADT/SmallVector.h" 28 #include "llvm/ADT/Statistic.h" 29 #include "llvm/Analysis/ConstantFolding.h" 30 #include "llvm/Analysis/GlobalsModRef.h" 31 #include "llvm/Analysis/TargetLibraryInfo.h" 32 #include "llvm/Analysis/ValueLattice.h" 33 #include "llvm/Analysis/ValueLatticeUtils.h" 34 #include "llvm/IR/BasicBlock.h" 35 #include "llvm/IR/CallSite.h" 36 #include "llvm/IR/Constant.h" 37 #include "llvm/IR/Constants.h" 38 #include "llvm/IR/DataLayout.h" 39 #include "llvm/IR/DerivedTypes.h" 40 #include "llvm/IR/Function.h" 41 #include "llvm/IR/GlobalVariable.h" 42 #include "llvm/IR/InstVisitor.h" 43 #include "llvm/IR/InstrTypes.h" 44 #include "llvm/IR/Instruction.h" 45 #include "llvm/IR/Instructions.h" 46 #include "llvm/IR/Module.h" 47 #include "llvm/IR/PassManager.h" 48 #include "llvm/IR/Type.h" 49 #include "llvm/IR/User.h" 50 #include 
"llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include <cassert>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "sccp"

STATISTIC(NumInstRemoved, "Number of instructions removed");
STATISTIC(NumDeadBlocks , "Number of basic blocks unreachable");

STATISTIC(IPNumInstRemoved, "Number of instructions removed by IPSCCP");
STATISTIC(IPNumArgsElimed ,"Number of arguments constant propagated by IPSCCP");
STATISTIC(IPNumGlobalConst, "Number of globals found to be constant by IPSCCP");

namespace {

// Use ValueLatticeElement as LatticeVal.
using LatticeVal = ValueLatticeElement;

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old LatticeVal::isConstant() and is intended to be used in the transition
// from LatticeVal to ValueLatticeElement.
bool isConstant(const LatticeVal &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}

// Helper to check if \p LV is either overdefined or a constant range with more
// than a single element. This should cover exactly the same cases as the old
// LatticeVal::isOverdefined() and is intended to be used in the transition from
// LatticeVal to ValueLatticeElement.
93 bool isOverdefined(const LatticeVal &LV) { 94 return LV.isOverdefined() || 95 (LV.isConstantRange() && !LV.getConstantRange().isSingleElement()); 96 } 97 98 //===----------------------------------------------------------------------===// 99 // 100 /// SCCPSolver - This class is a general purpose solver for Sparse Conditional 101 /// Constant Propagation. 102 /// 103 class SCCPSolver : public InstVisitor<SCCPSolver> { 104 const DataLayout &DL; 105 std::function<const TargetLibraryInfo &(Function &)> GetTLI; 106 SmallPtrSet<BasicBlock *, 8> BBExecutable; // The BBs that are executable. 107 DenseMap<Value *, LatticeVal> ValueState; // The state each value is in. 108 109 /// StructValueState - This maintains ValueState for values that have 110 /// StructType, for example for formal arguments, calls, insertelement, etc. 111 DenseMap<std::pair<Value *, unsigned>, LatticeVal> StructValueState; 112 113 /// GlobalValue - If we are tracking any values for the contents of a global 114 /// variable, we keep a mapping from the constant accessor to the element of 115 /// the global, to the currently known value. If the value becomes 116 /// overdefined, it's entry is simply removed from this map. 117 DenseMap<GlobalVariable *, LatticeVal> TrackedGlobals; 118 119 /// TrackedRetVals - If we are tracking arguments into and the return 120 /// value out of a function, it will have an entry in this map, indicating 121 /// what the known return value for the function is. 122 MapVector<Function *, LatticeVal> TrackedRetVals; 123 124 /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions 125 /// that return multiple values. 126 MapVector<std::pair<Function *, unsigned>, LatticeVal> TrackedMultipleRetVals; 127 128 /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is 129 /// represented here for efficient lookup. 
130 SmallPtrSet<Function *, 16> MRVFunctionsTracked; 131 132 /// MustTailFunctions - Each function here is a callee of non-removable 133 /// musttail call site. 134 SmallPtrSet<Function *, 16> MustTailCallees; 135 136 /// TrackingIncomingArguments - This is the set of functions for whose 137 /// arguments we make optimistic assumptions about and try to prove as 138 /// constants. 139 SmallPtrSet<Function *, 16> TrackingIncomingArguments; 140 141 /// The reason for two worklists is that overdefined is the lowest state 142 /// on the lattice, and moving things to overdefined as fast as possible 143 /// makes SCCP converge much faster. 144 /// 145 /// By having a separate worklist, we accomplish this because everything 146 /// possibly overdefined will become overdefined at the soonest possible 147 /// point. 148 SmallVector<Value *, 64> OverdefinedInstWorkList; 149 SmallVector<Value *, 64> InstWorkList; 150 151 // The BasicBlock work list 152 SmallVector<BasicBlock *, 64> BBWorkList; 153 154 /// KnownFeasibleEdges - Entries in this set are edges which have already had 155 /// PHI nodes retriggered. 
156 using Edge = std::pair<BasicBlock *, BasicBlock *>; 157 DenseSet<Edge> KnownFeasibleEdges; 158 159 DenseMap<Function *, AnalysisResultsForFn> AnalysisResults; 160 DenseMap<Value *, SmallPtrSet<User *, 2>> AdditionalUsers; 161 162 LLVMContext &Ctx; 163 164 public: 165 void addAnalysis(Function &F, AnalysisResultsForFn A) { 166 AnalysisResults.insert({&F, std::move(A)}); 167 } 168 169 const PredicateBase *getPredicateInfoFor(Instruction *I) { 170 auto A = AnalysisResults.find(I->getParent()->getParent()); 171 if (A == AnalysisResults.end()) 172 return nullptr; 173 return A->second.PredInfo->getPredicateInfoFor(I); 174 } 175 176 DomTreeUpdater getDTU(Function &F) { 177 auto A = AnalysisResults.find(&F); 178 assert(A != AnalysisResults.end() && "Need analysis results for function."); 179 return {A->second.DT, A->second.PDT, DomTreeUpdater::UpdateStrategy::Lazy}; 180 } 181 182 SCCPSolver(const DataLayout &DL, 183 std::function<const TargetLibraryInfo &(Function &)> GetTLI, 184 LLVMContext &Ctx) 185 : DL(DL), GetTLI(std::move(GetTLI)), Ctx(Ctx) {} 186 187 /// MarkBlockExecutable - This method can be used by clients to mark all of 188 /// the blocks that are known to be intrinsically live in the processed unit. 189 /// 190 /// This returns true if the block was not considered live before. 191 bool MarkBlockExecutable(BasicBlock *BB) { 192 if (!BBExecutable.insert(BB).second) 193 return false; 194 LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n'); 195 BBWorkList.push_back(BB); // Add the block to the work list! 196 return true; 197 } 198 199 /// TrackValueOfGlobalVariable - Clients can use this method to 200 /// inform the SCCPSolver that it should track loads and stores to the 201 /// specified global variable if it can. This is only legal to call if 202 /// performing Interprocedural SCCP. 203 void TrackValueOfGlobalVariable(GlobalVariable *GV) { 204 // We only track the contents of scalar globals. 
205 if (GV->getValueType()->isSingleValueType()) { 206 LatticeVal &IV = TrackedGlobals[GV]; 207 if (!isa<UndefValue>(GV->getInitializer())) 208 IV.markConstant(GV->getInitializer()); 209 } 210 } 211 212 /// AddTrackedFunction - If the SCCP solver is supposed to track calls into 213 /// and out of the specified function (which cannot have its address taken), 214 /// this method must be called. 215 void AddTrackedFunction(Function *F) { 216 // Add an entry, F -> undef. 217 if (auto *STy = dyn_cast<StructType>(F->getReturnType())) { 218 MRVFunctionsTracked.insert(F); 219 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 220 TrackedMultipleRetVals.insert(std::make_pair(std::make_pair(F, i), 221 LatticeVal())); 222 } else 223 TrackedRetVals.insert(std::make_pair(F, LatticeVal())); 224 } 225 226 /// AddMustTailCallee - If the SCCP solver finds that this function is called 227 /// from non-removable musttail call site. 228 void AddMustTailCallee(Function *F) { 229 MustTailCallees.insert(F); 230 } 231 232 /// Returns true if the given function is called from non-removable musttail 233 /// call site. 234 bool isMustTailCallee(Function *F) { 235 return MustTailCallees.count(F); 236 } 237 238 void AddArgumentTrackedFunction(Function *F) { 239 TrackingIncomingArguments.insert(F); 240 } 241 242 /// Returns true if the given function is in the solver's set of 243 /// argument-tracked functions. 244 bool isArgumentTrackedFunction(Function *F) { 245 return TrackingIncomingArguments.count(F); 246 } 247 248 /// Solve - Solve for constants and executable blocks. 249 void Solve(); 250 251 /// ResolvedUndefsIn - While solving the dataflow for a function, we assume 252 /// that branches on undef values cannot reach any of their successors. 253 /// However, this is not a safe assumption. After we solve dataflow, this 254 /// method should be use to handle this. If this returns true, the solver 255 /// should be rerun. 
256 bool ResolvedUndefsIn(Function &F); 257 258 bool isBlockExecutable(BasicBlock *BB) const { 259 return BBExecutable.count(BB); 260 } 261 262 // isEdgeFeasible - Return true if the control flow edge from the 'From' basic 263 // block to the 'To' basic block is currently feasible. 264 bool isEdgeFeasible(BasicBlock *From, BasicBlock *To); 265 266 std::vector<LatticeVal> getStructLatticeValueFor(Value *V) const { 267 std::vector<LatticeVal> StructValues; 268 auto *STy = dyn_cast<StructType>(V->getType()); 269 assert(STy && "getStructLatticeValueFor() can be called only on structs"); 270 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 271 auto I = StructValueState.find(std::make_pair(V, i)); 272 assert(I != StructValueState.end() && "Value not in valuemap!"); 273 StructValues.push_back(I->second); 274 } 275 return StructValues; 276 } 277 278 const LatticeVal &getLatticeValueFor(Value *V) const { 279 assert(!V->getType()->isStructTy() && 280 "Should use getStructLatticeValueFor"); 281 DenseMap<Value *, LatticeVal>::const_iterator I = ValueState.find(V); 282 assert(I != ValueState.end() && 283 "V not found in ValueState nor Paramstate map!"); 284 return I->second; 285 } 286 287 /// getTrackedRetVals - Get the inferred return value map. 288 const MapVector<Function*, LatticeVal> &getTrackedRetVals() { 289 return TrackedRetVals; 290 } 291 292 /// getTrackedGlobals - Get and return the set of inferred initializers for 293 /// global variables. 294 const DenseMap<GlobalVariable*, LatticeVal> &getTrackedGlobals() { 295 return TrackedGlobals; 296 } 297 298 /// getMRVFunctionsTracked - Get the set of functions which return multiple 299 /// values tracked by the pass. 300 const SmallPtrSet<Function *, 16> getMRVFunctionsTracked() { 301 return MRVFunctionsTracked; 302 } 303 304 /// getMustTailCallees - Get the set of functions which are called 305 /// from non-removable musttail call sites. 
306 const SmallPtrSet<Function *, 16> getMustTailCallees() { 307 return MustTailCallees; 308 } 309 310 /// markOverdefined - Mark the specified value overdefined. This 311 /// works with both scalars and structs. 312 void markOverdefined(Value *V) { 313 if (auto *STy = dyn_cast<StructType>(V->getType())) 314 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 315 markOverdefined(getStructValueState(V, i), V); 316 else 317 markOverdefined(ValueState[V], V); 318 } 319 320 // isStructLatticeConstant - Return true if all the lattice values 321 // corresponding to elements of the structure are constants, 322 // false otherwise. 323 bool isStructLatticeConstant(Function *F, StructType *STy) { 324 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 325 const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i)); 326 assert(It != TrackedMultipleRetVals.end()); 327 LatticeVal LV = It->second; 328 if (!isConstant(LV)) 329 return false; 330 } 331 return true; 332 } 333 334 /// Helper to return a Constant if \p LV is either a constant or a constant 335 /// range with a single element. 336 Constant *getConstant(const LatticeVal &LV) const { 337 if (LV.isConstant()) 338 return LV.getConstant(); 339 340 if (LV.isConstantRange()) { 341 auto &CR = LV.getConstantRange(); 342 if (CR.getSingleElement()) 343 return ConstantInt::get(Ctx, *CR.getSingleElement()); 344 } 345 return nullptr; 346 } 347 348 private: 349 ConstantInt *getConstantInt(const LatticeVal &IV) const { 350 return dyn_cast_or_null<ConstantInt>(getConstant(IV)); 351 } 352 353 // pushToWorkList - Helper for markConstant/markOverdefined 354 void pushToWorkList(LatticeVal &IV, Value *V) { 355 if (isOverdefined(IV)) 356 return OverdefinedInstWorkList.push_back(V); 357 InstWorkList.push_back(V); 358 } 359 360 // Helper to push \p V to the worklist, after updating it to \p IV. Also 361 // prints a debug message with the updated value. 
362 void pushToWorkListMsg(LatticeVal &IV, Value *V) { 363 LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n'); 364 if (isOverdefined(IV)) 365 return OverdefinedInstWorkList.push_back(V); 366 InstWorkList.push_back(V); 367 } 368 369 // markConstant - Make a value be marked as "constant". If the value 370 // is not already a constant, add it to the instruction work list so that 371 // the users of the instruction are updated later. 372 bool markConstant(LatticeVal &IV, Value *V, Constant *C) { 373 if (!IV.markConstant(C)) return false; 374 LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n'); 375 pushToWorkList(IV, V); 376 return true; 377 } 378 379 bool markConstant(Value *V, Constant *C) { 380 assert(!V->getType()->isStructTy() && "structs should use mergeInValue"); 381 return markConstant(ValueState[V], V, C); 382 } 383 384 // markOverdefined - Make a value be marked as "overdefined". If the 385 // value is not already overdefined, add it to the overdefined instruction 386 // work list so that the users of the instruction are updated later. 387 bool markOverdefined(LatticeVal &IV, Value *V) { 388 if (!IV.markOverdefined()) return false; 389 390 LLVM_DEBUG(dbgs() << "markOverdefined: "; 391 if (auto *F = dyn_cast<Function>(V)) dbgs() 392 << "Function '" << F->getName() << "'\n"; 393 else dbgs() << *V << '\n'); 394 // Only instructions go on the work list 395 pushToWorkList(IV, V); 396 return true; 397 } 398 399 bool mergeInValue(LatticeVal &IV, Value *V, LatticeVal MergeWithV, 400 bool Widen = true) { 401 // Do a simple form of widening, to avoid extending a range repeatedly in a 402 // loop. If IV is a constant range, it means we already set it once. If 403 // MergeWithV would extend IV, mark V as overdefined. 
404 if (Widen && IV.isConstantRange() && MergeWithV.isConstantRange() && 405 !IV.getConstantRange().contains(MergeWithV.getConstantRange())) { 406 markOverdefined(IV, V); 407 return true; 408 } 409 if (IV.mergeIn(MergeWithV, DL)) { 410 pushToWorkList(IV, V); 411 LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : " 412 << IV << "\n"); 413 return true; 414 } 415 return false; 416 } 417 418 bool mergeInValue(Value *V, LatticeVal MergeWithV, bool Widen = true) { 419 assert(!V->getType()->isStructTy() && 420 "non-structs should use markConstant"); 421 return mergeInValue(ValueState[V], V, MergeWithV, Widen); 422 } 423 424 /// getValueState - Return the LatticeVal object that corresponds to the 425 /// value. This function handles the case when the value hasn't been seen yet 426 /// by properly seeding constants etc. 427 LatticeVal &getValueState(Value *V) { 428 assert(!V->getType()->isStructTy() && "Should use getStructValueState"); 429 430 std::pair<DenseMap<Value*, LatticeVal>::iterator, bool> I = 431 ValueState.insert(std::make_pair(V, LatticeVal())); 432 LatticeVal &LV = I.first->second; 433 434 if (!I.second) 435 return LV; // Common case, already in the map. 436 437 if (auto *C = dyn_cast<Constant>(V)) 438 LV.markConstant(C); // Constants are constant 439 440 // All others are unknown by default. 441 return LV; 442 } 443 444 LatticeVal toLatticeVal(const ValueLatticeElement &V, Type *T) { 445 LatticeVal Res; 446 if (V.isUnknownOrUndef()) 447 return Res; 448 449 if (V.isConstant()) { 450 Res.markConstant(V.getConstant()); 451 return Res; 452 } 453 if (V.isConstantRange() && V.getConstantRange().isSingleElement()) { 454 Res.markConstant( 455 ConstantInt::get(T, *V.getConstantRange().getSingleElement())); 456 return Res; 457 } 458 Res.markOverdefined(); 459 return Res; 460 } 461 462 /// getStructValueState - Return the LatticeVal object that corresponds to the 463 /// value/field pair. 
This function handles the case when the value hasn't 464 /// been seen yet by properly seeding constants etc. 465 LatticeVal &getStructValueState(Value *V, unsigned i) { 466 assert(V->getType()->isStructTy() && "Should use getValueState"); 467 assert(i < cast<StructType>(V->getType())->getNumElements() && 468 "Invalid element #"); 469 470 std::pair<DenseMap<std::pair<Value*, unsigned>, LatticeVal>::iterator, 471 bool> I = StructValueState.insert( 472 std::make_pair(std::make_pair(V, i), LatticeVal())); 473 LatticeVal &LV = I.first->second; 474 475 if (!I.second) 476 return LV; // Common case, already in the map. 477 478 if (auto *C = dyn_cast<Constant>(V)) { 479 Constant *Elt = C->getAggregateElement(i); 480 481 if (!Elt) 482 LV.markOverdefined(); // Unknown sort of constant. 483 else if (isa<UndefValue>(Elt)) 484 ; // Undef values remain unknown. 485 else 486 LV.markConstant(Elt); // Constants are constant. 487 } 488 489 // All others are underdefined by default. 490 return LV; 491 } 492 493 /// markEdgeExecutable - Mark a basic block as executable, adding it to the BB 494 /// work list if it is not already executable. 495 bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) { 496 if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second) 497 return false; // This edge is already known to be executable! 498 499 if (!MarkBlockExecutable(Dest)) { 500 // If the destination is already executable, we just made an *edge* 501 // feasible that wasn't before. Revisit the PHI nodes in the block 502 // because they have potentially new operands. 503 LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName() 504 << " -> " << Dest->getName() << '\n'); 505 506 for (PHINode &PN : Dest->phis()) 507 visitPHINode(PN); 508 } 509 return true; 510 } 511 512 // getFeasibleSuccessors - Return a vector of booleans to indicate which 513 // successors are reachable from a given terminator instruction. 
514 void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs); 515 516 // OperandChangedState - This method is invoked on all of the users of an 517 // instruction that was just changed state somehow. Based on this 518 // information, we need to update the specified user of this instruction. 519 void OperandChangedState(Instruction *I) { 520 if (BBExecutable.count(I->getParent())) // Inst is executable? 521 visit(*I); 522 } 523 524 // Add U as additional user of V. 525 void addAdditionalUser(Value *V, User *U) { 526 auto Iter = AdditionalUsers.insert({V, {}}); 527 Iter.first->second.insert(U); 528 } 529 530 // Mark I's users as changed, including AdditionalUsers. 531 void markUsersAsChanged(Value *I) { 532 for (User *U : I->users()) 533 if (auto *UI = dyn_cast<Instruction>(U)) 534 OperandChangedState(UI); 535 536 auto Iter = AdditionalUsers.find(I); 537 if (Iter != AdditionalUsers.end()) { 538 for (User *U : Iter->second) 539 if (auto *UI = dyn_cast<Instruction>(U)) 540 OperandChangedState(UI); 541 } 542 } 543 544 private: 545 friend class InstVisitor<SCCPSolver>; 546 547 // visit implementations - Something changed in this instruction. Either an 548 // operand made a transition, or the instruction is newly executable. Change 549 // the value type of I to reflect these changes if appropriate. 550 void visitPHINode(PHINode &I); 551 552 // Terminators 553 554 void visitReturnInst(ReturnInst &I); 555 void visitTerminator(Instruction &TI); 556 557 void visitCastInst(CastInst &I); 558 void visitSelectInst(SelectInst &I); 559 void visitUnaryOperator(Instruction &I); 560 void visitBinaryOperator(Instruction &I); 561 void visitCmpInst(CmpInst &I); 562 void visitExtractValueInst(ExtractValueInst &EVI); 563 void visitInsertValueInst(InsertValueInst &IVI); 564 565 void visitCatchSwitchInst(CatchSwitchInst &CPI) { 566 markOverdefined(&CPI); 567 visitTerminator(CPI); 568 } 569 570 // Instructions that cannot be folded away. 
571 572 void visitStoreInst (StoreInst &I); 573 void visitLoadInst (LoadInst &I); 574 void visitGetElementPtrInst(GetElementPtrInst &I); 575 576 void visitCallInst (CallInst &I) { 577 visitCallSite(&I); 578 } 579 580 void visitInvokeInst (InvokeInst &II) { 581 visitCallSite(&II); 582 visitTerminator(II); 583 } 584 585 void visitCallBrInst (CallBrInst &CBI) { 586 visitCallSite(&CBI); 587 visitTerminator(CBI); 588 } 589 590 void visitCallSite (CallSite CS); 591 void visitResumeInst (ResumeInst &I) { /*returns void*/ } 592 void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ } 593 void visitFenceInst (FenceInst &I) { /*returns void*/ } 594 595 void visitInstruction(Instruction &I) { 596 // All the instructions we don't do any special handling for just 597 // go to overdefined. 598 LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n'); 599 markOverdefined(&I); 600 } 601 }; 602 603 } // end anonymous namespace 604 605 // getFeasibleSuccessors - Return a vector of booleans to indicate which 606 // successors are reachable from a given terminator instruction. 607 void SCCPSolver::getFeasibleSuccessors(Instruction &TI, 608 SmallVectorImpl<bool> &Succs) { 609 Succs.resize(TI.getNumSuccessors()); 610 if (auto *BI = dyn_cast<BranchInst>(&TI)) { 611 if (BI->isUnconditional()) { 612 Succs[0] = true; 613 return; 614 } 615 616 LatticeVal BCValue = getValueState(BI->getCondition()); 617 ConstantInt *CI = getConstantInt(BCValue); 618 if (!CI) { 619 // Overdefined condition variables, and branches on unfoldable constant 620 // conditions, mean the branch could go either way. 621 if (!BCValue.isUnknownOrUndef()) 622 Succs[0] = Succs[1] = true; 623 return; 624 } 625 626 // Constant condition variables mean the branch can only go a single way. 627 Succs[CI->isZero()] = true; 628 return; 629 } 630 631 // Unwinding instructions successors are always executable. 
632 if (TI.isExceptionalTerminator()) { 633 Succs.assign(TI.getNumSuccessors(), true); 634 return; 635 } 636 637 if (auto *SI = dyn_cast<SwitchInst>(&TI)) { 638 if (!SI->getNumCases()) { 639 Succs[0] = true; 640 return; 641 } 642 LatticeVal SCValue = getValueState(SI->getCondition()); 643 ConstantInt *CI = getConstantInt(SCValue); 644 645 if (!CI) { // Overdefined or unknown condition? 646 // All destinations are executable! 647 if (!SCValue.isUnknownOrUndef()) 648 Succs.assign(TI.getNumSuccessors(), true); 649 return; 650 } 651 652 Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true; 653 return; 654 } 655 656 // In case of indirect branch and its address is a blockaddress, we mark 657 // the target as executable. 658 if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) { 659 // Casts are folded by visitCastInst. 660 LatticeVal IBRValue = getValueState(IBR->getAddress()); 661 BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(getConstant(IBRValue)); 662 if (!Addr) { // Overdefined or unknown condition? 663 // All destinations are executable! 664 if (!IBRValue.isUnknownOrUndef()) 665 Succs.assign(TI.getNumSuccessors(), true); 666 return; 667 } 668 669 BasicBlock* T = Addr->getBasicBlock(); 670 assert(Addr->getFunction() == T->getParent() && 671 "Block address of a different function ?"); 672 for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) { 673 // This is the target. 674 if (IBR->getDestination(i) == T) { 675 Succs[i] = true; 676 return; 677 } 678 } 679 680 // If we didn't find our destination in the IBR successor list, then we 681 // have undefined behavior. Its ok to assume no successor is executable. 682 return; 683 } 684 685 // In case of callbr, we pessimistically assume that all successors are 686 // feasible. 
687 if (isa<CallBrInst>(&TI)) { 688 Succs.assign(TI.getNumSuccessors(), true); 689 return; 690 } 691 692 LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n'); 693 llvm_unreachable("SCCP: Don't know how to handle this terminator!"); 694 } 695 696 // isEdgeFeasible - Return true if the control flow edge from the 'From' basic 697 // block to the 'To' basic block is currently feasible. 698 bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) { 699 // Check if we've called markEdgeExecutable on the edge yet. (We could 700 // be more aggressive and try to consider edges which haven't been marked 701 // yet, but there isn't any need.) 702 return KnownFeasibleEdges.count(Edge(From, To)); 703 } 704 705 // visit Implementations - Something changed in this instruction, either an 706 // operand made a transition, or the instruction is newly executable. Change 707 // the value type of I to reflect these changes if appropriate. This method 708 // makes sure to do the following actions: 709 // 710 // 1. If a phi node merges two constants in, and has conflicting value coming 711 // from different branches, or if the PHI node merges in an overdefined 712 // value, then the PHI node becomes overdefined. 713 // 2. If a phi node merges only constants in, and they all agree on value, the 714 // PHI node becomes a constant value equal to that. 715 // 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant 716 // 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined 717 // 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined 718 // 6. If a conditional branch has a value that is constant, make the selected 719 // destination executable 720 // 7. If a conditional branch has a value that is overdefined, make all 721 // successors executable. 722 void SCCPSolver::visitPHINode(PHINode &PN) { 723 // If this PN returns a struct, just mark the result overdefined. 
724 // TODO: We could do a lot better than this if code actually uses this. 725 if (PN.getType()->isStructTy()) 726 return (void)markOverdefined(&PN); 727 728 if (isOverdefined(getValueState(&PN))) { 729 return (void)markOverdefined(&PN); 730 } 731 732 // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant, 733 // and slow us down a lot. Just mark them overdefined. 734 if (PN.getNumIncomingValues() > 64) 735 return (void)markOverdefined(&PN); 736 737 // Look at all of the executable operands of the PHI node. If any of them 738 // are overdefined, the PHI becomes overdefined as well. If they are all 739 // constant, and they agree with each other, the PHI becomes the identical 740 // constant. If they are constant and don't agree, the PHI is overdefined. 741 // If there are no executable operands, the PHI remains unknown. 742 Constant *OperandVal = nullptr; 743 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) { 744 LatticeVal IV = getValueState(PN.getIncomingValue(i)); 745 if (IV.isUnknownOrUndef()) continue; // Doesn't influence PHI node. 746 747 if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent())) 748 continue; 749 750 if (isOverdefined(IV)) // PHI node becomes overdefined! 751 return (void)markOverdefined(&PN); 752 753 if (!OperandVal) { // Grab the first value. 754 OperandVal = getConstant(IV); 755 continue; 756 } 757 758 // There is already a reachable operand. If we conflict with it, 759 // then the PHI node becomes overdefined. If we agree with it, we 760 // can continue on. 761 762 // Check to see if there are two different constants merging, if so, the PHI 763 // node is overdefined. 764 if (getConstant(IV) != OperandVal) 765 return (void)markOverdefined(&PN); 766 } 767 768 // If we exited the loop, this means that the PHI node only has constant 769 // arguments that agree with each other(and OperandVal is the constant) or 770 // OperandVal is null because there are no defined incoming arguments. 
If 771 // this is the case, the PHI remains unknown. 772 if (OperandVal) 773 markConstant(&PN, OperandVal); // Acquire operand value 774 } 775 776 void SCCPSolver::visitReturnInst(ReturnInst &I) { 777 if (I.getNumOperands() == 0) return; // ret void 778 779 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would 780 // discover a concrete value later. 781 if (isOverdefined(ValueState[&I])) 782 return; 783 784 Function *F = I.getParent()->getParent(); 785 Value *ResultOp = I.getOperand(0); 786 787 // If we are tracking the return value of this function, merge it in. 788 if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) { 789 MapVector<Function*, LatticeVal>::iterator TFRVI = 790 TrackedRetVals.find(F); 791 if (TFRVI != TrackedRetVals.end()) { 792 mergeInValue(TFRVI->second, F, getValueState(ResultOp)); 793 return; 794 } 795 } 796 797 // Handle functions that return multiple values. 798 if (!TrackedMultipleRetVals.empty()) { 799 if (auto *STy = dyn_cast<StructType>(ResultOp->getType())) 800 if (MRVFunctionsTracked.count(F)) 801 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) 802 mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F, 803 getStructValueState(ResultOp, i)); 804 } 805 } 806 807 void SCCPSolver::visitTerminator(Instruction &TI) { 808 SmallVector<bool, 16> SuccFeasible; 809 getFeasibleSuccessors(TI, SuccFeasible); 810 811 BasicBlock *BB = TI.getParent(); 812 813 // Mark all feasible successors executable. 814 for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i) 815 if (SuccFeasible[i]) 816 markEdgeExecutable(BB, TI.getSuccessor(i)); 817 } 818 819 void SCCPSolver::visitCastInst(CastInst &I) { 820 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would 821 // discover a concrete value later. 
822 if (isOverdefined(ValueState[&I])) 823 return; 824 825 LatticeVal OpSt = getValueState(I.getOperand(0)); 826 if (Constant *OpC = getConstant(OpSt)) { 827 // Fold the constant as we build. 828 Constant *C = ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL); 829 if (isa<UndefValue>(C)) 830 return; 831 // Propagate constant value 832 markConstant(&I, C); 833 } else if (!OpSt.isUnknownOrUndef()) 834 markOverdefined(&I); 835 } 836 837 void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) { 838 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would 839 // discover a concrete value later. 840 if (isOverdefined(ValueState[&EVI])) 841 return; 842 843 // If this returns a struct, mark all elements over defined, we don't track 844 // structs in structs. 845 if (EVI.getType()->isStructTy()) 846 return (void)markOverdefined(&EVI); 847 848 // If this is extracting from more than one level of struct, we don't know. 849 if (EVI.getNumIndices() != 1) 850 return (void)markOverdefined(&EVI); 851 852 Value *AggVal = EVI.getAggregateOperand(); 853 if (AggVal->getType()->isStructTy()) { 854 unsigned i = *EVI.idx_begin(); 855 LatticeVal EltVal = getStructValueState(AggVal, i); 856 mergeInValue(getValueState(&EVI), &EVI, EltVal); 857 } else { 858 // Otherwise, must be extracting from an array. 859 return (void)markOverdefined(&EVI); 860 } 861 } 862 863 void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) { 864 auto *STy = dyn_cast<StructType>(IVI.getType()); 865 if (!STy) 866 return (void)markOverdefined(&IVI); 867 868 // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would 869 // discover a concrete value later. 870 if (isOverdefined(ValueState[&IVI])) 871 return; 872 873 // If this has more than one index, we can't handle it, drive all results to 874 // undef. 
  if (IVI.getNumIndices() != 1)
    return (void)markOverdefined(&IVI);

  Value *Aggr = IVI.getAggregateOperand();
  unsigned Idx = *IVI.idx_begin();

  // Compute the result based on what we're inserting.
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    // This passes through all values that aren't the inserted element.
    if (i != Idx) {
      LatticeVal EltVal = getStructValueState(Aggr, i);
      mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
      continue;
    }

    Value *Val = IVI.getInsertedValueOperand();
    if (Val->getType()->isStructTy())
      // We don't track structs in structs.
      markOverdefined(getStructValueState(&IVI, i), &IVI);
    else {
      // The inserted element takes the lattice state of the inserted value.
      LatticeVal InVal = getValueState(Val);
      mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
    }
  }
}

void SCCPSolver::visitSelectInst(SelectInst &I) {
  // If this select returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (I.getType()->isStructTy())
    return (void)markOverdefined(&I);

  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (isOverdefined(ValueState[&I]))
    return;

  LatticeVal CondValue = getValueState(I.getCondition());
  // Wait for the condition to resolve before committing to anything.
  if (CondValue.isUnknownOrUndef())
    return;

  if (ConstantInt *CondCB = getConstantInt(CondValue)) {
    // Known condition: the select collapses to one arm.
    Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
    mergeInValue(&I, getValueState(OpVal));
    return;
  }

  // Otherwise, the condition is overdefined or a constant we can't evaluate.
  // See if we can produce something better than overdefined based on the T/F
  // value.
  LatticeVal TVal = getValueState(I.getTrueValue());
  LatticeVal FVal = getValueState(I.getFalseValue());

  // select ?, C, C -> C.
  if (isConstant(TVal) && isConstant(FVal) &&
      getConstant(TVal) == getConstant(FVal))
    return (void)markConstant(&I, getConstant(FVal));

  if (TVal.isUnknownOrUndef()) // select ?, undef, X -> X.
    return (void)mergeInValue(&I, FVal);
  if (FVal.isUnknownOrUndef()) // select ?, X, undef -> X.
    return (void)mergeInValue(&I, TVal);
  markOverdefined(&I);
}

// Handle Unary Operators.
void SCCPSolver::visitUnaryOperator(Instruction &I) {
  LatticeVal V0State = getValueState(I.getOperand(0));

  LatticeVal &IV = ValueState[&I];
  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (isOverdefined(IV)) return;

  if (isConstant(V0State)) {
    Constant *C = ConstantExpr::get(I.getOpcode(), getConstant(V0State));

    // op Y -> undef.
    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // If something is undef, wait for it to resolve.
  if (!isOverdefined(V0State))
    return;

  markOverdefined(&I);
}

// Handle Binary Operators.
void SCCPSolver::visitBinaryOperator(Instruction &I) {
  LatticeVal V1State = getValueState(I.getOperand(0));
  LatticeVal V2State = getValueState(I.getOperand(1));

  LatticeVal &IV = ValueState[&I];
  if (isOverdefined(IV)) {
    markOverdefined(&I);
    return;
  }

  if (isConstant(V1State) && isConstant(V2State)) {
    // Both operands constant: fold the operation eagerly.
    Constant *C = ConstantExpr::get(I.getOpcode(), getConstant(V1State),
                                    getConstant(V2State));
    // X op Y -> undef.
    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // If something is undef, wait for it to resolve.
  if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
    return;

  // Otherwise, one of our operands is overdefined. Try to produce something
  // better than overdefined with some tricks.
  // If this is 0 / Y, it doesn't matter that the second operand is
  // overdefined, and we can replace it with zero.
  if (I.getOpcode() == Instruction::UDiv || I.getOpcode() == Instruction::SDiv)
    if (isConstant(V1State) && getConstant(V1State)->isNullValue())
      return (void)markConstant(IV, &I, getConstant(V1State));

  // If this is:
  // -> AND/MUL with 0
  // -> OR with -1
  // it doesn't matter that the other operand is overdefined.
  if (I.getOpcode() == Instruction::And || I.getOpcode() == Instruction::Mul ||
      I.getOpcode() == Instruction::Or) {
    LatticeVal *NonOverdefVal = nullptr;
    if (!isOverdefined(V1State))
      NonOverdefVal = &V1State;

    else if (!isOverdefined(V2State))
      NonOverdefVal = &V2State;
    if (NonOverdefVal) {
      if (!isConstant(*NonOverdefVal))
        return;

      if (I.getOpcode() == Instruction::And ||
          I.getOpcode() == Instruction::Mul) {
        // X and 0 = 0
        // X * 0 = 0
        if (getConstant(*NonOverdefVal)->isNullValue())
          return (void)markConstant(IV, &I, getConstant(*NonOverdefVal));
      } else {
        // X or -1 = -1
        if (ConstantInt *CI = getConstantInt(*NonOverdefVal))
          if (CI->isMinusOne())
            return (void)markConstant(IV, &I, CI);
      }
    }
  }

  markOverdefined(&I);
}

// Handle ICmpInst instruction.
void SCCPSolver::visitCmpInst(CmpInst &I) {
  // Do not cache this lookup, getValueState calls later in the function might
  // invalidate the reference.
  if (isOverdefined(ValueState[&I])) {
    markOverdefined(&I);
    return;
  }

  Value *Op1 = I.getOperand(0);
  Value *Op2 = I.getOperand(1);

  // For parameters, use ParamState which includes constant range info if
  // available.
  auto V1State = getValueState(Op1);
  auto V2State = getValueState(Op2);

  // Let the lattice elements fold the comparison (e.g. via constant ranges);
  // a non-null result means the predicate is decided.
  Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State);
  if (C) {
    if (isa<UndefValue>(C))
      return;
    LatticeVal CV;
    CV.markConstant(C);
    mergeInValue(&I, CV);
    return;
  }

  // If operands are still unknown, wait for it to resolve.
  if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
      !isConstant(ValueState[&I]))
    return;

  markOverdefined(&I);
}

// Handle getelementptr instructions. If all operands are constants then we
// can turn this into a getelementptr ConstantExpr.
void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (isOverdefined(ValueState[&I])) return;

  SmallVector<Constant*, 8> Operands;
  Operands.reserve(I.getNumOperands());

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    LatticeVal State = getValueState(I.getOperand(i));
    if (State.isUnknownOrUndef())
      return; // Operands are not resolved yet.

    if (isOverdefined(State))
      return (void)markOverdefined(&I);

    if (Constant *C = getConstant(State)) {
      Operands.push_back(C);
      continue;
    }

    return (void)markOverdefined(&I);
  }

  // All operands are constants: build the folded GEP constant expression.
  Constant *Ptr = Operands[0];
  auto Indices = makeArrayRef(Operands.begin() + 1, Operands.end());
  Constant *C =
      ConstantExpr::getGetElementPtr(I.getSourceElementType(), Ptr, Indices);
  if (isa<UndefValue>(C))
    return;
  markConstant(&I, C);
}

// Stores only matter to the solver when they write a tracked global variable;
// the stored lattice value is merged into the global's tracked state.
void SCCPSolver::visitStoreInst(StoreInst &SI) {
  // If this store is of a struct, ignore it.
  if (SI.getOperand(0)->getType()->isStructTy())
    return;

  if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
    return;

  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (isOverdefined(ValueState[&SI]))
    return;

  GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
  DenseMap<GlobalVariable*, LatticeVal>::iterator I = TrackedGlobals.find(GV);
  if (I == TrackedGlobals.end())
    return;

  // Get the value we are storing into the global, then merge it.
  mergeInValue(I->second, GV, getValueState(SI.getOperand(0)));
  if (isOverdefined(I->second))
    TrackedGlobals.erase(I); // No need to keep tracking this!
}

// Handle load instructions. If the operand is a constant pointer to a constant
// global, we can replace the load with the loaded constant value!
void SCCPSolver::visitLoadInst(LoadInst &I) {
  // If this load is of a struct, just mark the result overdefined.
  if (I.getType()->isStructTy())
    return (void)markOverdefined(&I);

  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (isOverdefined(ValueState[&I]))
    return;

  LatticeVal PtrVal = getValueState(I.getOperand(0));
  if (PtrVal.isUnknownOrUndef())
    return; // The pointer is not resolved yet!

  LatticeVal &IV = ValueState[&I];

  // Volatile loads and non-constant pointers give no information.
  if (!isConstant(PtrVal) || I.isVolatile())
    return (void)markOverdefined(IV, &I);

  Constant *Ptr = getConstant(PtrVal);

  // load null is undefined.
  if (isa<ConstantPointerNull>(Ptr)) {
    if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
      return (void)markOverdefined(IV, &I);
    else
      return;
  }

  // Transform load (constant global) into the value loaded.
  if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
    if (!TrackedGlobals.empty()) {
      // If we are tracking this global, merge in the known value for it.
      DenseMap<GlobalVariable*, LatticeVal>::iterator It =
          TrackedGlobals.find(GV);
      if (It != TrackedGlobals.end()) {
        mergeInValue(IV, &I, It->second);
        return;
      }
    }
  }

  // Transform load from a constant into a constant if possible.
  if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL)) {
    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // Otherwise we cannot say for certain what value this load will produce.
  // Bail out.
  markOverdefined(IV, &I);
}

// Handle call/invoke sites. Three concerns are folded together here:
// ssa.copy intrinsics inserted by PredicateInfo (which may refine the copied
// value using the dominating branch condition), constant folding of calls to
// declarations, and interprocedural propagation of arguments/return values
// for tracked functions.
void SCCPSolver::visitCallSite(CallSite CS) {
  Function *F = CS.getCalledFunction();
  Instruction *I = CS.getInstruction();

  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
      if (isOverdefined(ValueState[I]))
        return (void)markOverdefined(I);

      auto *PI = getPredicateInfoFor(I);
      if (!PI)
        return;

      Value *CopyOf = I->getOperand(0);
      auto *PBranch = dyn_cast<PredicateBranch>(PI);
      if (!PBranch) {
        // Not a branch predicate: the copy just forwards its operand's state.
        mergeInValue(ValueState[I], I, getValueState(CopyOf));
        return;
      }

      Value *Cond = PBranch->Condition;

      // Everything below relies on the condition being a comparison.
      auto *Cmp = dyn_cast<CmpInst>(Cond);
      if (!Cmp) {
        mergeInValue(ValueState[I], I, getValueState(CopyOf));
        return;
      }

      // Canonicalize so the copied value is the first compare operand.
      Value *CmpOp0 = Cmp->getOperand(0);
      Value *CmpOp1 = Cmp->getOperand(1);
      if (CopyOf != CmpOp0 && CopyOf != CmpOp1) {
        mergeInValue(ValueState[I], I, getValueState(CopyOf));
        return;
      }

      if (CmpOp0 != CopyOf)
        std::swap(CmpOp0, CmpOp1);

      LatticeVal OriginalVal = getValueState(CopyOf);
      LatticeVal EqVal = getValueState(CmpOp1);
      LatticeVal &IV = ValueState[I];
      // On the true edge of 'CopyOf == CmpOp1' (or the false edge of
      // 'CopyOf != CmpOp1' below), the copy is known equal to CmpOp1, so we
      // may use CmpOp1's lattice state when the original is not yet constant.
      if (PBranch->TrueEdge && Cmp->getPredicate() == CmpInst::ICMP_EQ) {
        addAdditionalUser(CmpOp1, I);
        if (isConstant(OriginalVal))
          mergeInValue(IV, I, OriginalVal);
        else
          mergeInValue(IV, I, EqVal);
        return;
      }
      if (!PBranch->TrueEdge && Cmp->getPredicate() == CmpInst::ICMP_NE) {
        addAdditionalUser(CmpOp1, I);
        if (isConstant(OriginalVal))
          mergeInValue(IV, I, OriginalVal);
        else
          mergeInValue(IV, I, EqVal);
        return;
      }

      return (void)mergeInValue(IV, I, getValueState(CopyOf));
    }
  }

  // The common case is that we aren't tracking the callee, either because we
  // are not doing interprocedural analysis or the callee is indirect, or is
  // external. Handle these cases first.
  if (!F || F->isDeclaration()) {
CallOverdefined:
    // Void return and not tracking callee, just bail.
    if (I->getType()->isVoidTy()) return;

    // Otherwise, if we have a single return value case, and if the function is
    // a declaration, maybe we can constant fold it.
    if (F && F->isDeclaration() && !I->getType()->isStructTy() &&
        canConstantFoldCallTo(cast<CallBase>(CS.getInstruction()), F)) {
      SmallVector<Constant*, 8> Operands;
      for (CallSite::arg_iterator AI = CS.arg_begin(), E = CS.arg_end();
           AI != E; ++AI) {
        if (AI->get()->getType()->isStructTy())
          return markOverdefined(I); // Can't handle struct args.
        LatticeVal State = getValueState(*AI);

        if (State.isUnknownOrUndef())
          return; // Operands are not resolved yet.
        if (isOverdefined(State))
          return (void)markOverdefined(I);
        assert(isConstant(State) && "Unknown state!");
        Operands.push_back(getConstant(State));
      }

      if (isOverdefined(getValueState(I)))
        return;

      // If we can constant fold this, mark the result of the call as a
      // constant.
      if (Constant *C = ConstantFoldCall(cast<CallBase>(CS.getInstruction()), F,
                                         Operands, &GetTLI(*F))) {
        // call -> undef.
        if (isa<UndefValue>(C))
          return;
        return (void)markConstant(I, C);
      }
    }

    // Otherwise, we don't know anything about this call, mark it overdefined.
    return (void)markOverdefined(I);
  }

  // If this is a local function that doesn't have its address taken, mark its
  // entry block executable and merge in the actual arguments to the call into
  // the formal arguments of the function.
  if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){
    MarkBlockExecutable(&F->front());

    // Propagate information from this call site into the callee.
    CallSite::arg_iterator CAI = CS.arg_begin();
    for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
         AI != E; ++AI, ++CAI) {
      // If this argument is byval, and if the function is not readonly, there
      // will be an implicit copy formed of the input aggregate.
      if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
        markOverdefined(&*AI);
        continue;
      }

      if (auto *STy = dyn_cast<StructType>(AI->getType())) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          LatticeVal CallArg = getStructValueState(*CAI, i);
          mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg, false);
        }
      } else
        mergeInValue(&*AI, getValueState(*CAI), false);
    }
  }

  // If this is a single/zero retval case, see if we're tracking the function.
  if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
    if (!MRVFunctionsTracked.count(F))
      goto CallOverdefined; // Not tracking this callee.

    // If we are tracking this callee, propagate the result of the function
    // into this call site.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      mergeInValue(getStructValueState(I, i), I,
                   TrackedMultipleRetVals[std::make_pair(F, i)]);
  } else {
    MapVector<Function*, LatticeVal>::iterator TFRVI = TrackedRetVals.find(F);
    if (TFRVI == TrackedRetVals.end())
      goto CallOverdefined; // Not tracking this callee.

    // If so, propagate the return value of the callee into this call result.
    mergeInValue(I, TFRVI->second);
  }
}

// Drain the three work lists to a fixed point. The overdefined list is
// drained first so pessimistic information spreads quickly and fewer
// re-visits are needed.
void SCCPSolver::Solve() {
  // Process the work lists until they are empty!
  while (!BBWorkList.empty() || !InstWorkList.empty() ||
         !OverdefinedInstWorkList.empty()) {
    // Process the overdefined instruction's work list first, which drives other
    // things to overdefined more quickly.
    while (!OverdefinedInstWorkList.empty()) {
      Value *I = OverdefinedInstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');

      // "I" got into the work list because it either made the transition from
      // bottom to constant, or to overdefined.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined
      // Update all of the users of this instruction's value.
      //
      markUsersAsChanged(I);
    }

    // Process the instruction work list.
    while (!InstWorkList.empty()) {
      Value *I = InstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');

      // "I" got into the work list because it made the transition from undef to
      // constant.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined.
      // Update all of the users of this instruction's value.
      //
      if (I->getType()->isStructTy() || !isOverdefined(getValueState(I)))
        markUsersAsChanged(I);
    }

    // Process the basic block work list.
    while (!BBWorkList.empty()) {
      BasicBlock *BB = BBWorkList.back();
      BBWorkList.pop_back();

      LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');

      // Notify all instructions in this basic block that they are newly
      // executable.
      visit(BB);
    }
  }
}

/// ResolvedUndefsIn - While solving the dataflow for a function, we assume
/// that branches on undef values cannot reach any of their successors.
/// However, this is not a safe assumption. After we solve dataflow, this
/// method should be use to handle this. If this returns true, the solver
/// should be rerun.
///
/// This method handles this by finding an unresolved branch and marking it one
/// of the edges from the block as being feasible, even though the condition
/// doesn't say it would otherwise be. This allows SCCP to find the rest of the
/// CFG and only slightly pessimizes the analysis results (by marking one,
/// potentially infeasible, edge feasible). This cannot usefully modify the
/// constraints on the condition of the branch, as that would impact other users
/// of the value.
///
/// This scan also checks for values that use undefs. It conservatively marks
/// them as overdefined.
bool SCCPSolver::ResolvedUndefsIn(Function &F) {
  for (BasicBlock &BB : F) {
    if (!BBExecutable.count(&BB))
      continue;

    for (Instruction &I : BB) {
      // Look for instructions which produce undef values.
      if (I.getType()->isVoidTy()) continue;

      if (auto *STy = dyn_cast<StructType>(I.getType())) {
        // Only a few things that can be structs matter for undef.

        // Tracked calls must never be marked overdefined in ResolvedUndefsIn.
        if (CallSite CS = CallSite(&I))
          if (Function *F = CS.getCalledFunction())
            if (MRVFunctionsTracked.count(F))
              continue;

        // extractvalue and insertvalue don't need to be marked; they are
        // tracked as precisely as their operands.
        if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
          continue;
        // Send the results of everything else to overdefined. We could be
        // more precise than this but it isn't worth bothering.
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          LatticeVal &LV = getStructValueState(&I, i);
          if (LV.isUnknownOrUndef())
            markOverdefined(LV, &I);
        }
        continue;
      }

      LatticeVal &LV = getValueState(&I);
      if (!LV.isUnknownOrUndef())
        continue;

      // There are two reasons a call can have an undef result
      // 1. It could be tracked.
      // 2. It could be constant-foldable.
      // Because of the way we solve return values, tracked calls must
      // never be marked overdefined in ResolvedUndefsIn.
      if (CallSite CS = CallSite(&I))
        if (Function *F = CS.getCalledFunction())
          if (TrackedRetVals.count(F))
            continue;

      if (isa<LoadInst>(I)) {
        // A load here means one of two things: a load of undef from a global,
        // a load from an unknown pointer. Either way, having it return undef
        // is okay.
        continue;
      }

      markOverdefined(&I);
      return true;
    }

    // Check to see if we have a branch or switch on an undefined value. If so
    // we force the branch to go one way or the other to make the successor
    // values live. It doesn't really matter which way we force it.
    Instruction *TI = BB.getTerminator();
    if (auto *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional()) continue;
      if (!getValueState(BI->getCondition()).isUnknownOrUndef())
        continue;

      // If the input to SCCP is actually branch on undef, fix the undef to
      // false.
      if (isa<UndefValue>(BI->getCondition())) {
        BI->setCondition(ConstantInt::getFalse(BI->getContext()));
        markEdgeExecutable(&BB, TI->getSuccessor(1));
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef. Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: Distinguish between dead code and an LLVM "undef" value.
      BasicBlock *DefaultSuccessor = TI->getSuccessor(1);
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }

    if (auto *IBR = dyn_cast<IndirectBrInst>(TI)) {
      // Indirect branch with no successor ?. Its ok to assume it branches
      // to no target.
      if (IBR->getNumSuccessors() < 1)
        continue;

      if (!getValueState(IBR->getAddress()).isUnknownOrUndef())
        continue;

      // If the input to SCCP is actually branch on undef, fix the undef to
      // the first successor of the indirect branch.
      if (isa<UndefValue>(IBR->getAddress())) {
        IBR->setAddress(BlockAddress::get(IBR->getSuccessor(0)));
        markEdgeExecutable(&BB, IBR->getSuccessor(0));
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef. Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: IndirectBr on "undef" doesn't actually need to go anywhere:
      // we can assume the branch has undefined behavior instead.
      BasicBlock *DefaultSuccessor = IBR->getSuccessor(0);
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }

    if (auto *SI = dyn_cast<SwitchInst>(TI)) {
      if (!SI->getNumCases() ||
          !getValueState(SI->getCondition()).isUnknownOrUndef())
        continue;

      // If the input to SCCP is actually switch on undef, fix the undef to
      // the first constant.
      if (isa<UndefValue>(SI->getCondition())) {
        SI->setCondition(SI->case_begin()->getCaseValue());
        markEdgeExecutable(&BB, SI->case_begin()->getCaseSuccessor());
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef. Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: Distinguish between dead code and an LLVM "undef" value.
      BasicBlock *DefaultSuccessor = SI->case_begin()->getCaseSuccessor();
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }
  }

  return false;
}

// Try to replace every use of \p V with a single constant built from its
// lattice state (element-wise for struct values, with undef filling unknown
// elements). Returns true if the uses were replaced, false if V is
// overdefined or is a musttail call whose result must be kept.
static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
  Constant *Const = nullptr;
  if (V->getType()->isStructTy()) {
    std::vector<LatticeVal> IVs = Solver.getStructLatticeValueFor(V);
    if (any_of(IVs, [](const LatticeVal &LV) { return isOverdefined(LV); }))
      return false;
    std::vector<Constant *> ConstVals;
    auto *ST = cast<StructType>(V->getType());
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      LatticeVal V = IVs[i];
      ConstVals.push_back(isConstant(V)
                              ? Solver.getConstant(V)
                              : UndefValue::get(ST->getElementType(i)));
    }
    Const = ConstantStruct::get(ST, ConstVals);
  } else {
    const LatticeVal &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;

    Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());
  }
  assert(Const && "Constant is nullptr here!");

  // Replacing `musttail` instructions with constant breaks `musttail` invariant
  // unless the call itself can be removed
  CallInst *CI = dyn_cast<CallInst>(V);
  if (CI && CI->isMustTailCall() && !CI->isSafeToRemove()) {
    CallSite CS(CI);
    Function *F = CS.getCalledFunction();

    // Don't zap returns of the callee
    if (F)
      Solver.AddMustTailCallee(F);

    LLVM_DEBUG(dbgs() << "  Can\'t treat the result of musttail call : " << *CI
                      << " as a constant\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "  Constant: " << *Const << " = " << *V << '\n');

  // Replaces all of the uses of a variable with uses of the constant.
  V->replaceAllUsesWith(Const);
  return true;
}

// runSCCP() - Run the Sparse Conditional Constant Propagation algorithm,
// and return true if the function was modified.
static bool runSCCP(Function &F, const DataLayout &DL,
                    const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
  SCCPSolver Solver(
      DL, [TLI](Function &F) -> const TargetLibraryInfo & { return *TLI; },
      F.getContext());

  // Mark the first block of the function as being executable.
  Solver.MarkBlockExecutable(&F.front());

  // Mark all arguments to the function as being overdefined.
  for (Argument &AI : F.args())
    Solver.markOverdefined(&AI);

  // Solve for constants.
  bool ResolvedUndefs = true;
  while (ResolvedUndefs) {
    Solver.Solve();
    LLVM_DEBUG(dbgs() << "RESOLVING UNDEFs\n");
    ResolvedUndefs = Solver.ResolvedUndefsIn(F);
  }

  bool MadeChanges = false;

  // If we decided that there are basic blocks that are dead in this function,
  // delete their contents now. Note that we cannot actually delete the blocks,
  // as we cannot modify the CFG of the function.

  for (BasicBlock &BB : F) {
    if (!Solver.isBlockExecutable(&BB)) {
      LLVM_DEBUG(dbgs() << "  BasicBlock Dead:" << BB);

      ++NumDeadBlocks;
      NumInstRemoved += removeAllNonTerminatorAndEHPadInstructions(&BB);

      MadeChanges = true;
      continue;
    }

    // Iterate over all of the instructions in a function, replacing them with
    // constants if we have found them to be of constant values.
    for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
      Instruction *Inst = &*BI++;
      if (Inst->getType()->isVoidTy() || Inst->isTerminator())
        continue;

      if (tryToReplaceWithConstant(Solver, Inst)) {
        if (isInstructionTriviallyDead(Inst))
          Inst->eraseFromParent();
        // Hey, we just changed something!
        MadeChanges = true;
        ++NumInstRemoved;
      }
    }
  }

  return MadeChanges;
}

// New pass manager entry point. The CFG is preserved because intraprocedural
// SCCP never deletes blocks; it only replaces/removes non-terminator
// instructions.
PreservedAnalyses SCCPPass::run(Function &F, FunctionAnalysisManager &AM) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  if (!runSCCP(F, DL, &TLI))
    return PreservedAnalyses::all();

  auto PA = PreservedAnalyses();
  PA.preserve<GlobalsAA>();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

namespace {

//===--------------------------------------------------------------------===//
//
/// SCCP Class - This class uses the SCCPSolver to implement a per-function
/// Sparse Conditional Constant Propagator.
///
class SCCPLegacyPass : public FunctionPass {
public:
  // Pass identification, replacement for typeid
  static char ID;

  SCCPLegacyPass() : FunctionPass(ID) {
    initializeSCCPLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  // runOnFunction - Run the Sparse Conditional Constant Propagation
  // algorithm, and return true if the function was modified.
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    const DataLayout &DL = F.getParent()->getDataLayout();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    return runSCCP(F, DL, TLI);
  }
};

} // end anonymous namespace

char SCCPLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SCCPLegacyPass, "sccp",
                      "Sparse Conditional Constant Propagation", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(SCCPLegacyPass, "sccp",
                    "Sparse Conditional Constant Propagation", false, false)

// createSCCPPass - This is the public interface to this file.
FunctionPass *llvm::createSCCPPass() { return new SCCPLegacyPass(); }

// Collect the return instructions of \p F whose return operands can be
// replaced ("zapped") because IPSCCP already knows F's return value at every
// live call site. Returns whose operand is already undef are skipped.
static void findReturnsToZap(Function &F,
                             SmallVector<ReturnInst *, 8> &ReturnsToZap,
                             SCCPSolver &Solver) {
  // We can only do this if we know that nothing else can call the function.
  if (!Solver.isArgumentTrackedFunction(&F))
    return;

  // There is a non-removable musttail call site of this function. Zapping
  // returns is not allowed.
  if (Solver.isMustTailCallee(&F)) {
    LLVM_DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
                      << " due to present musttail call of it\n");
    return;
  }

  assert(
      all_of(F.users(),
             [&Solver](User *U) {
               if (isa<Instruction>(U) &&
                   !Solver.isBlockExecutable(cast<Instruction>(U)->getParent()))
                 return true;
               // Non-callsite uses are not impacted by zapping. Also, constant
               // uses (like blockaddresses) could stuck around, without being
               // used in the underlying IR, meaning we do not have lattice
               // values for them.
               if (!CallSite(U))
                 return true;
               if (U->getType()->isStructTy()) {
                 return all_of(
                     Solver.getStructLatticeValueFor(U),
                     [](const LatticeVal &LV) { return !isOverdefined(LV); });
               }
               return !isOverdefined(Solver.getLatticeValueFor(U));
             }) &&
      "We can only zap functions where all live users have a concrete value");

  for (BasicBlock &BB : F) {
    if (CallInst *CI = BB.getTerminatingMustTailCall()) {
      LLVM_DEBUG(dbgs() << "Can't zap return of the block due to present "
                        << "musttail call : " << *CI << "\n");
      (void)CI;
      return;
    }

    if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      if (!isa<UndefValue>(RI->getOperand(0)))
        ReturnsToZap.push_back(RI);
  }
}

// Update the condition for terminators that are branching on indeterminate
// values, forcing them to use a specific edge.
static void forceIndeterminateEdge(Instruction* I, SCCPSolver &Solver) {
  BasicBlock *Dest = nullptr;
  Constant *C = nullptr;
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    if (!isa<ConstantInt>(SI->getCondition())) {
      // Indeterminate switch; use first case value.
      Dest = SI->case_begin()->getCaseSuccessor();
      C = SI->case_begin()->getCaseValue();
    }
  } else if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!isa<ConstantInt>(BI->getCondition())) {
      // Indeterminate branch; use false.
      Dest = BI->getSuccessor(1);
      C = ConstantInt::getFalse(BI->getContext());
    }
  } else if (IndirectBrInst *IBR = dyn_cast<IndirectBrInst>(I)) {
    if (!isa<BlockAddress>(IBR->getAddress()->stripPointerCasts())) {
      // Indeterminate indirectbr; use successor 0.
1782 Dest = IBR->getSuccessor(0); 1783 C = BlockAddress::get(IBR->getSuccessor(0)); 1784 } 1785 } else { 1786 llvm_unreachable("Unexpected terminator instruction"); 1787 } 1788 if (C) { 1789 assert(Solver.isEdgeFeasible(I->getParent(), Dest) && 1790 "Didn't find feasible edge?"); 1791 (void)Dest; 1792 1793 I->setOperand(0, C); 1794 } 1795 } 1796 1797 bool llvm::runIPSCCP( 1798 Module &M, const DataLayout &DL, 1799 std::function<const TargetLibraryInfo &(Function &)> GetTLI, 1800 function_ref<AnalysisResultsForFn(Function &)> getAnalysis) { 1801 SCCPSolver Solver(DL, GetTLI, M.getContext()); 1802 1803 // Loop over all functions, marking arguments to those with their addresses 1804 // taken or that are external as overdefined. 1805 for (Function &F : M) { 1806 if (F.isDeclaration()) 1807 continue; 1808 1809 Solver.addAnalysis(F, getAnalysis(F)); 1810 1811 // Determine if we can track the function's return values. If so, add the 1812 // function to the solver's set of return-tracked functions. 1813 if (canTrackReturnsInterprocedurally(&F)) 1814 Solver.AddTrackedFunction(&F); 1815 1816 // Determine if we can track the function's arguments. If so, add the 1817 // function to the solver's set of argument-tracked functions. 1818 if (canTrackArgumentsInterprocedurally(&F)) { 1819 Solver.AddArgumentTrackedFunction(&F); 1820 continue; 1821 } 1822 1823 // Assume the function is called. 1824 Solver.MarkBlockExecutable(&F.front()); 1825 1826 // Assume nothing about the incoming arguments. 1827 for (Argument &AI : F.args()) 1828 Solver.markOverdefined(&AI); 1829 } 1830 1831 // Determine if we can track any of the module's global variables. If so, add 1832 // the global variables we can track to the solver's set of tracked global 1833 // variables. 1834 for (GlobalVariable &G : M.globals()) { 1835 G.removeDeadConstantUsers(); 1836 if (canTrackGlobalVariableInterprocedurally(&G)) 1837 Solver.TrackValueOfGlobalVariable(&G); 1838 } 1839 1840 // Solve for constants. 
1841 bool ResolvedUndefs = true; 1842 Solver.Solve(); 1843 while (ResolvedUndefs) { 1844 LLVM_DEBUG(dbgs() << "RESOLVING UNDEFS\n"); 1845 ResolvedUndefs = false; 1846 for (Function &F : M) 1847 if (Solver.ResolvedUndefsIn(F)) { 1848 // We run Solve() after we resolved an undef in a function, because 1849 // we might deduce a fact that eliminates an undef in another function. 1850 Solver.Solve(); 1851 ResolvedUndefs = true; 1852 } 1853 } 1854 1855 bool MadeChanges = false; 1856 1857 // Iterate over all of the instructions in the module, replacing them with 1858 // constants if we have found them to be of constant values. 1859 1860 for (Function &F : M) { 1861 if (F.isDeclaration()) 1862 continue; 1863 1864 SmallVector<BasicBlock *, 512> BlocksToErase; 1865 1866 if (Solver.isBlockExecutable(&F.front())) 1867 for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E; 1868 ++AI) { 1869 if (!AI->use_empty() && tryToReplaceWithConstant(Solver, &*AI)) { 1870 ++IPNumArgsElimed; 1871 continue; 1872 } 1873 } 1874 1875 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { 1876 if (!Solver.isBlockExecutable(&*BB)) { 1877 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB); 1878 ++NumDeadBlocks; 1879 1880 MadeChanges = true; 1881 1882 if (&*BB != &F.front()) 1883 BlocksToErase.push_back(&*BB); 1884 continue; 1885 } 1886 1887 for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) { 1888 Instruction *Inst = &*BI++; 1889 if (Inst->getType()->isVoidTy()) 1890 continue; 1891 if (tryToReplaceWithConstant(Solver, Inst)) { 1892 if (Inst->isSafeToRemove()) 1893 Inst->eraseFromParent(); 1894 // Hey, we just changed something! 1895 MadeChanges = true; 1896 ++IPNumInstRemoved; 1897 } 1898 } 1899 } 1900 1901 DomTreeUpdater DTU = Solver.getDTU(F); 1902 // Change dead blocks to unreachable. 
We do it after replacing constants 1903 // in all executable blocks, because changeToUnreachable may remove PHI 1904 // nodes in executable blocks we found values for. The function's entry 1905 // block is not part of BlocksToErase, so we have to handle it separately. 1906 for (BasicBlock *BB : BlocksToErase) { 1907 NumInstRemoved += 1908 changeToUnreachable(BB->getFirstNonPHI(), /*UseLLVMTrap=*/false, 1909 /*PreserveLCSSA=*/false, &DTU); 1910 } 1911 if (!Solver.isBlockExecutable(&F.front())) 1912 NumInstRemoved += changeToUnreachable(F.front().getFirstNonPHI(), 1913 /*UseLLVMTrap=*/false, 1914 /*PreserveLCSSA=*/false, &DTU); 1915 1916 // Now that all instructions in the function are constant folded, 1917 // use ConstantFoldTerminator to get rid of in-edges, record DT updates and 1918 // delete dead BBs. 1919 for (BasicBlock *DeadBB : BlocksToErase) { 1920 // If there are any PHI nodes in this successor, drop entries for BB now. 1921 for (Value::user_iterator UI = DeadBB->user_begin(), 1922 UE = DeadBB->user_end(); 1923 UI != UE;) { 1924 // Grab the user and then increment the iterator early, as the user 1925 // will be deleted. Step past all adjacent uses from the same user. 1926 auto *I = dyn_cast<Instruction>(*UI); 1927 do { ++UI; } while (UI != UE && *UI == I); 1928 1929 // Ignore blockaddress users; BasicBlock's dtor will handle them. 1930 if (!I) continue; 1931 1932 // If we have forced an edge for an indeterminate value, then force the 1933 // terminator to fold to that edge. 1934 forceIndeterminateEdge(I, Solver); 1935 BasicBlock *InstBB = I->getParent(); 1936 bool Folded = ConstantFoldTerminator(InstBB, 1937 /*DeleteDeadConditions=*/false, 1938 /*TLI=*/nullptr, &DTU); 1939 assert(Folded && 1940 "Expect TermInst on constantint or blockaddress to be folded"); 1941 (void) Folded; 1942 // If we folded the terminator to an unconditional branch to another 1943 // dead block, replace it with Unreachable, to avoid trying to fold that 1944 // branch again. 
1945 BranchInst *BI = cast<BranchInst>(InstBB->getTerminator()); 1946 if (BI && BI->isUnconditional() && 1947 !Solver.isBlockExecutable(BI->getSuccessor(0))) { 1948 InstBB->getTerminator()->eraseFromParent(); 1949 new UnreachableInst(InstBB->getContext(), InstBB); 1950 } 1951 } 1952 // Mark dead BB for deletion. 1953 DTU.deleteBB(DeadBB); 1954 } 1955 1956 for (BasicBlock &BB : F) { 1957 for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) { 1958 Instruction *Inst = &*BI++; 1959 if (Solver.getPredicateInfoFor(Inst)) { 1960 if (auto *II = dyn_cast<IntrinsicInst>(Inst)) { 1961 if (II->getIntrinsicID() == Intrinsic::ssa_copy) { 1962 Value *Op = II->getOperand(0); 1963 Inst->replaceAllUsesWith(Op); 1964 Inst->eraseFromParent(); 1965 } 1966 } 1967 } 1968 } 1969 } 1970 } 1971 1972 // If we inferred constant or undef return values for a function, we replaced 1973 // all call uses with the inferred value. This means we don't need to bother 1974 // actually returning anything from the function. Replace all return 1975 // instructions with return undef. 1976 // 1977 // Do this in two stages: first identify the functions we should process, then 1978 // actually zap their returns. This is important because we can only do this 1979 // if the address of the function isn't taken. In cases where a return is the 1980 // last use of a function, the order of processing functions would affect 1981 // whether other functions are optimizable. 
1982 SmallVector<ReturnInst*, 8> ReturnsToZap; 1983 1984 const MapVector<Function*, LatticeVal> &RV = Solver.getTrackedRetVals(); 1985 for (const auto &I : RV) { 1986 Function *F = I.first; 1987 if (isOverdefined(I.second) || F->getReturnType()->isVoidTy()) 1988 continue; 1989 findReturnsToZap(*F, ReturnsToZap, Solver); 1990 } 1991 1992 for (auto F : Solver.getMRVFunctionsTracked()) { 1993 assert(F->getReturnType()->isStructTy() && 1994 "The return type should be a struct"); 1995 StructType *STy = cast<StructType>(F->getReturnType()); 1996 if (Solver.isStructLatticeConstant(F, STy)) 1997 findReturnsToZap(*F, ReturnsToZap, Solver); 1998 } 1999 2000 // Zap all returns which we've identified as zap to change. 2001 for (unsigned i = 0, e = ReturnsToZap.size(); i != e; ++i) { 2002 Function *F = ReturnsToZap[i]->getParent()->getParent(); 2003 ReturnsToZap[i]->setOperand(0, UndefValue::get(F->getReturnType())); 2004 } 2005 2006 // If we inferred constant or undef values for globals variables, we can 2007 // delete the global and any stores that remain to it. 2008 const DenseMap<GlobalVariable*, LatticeVal> &TG = Solver.getTrackedGlobals(); 2009 for (DenseMap<GlobalVariable*, LatticeVal>::const_iterator I = TG.begin(), 2010 E = TG.end(); I != E; ++I) { 2011 GlobalVariable *GV = I->first; 2012 if (isOverdefined(I->second)) 2013 continue; 2014 LLVM_DEBUG(dbgs() << "Found that GV '" << GV->getName() 2015 << "' is constant!\n"); 2016 while (!GV->use_empty()) { 2017 StoreInst *SI = cast<StoreInst>(GV->user_back()); 2018 SI->eraseFromParent(); 2019 } 2020 M.getGlobalList().erase(GV); 2021 ++IPNumGlobalConst; 2022 } 2023 2024 return MadeChanges; 2025 } 2026