//===- SCCP.cpp - Sparse Conditional Constant Propagation ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements sparse conditional constant propagation and merging:
//
// Specifically, this:
//   * Assumes values are constant unless proven otherwise
//   * Assumes BasicBlocks are dead unless proven otherwise
//   * Proves values to be constant, and replaces them with constants
//   * Proves conditional branches to be unconditional
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/SCCP.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueLattice.h"
#include "llvm/Analysis/ValueLatticeUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include <cassert>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "sccp"

STATISTIC(NumInstRemoved, "Number of instructions removed");
STATISTIC(NumDeadBlocks, "Number of basic blocks unreachable");

STATISTIC(IPNumInstRemoved, "Number of instructions removed by IPSCCP");
STATISTIC(IPNumArgsElimed, "Number of arguments constant propagated by IPSCCP");
STATISTIC(IPNumGlobalConst, "Number of globals found to be constant by IPSCCP");

namespace {

// Helper to check if \p LV is either a constant or a constant
// range with a single element. This should cover exactly the same cases as the
// old ValueLatticeElement::isConstant() and is intended to be used in the
// transition to ValueLatticeElement.
bool isConstant(const ValueLatticeElement &LV) {
  return LV.isConstant() ||
         (LV.isConstantRange() && LV.getConstantRange().isSingleElement());
}
// Helper to check if \p LV is either overdefined or a constant range with more
// than a single element. This should cover exactly the same cases as the old
// ValueLatticeElement::isOverdefined() and is intended to be used in the
// transition to ValueLatticeElement.
bool isOverdefined(const ValueLatticeElement &LV) {
  return LV.isOverdefined() ||
         (LV.isConstantRange() && !LV.getConstantRange().isSingleElement());
}

//===----------------------------------------------------------------------===//
//
/// SCCPSolver - This class is a general purpose solver for Sparse Conditional
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
  const DataLayout &DL;
  std::function<const TargetLibraryInfo &(Function &)> GetTLI;
  SmallPtrSet<BasicBlock *, 8> BBExecutable; // The BBs that are executable.
  DenseMap<Value *, ValueLatticeElement>
      ValueState; // The state each value is in.

  /// StructValueState - This maintains ValueState for values that have
  /// StructType, for example for formal arguments, calls, insertelement, etc.
  DenseMap<std::pair<Value *, unsigned>, ValueLatticeElement> StructValueState;

  /// TrackedGlobals - If we are tracking any values for the contents of a
  /// global variable, we keep a mapping from the constant accessor to the
  /// element of the global, to the currently known value. If the value
  /// becomes overdefined, its entry is simply removed from this map.
  DenseMap<GlobalVariable *, ValueLatticeElement> TrackedGlobals;

  /// TrackedRetVals - If we are tracking arguments into and the return
  /// value out of a function, it will have an entry in this map, indicating
  /// what the known return value for the function is.
  MapVector<Function *, ValueLatticeElement> TrackedRetVals;

  /// TrackedMultipleRetVals - Same as TrackedRetVals, but used for functions
  /// that return multiple values.
  MapVector<std::pair<Function *, unsigned>, ValueLatticeElement>
      TrackedMultipleRetVals;

  /// MRVFunctionsTracked - Each function in TrackedMultipleRetVals is
  /// represented here for efficient lookup.
  SmallPtrSet<Function *, 16> MRVFunctionsTracked;

  /// MustTailCallees - Each function here is a callee of a non-removable
  /// musttail call site.
  SmallPtrSet<Function *, 16> MustTailCallees;

  /// TrackingIncomingArguments - This is the set of functions whose arguments
  /// we make optimistic assumptions about and try to prove as constants.
  SmallPtrSet<Function *, 16> TrackingIncomingArguments;

  /// The reason for two worklists is that overdefined is the lowest state
  /// on the lattice, and moving things to overdefined as fast as possible
  /// makes SCCP converge much faster.
  ///
  /// By having a separate worklist, we accomplish this because everything
  /// possibly overdefined will become overdefined at the soonest possible
  /// point.
  SmallVector<Value *, 64> OverdefinedInstWorkList;
  SmallVector<Value *, 64> InstWorkList;

  // The BasicBlock work list
  SmallVector<BasicBlock *, 64> BBWorkList;

  /// KnownFeasibleEdges - Entries in this set are edges which have already had
  /// PHI nodes retriggered.
  using Edge = std::pair<BasicBlock *, BasicBlock *>;
  DenseSet<Edge> KnownFeasibleEdges;

  DenseMap<Function *, AnalysisResultsForFn> AnalysisResults;
  DenseMap<Value *, SmallPtrSet<User *, 2>> AdditionalUsers;

  LLVMContext &Ctx;

public:
  void addAnalysis(Function &F, AnalysisResultsForFn A) {
    AnalysisResults.insert({&F, std::move(A)});
  }

  const PredicateBase *getPredicateInfoFor(Instruction *I) {
    auto A = AnalysisResults.find(I->getParent()->getParent());
    if (A == AnalysisResults.end())
      return nullptr;
    return A->second.PredInfo->getPredicateInfoFor(I);
  }

  DomTreeUpdater getDTU(Function &F) {
    auto A = AnalysisResults.find(&F);
    assert(A != AnalysisResults.end() && "Need analysis results for function.");
    return {A->second.DT, A->second.PDT, DomTreeUpdater::UpdateStrategy::Lazy};
  }

  SCCPSolver(const DataLayout &DL,
             std::function<const TargetLibraryInfo &(Function &)> GetTLI,
             LLVMContext &Ctx)
      : DL(DL), GetTLI(std::move(GetTLI)), Ctx(Ctx) {}

  /// MarkBlockExecutable - This method can be used by clients to mark all of
  /// the blocks that are known to be intrinsically live in the processed unit.
  ///
  /// This returns true if the block was not considered live before.
  bool MarkBlockExecutable(BasicBlock *BB) {
    if (!BBExecutable.insert(BB).second)
      return false;
    LLVM_DEBUG(dbgs() << "Marking Block Executable: " << BB->getName() << '\n');
    BBWorkList.push_back(BB); // Add the block to the work list!
    return true;
  }

  /// TrackValueOfGlobalVariable - Clients can use this method to
  /// inform the SCCPSolver that it should track loads and stores to the
  /// specified global variable if it can. This is only legal to call if
  /// performing Interprocedural SCCP.
  void TrackValueOfGlobalVariable(GlobalVariable *GV) {
    // We only track the contents of scalar globals.
    if (GV->getValueType()->isSingleValueType()) {
      ValueLatticeElement &IV = TrackedGlobals[GV];
      if (!isa<UndefValue>(GV->getInitializer()))
        IV.markConstant(GV->getInitializer());
    }
  }

  /// AddTrackedFunction - If the SCCP solver is supposed to track calls into
  /// and out of the specified function (which cannot have its address taken),
  /// this method must be called.
  void AddTrackedFunction(Function *F) {
    // Add an entry, F -> undef.
    if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
      MRVFunctionsTracked.insert(F);
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        TrackedMultipleRetVals.insert(
            std::make_pair(std::make_pair(F, i), ValueLatticeElement()));
    } else
      TrackedRetVals.insert(std::make_pair(F, ValueLatticeElement()));
  }

  /// AddMustTailCallee - Record that the given function is the callee of a
  /// non-removable musttail call site.
  void AddMustTailCallee(Function *F) {
    MustTailCallees.insert(F);
  }

  /// Returns true if the given function is called from a non-removable
  /// musttail call site.
  bool isMustTailCallee(Function *F) {
    return MustTailCallees.count(F);
  }

  void AddArgumentTrackedFunction(Function *F) {
    TrackingIncomingArguments.insert(F);
  }

  /// Returns true if the given function is in the solver's set of
  /// argument-tracked functions.
  bool isArgumentTrackedFunction(Function *F) {
    return TrackingIncomingArguments.count(F);
  }

  /// Solve - Solve for constants and executable blocks.
  void Solve();

  /// ResolvedUndefsIn - While solving the dataflow for a function, we assume
  /// that branches on undef values cannot reach any of their successors.
  /// However, this is not a safe assumption. After we solve dataflow, this
  /// method should be used to handle this. If this returns true, the solver
  /// should be rerun.
  bool ResolvedUndefsIn(Function &F);

  bool isBlockExecutable(BasicBlock *BB) const {
    return BBExecutable.count(BB);
  }

  // isEdgeFeasible - Return true if the control flow edge from the 'From'
  // basic block to the 'To' basic block is currently feasible.
  bool isEdgeFeasible(BasicBlock *From, BasicBlock *To);

  std::vector<ValueLatticeElement> getStructLatticeValueFor(Value *V) const {
    std::vector<ValueLatticeElement> StructValues;
    auto *STy = dyn_cast<StructType>(V->getType());
    assert(STy && "getStructLatticeValueFor() can be called only on structs");
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto I = StructValueState.find(std::make_pair(V, i));
      assert(I != StructValueState.end() && "Value not in valuemap!");
      StructValues.push_back(I->second);
    }
    return StructValues;
  }

  const ValueLatticeElement &getLatticeValueFor(Value *V) const {
    assert(!V->getType()->isStructTy() &&
           "Should use getStructLatticeValueFor");
    DenseMap<Value *, ValueLatticeElement>::const_iterator I =
        ValueState.find(V);
    assert(I != ValueState.end() &&
           "V not found in ValueState nor Paramstate map!");
    return I->second;
  }

  /// getTrackedRetVals - Get the inferred return value map.
  const MapVector<Function *, ValueLatticeElement> &getTrackedRetVals() {
    return TrackedRetVals;
  }

  /// getTrackedGlobals - Get and return the set of inferred initializers for
  /// global variables.
  const DenseMap<GlobalVariable *, ValueLatticeElement> &getTrackedGlobals() {
    return TrackedGlobals;
  }

  /// getMRVFunctionsTracked - Get the set of functions which return multiple
  /// values tracked by the pass.
  const SmallPtrSet<Function *, 16> getMRVFunctionsTracked() {
    return MRVFunctionsTracked;
  }

  /// getMustTailCallees - Get the set of functions which are called
  /// from non-removable musttail call sites.
  const SmallPtrSet<Function *, 16> getMustTailCallees() {
    return MustTailCallees;
  }

  /// markOverdefined - Mark the specified value overdefined. This
  /// works with both scalars and structs.
  void markOverdefined(Value *V) {
    if (auto *STy = dyn_cast<StructType>(V->getType()))
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        markOverdefined(getStructValueState(V, i), V);
    else
      markOverdefined(ValueState[V], V);
  }

  // isStructLatticeConstant - Return true if all the lattice values
  // corresponding to elements of the structure are constants,
  // false otherwise.
  bool isStructLatticeConstant(Function *F, StructType *STy) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      const auto &It = TrackedMultipleRetVals.find(std::make_pair(F, i));
      assert(It != TrackedMultipleRetVals.end());
      ValueLatticeElement LV = It->second;
      if (!isConstant(LV))
        return false;
    }
    return true;
  }

  /// Helper to return a Constant if \p LV is either a constant or a constant
  /// range with a single element.
  Constant *getConstant(const ValueLatticeElement &LV) const {
    if (LV.isConstant())
      return LV.getConstant();

    if (LV.isConstantRange()) {
      auto &CR = LV.getConstantRange();
      if (CR.getSingleElement())
        return ConstantInt::get(Ctx, *CR.getSingleElement());
    }
    return nullptr;
  }

private:
  ConstantInt *getConstantInt(const ValueLatticeElement &IV) const {
    return dyn_cast_or_null<ConstantInt>(getConstant(IV));
  }

  // pushToWorkList - Helper for markConstant/markOverdefined
  void pushToWorkList(ValueLatticeElement &IV, Value *V) {
    if (IV.isOverdefined())
      return OverdefinedInstWorkList.push_back(V);
    InstWorkList.push_back(V);
  }

  // Helper to push \p V to the worklist, after updating it to \p IV. Also
  // prints a debug message with the updated value.
  void pushToWorkListMsg(ValueLatticeElement &IV, Value *V) {
    LLVM_DEBUG(dbgs() << "updated " << IV << ": " << *V << '\n');
    pushToWorkList(IV, V);
  }

  // markConstant - Make a value be marked as "constant". If the value
  // is not already a constant, add it to the instruction work list so that
  // the users of the instruction are updated later.
  bool markConstant(ValueLatticeElement &IV, Value *V, Constant *C,
                    bool MayIncludeUndef = false) {
    if (!IV.markConstant(C, MayIncludeUndef))
      return false;
    LLVM_DEBUG(dbgs() << "markConstant: " << *C << ": " << *V << '\n');
    pushToWorkList(IV, V);
    return true;
  }

  bool markConstant(Value *V, Constant *C) {
    assert(!V->getType()->isStructTy() && "structs should use mergeInValue");
    return markConstant(ValueState[V], V, C);
  }

  // markOverdefined - Make a value be marked as "overdefined". If the
  // value is not already overdefined, add it to the overdefined instruction
  // work list so that the users of the instruction are updated later.
  bool markOverdefined(ValueLatticeElement &IV, Value *V) {
    if (!IV.markOverdefined()) return false;

    LLVM_DEBUG(dbgs() << "markOverdefined: ";
               if (auto *F = dyn_cast<Function>(V)) dbgs()
               << "Function '" << F->getName() << "'\n";
               else dbgs() << *V << '\n');
    // Only instructions go on the work list
    pushToWorkList(IV, V);
    return true;
  }
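
  // Note: a value's lattice state only ever moves in one direction:
  // unknown/undef -> constant (or constant range) -> overdefined. Once a
  // value has been marked overdefined it never becomes constant again, which
  // is what guarantees that the solver terminates.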

  /// Merge \p MergeWithV into \p IV and push \p V to the worklist, if \p IV
  /// changes.
  bool mergeInValue(ValueLatticeElement &IV, Value *V,
                    ValueLatticeElement MergeWithV,
                    ValueLatticeElement::MergeOptions Opts = {
                        /*MayIncludeUndef=*/false, /*CheckWiden=*/true}) {
    if (IV.mergeIn(MergeWithV, Opts)) {
      pushToWorkList(IV, V);
      LLVM_DEBUG(dbgs() << "Merged " << MergeWithV << " into " << *V << " : "
                        << IV << "\n");
      return true;
    }
    return false;
  }

  bool mergeInValue(Value *V, ValueLatticeElement MergeWithV,
                    ValueLatticeElement::MergeOptions Opts = {
                        /*MayIncludeUndef=*/false, /*CheckWiden=*/true}) {
    assert(!V->getType()->isStructTy() &&
           "non-structs should use markConstant");
    return mergeInValue(ValueState[V], V, MergeWithV, Opts);
  }

  /// getValueState - Return the ValueLatticeElement object that corresponds to
  /// the value. This function handles the case when the value hasn't been seen
  /// yet by properly seeding constants etc.
  ValueLatticeElement &getValueState(Value *V) {
    assert(!V->getType()->isStructTy() && "Should use getStructValueState");

    auto I = ValueState.insert(std::make_pair(V, ValueLatticeElement()));
    ValueLatticeElement &LV = I.first->second;

    if (!I.second)
      return LV; // Common case, already in the map.

    if (auto *C = dyn_cast<Constant>(V))
      LV.markConstant(C); // Constants are constant

    // All others are unknown by default.
    return LV;
  }

  /// getStructValueState - Return the ValueLatticeElement object that
  /// corresponds to the value/field pair. This function handles the case when
  /// the value hasn't been seen yet by properly seeding constants etc.
  ValueLatticeElement &getStructValueState(Value *V, unsigned i) {
    assert(V->getType()->isStructTy() && "Should use getValueState");
    assert(i < cast<StructType>(V->getType())->getNumElements() &&
           "Invalid element #");

    auto I = StructValueState.insert(
        std::make_pair(std::make_pair(V, i), ValueLatticeElement()));
    ValueLatticeElement &LV = I.first->second;

    if (!I.second)
      return LV; // Common case, already in the map.

    if (auto *C = dyn_cast<Constant>(V)) {
      Constant *Elt = C->getAggregateElement(i);

      if (!Elt)
        LV.markOverdefined(); // Unknown sort of constant.
      else if (isa<UndefValue>(Elt))
        ; // Undef values remain unknown.
      else
        LV.markConstant(Elt); // Constants are constant.
    }

    // All others are unknown by default.
    return LV;
  }

  /// markEdgeExecutable - Mark the edge from \p Source to \p Dest feasible,
  /// marking the destination block executable (and adding it to the BB work
  /// list) if it is not already.
  bool markEdgeExecutable(BasicBlock *Source, BasicBlock *Dest) {
    if (!KnownFeasibleEdges.insert(Edge(Source, Dest)).second)
      return false; // This edge is already known to be executable!

    if (!MarkBlockExecutable(Dest)) {
      // If the destination is already executable, we just made an *edge*
      // feasible that wasn't before. Revisit the PHI nodes in the block
      // because they have potentially new operands.
      LLVM_DEBUG(dbgs() << "Marking Edge Executable: " << Source->getName()
                        << " -> " << Dest->getName() << '\n');

      for (PHINode &PN : Dest->phis())
        visitPHINode(PN);
    }
    return true;
  }

  // getFeasibleSuccessors - Return a vector of booleans to indicate which
  // successors are reachable from a given terminator instruction.
  void getFeasibleSuccessors(Instruction &TI, SmallVectorImpl<bool> &Succs);

  // OperandChangedState - This method is invoked on all of the users of an
  // instruction that was just changed state somehow. Based on this
  // information, we need to update the specified user of this instruction.
  void OperandChangedState(Instruction *I) {
    if (BBExecutable.count(I->getParent())) // Inst is executable?
      visit(*I);
  }

  // Add U as additional user of V.
  void addAdditionalUser(Value *V, User *U) {
    auto Iter = AdditionalUsers.insert({V, {}});
    Iter.first->second.insert(U);
  }

  // Mark I's users as changed, including AdditionalUsers.
  void markUsersAsChanged(Value *I) {
    // Functions include their arguments in the use-list. Changed function
    // values mean that the result of the function changed. We only need to
    // update the call sites with the new function result and do not have to
    // propagate the call arguments.
    if (isa<Function>(I)) {
      for (User *U : I->users()) {
        if (auto *CB = dyn_cast<CallBase>(U))
          handleCallResult(*CB);
      }
    } else {
      for (User *U : I->users())
        if (auto *UI = dyn_cast<Instruction>(U))
          OperandChangedState(UI);
    }

    auto Iter = AdditionalUsers.find(I);
    if (Iter != AdditionalUsers.end()) {
      for (User *U : Iter->second)
        if (auto *UI = dyn_cast<Instruction>(U))
          OperandChangedState(UI);
    }
  }
  void handleCallOverdefined(CallBase &CB);
  void handleCallResult(CallBase &CB);
  void handleCallArguments(CallBase &CB);

private:
  friend class InstVisitor<SCCPSolver>;

  // visit implementations - Something changed in this instruction. Either an
  // operand made a transition, or the instruction is newly executable. Change
  // the value type of I to reflect these changes if appropriate.
  void visitPHINode(PHINode &I);

  // Terminators

  void visitReturnInst(ReturnInst &I);
  void visitTerminator(Instruction &TI);

  void visitCastInst(CastInst &I);
  void visitSelectInst(SelectInst &I);
  void visitUnaryOperator(Instruction &I);
  void visitBinaryOperator(Instruction &I);
  void visitCmpInst(CmpInst &I);
  void visitExtractValueInst(ExtractValueInst &EVI);
  void visitInsertValueInst(InsertValueInst &IVI);

  void visitCatchSwitchInst(CatchSwitchInst &CPI) {
    markOverdefined(&CPI);
    visitTerminator(CPI);
  }

  // Instructions that cannot be folded away.

  void visitStoreInst(StoreInst &I);
  void visitLoadInst(LoadInst &I);
  void visitGetElementPtrInst(GetElementPtrInst &I);

  void visitCallInst(CallInst &I) {
    visitCallBase(I);
  }

  void visitInvokeInst(InvokeInst &II) {
    visitCallBase(II);
    visitTerminator(II);
  }

  void visitCallBrInst(CallBrInst &CBI) {
    visitCallBase(CBI);
    visitTerminator(CBI);
  }

  void visitCallBase(CallBase &CB);
  void visitResumeInst(ResumeInst &I) { /*returns void*/ }
  void visitUnreachableInst(UnreachableInst &I) { /*returns void*/ }
  void visitFenceInst(FenceInst &I) { /*returns void*/ }

  void visitInstruction(Instruction &I) {
    // All the instructions we don't do any special handling for just
    // go to overdefined.
    LLVM_DEBUG(dbgs() << "SCCP: Don't know how to handle: " << I << '\n');
    markOverdefined(&I);
  }
};

} // end anonymous namespace

// getFeasibleSuccessors - Return a vector of booleans to indicate which
// successors are reachable from a given terminator instruction.
void SCCPSolver::getFeasibleSuccessors(Instruction &TI,
                                       SmallVectorImpl<bool> &Succs) {
  Succs.resize(TI.getNumSuccessors());
  if (auto *BI = dyn_cast<BranchInst>(&TI)) {
    if (BI->isUnconditional()) {
      Succs[0] = true;
      return;
    }

    ValueLatticeElement BCValue = getValueState(BI->getCondition());
    ConstantInt *CI = getConstantInt(BCValue);
    if (!CI) {
      // Overdefined condition variables, and branches on unfoldable constant
      // conditions, mean the branch could go either way.
      if (!BCValue.isUnknownOrUndef())
        Succs[0] = Succs[1] = true;
      return;
    }

    // Constant condition variables mean the branch can only go a single way.
    Succs[CI->isZero()] = true;
    return;
  }

  // Unwinding instructions' successors are always executable.
  if (TI.isExceptionalTerminator()) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  if (auto *SI = dyn_cast<SwitchInst>(&TI)) {
    if (!SI->getNumCases()) {
      Succs[0] = true;
      return;
    }
    ValueLatticeElement SCValue = getValueState(SI->getCondition());
    ConstantInt *CI = getConstantInt(SCValue);

    if (!CI) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!SCValue.isUnknownOrUndef())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    Succs[SI->findCaseValue(CI)->getSuccessorIndex()] = true;
    return;
  }

  // In case of an indirect branch whose address is a blockaddress, we mark
  // the target as executable.
  if (auto *IBR = dyn_cast<IndirectBrInst>(&TI)) {
    // Casts are folded by visitCastInst.
    ValueLatticeElement IBRValue = getValueState(IBR->getAddress());
    BlockAddress *Addr = dyn_cast_or_null<BlockAddress>(getConstant(IBRValue));
    if (!Addr) { // Overdefined or unknown condition?
      // All destinations are executable!
      if (!IBRValue.isUnknownOrUndef())
        Succs.assign(TI.getNumSuccessors(), true);
      return;
    }

    BasicBlock *T = Addr->getBasicBlock();
    assert(Addr->getFunction() == T->getParent() &&
           "Block address of a different function ?");
    for (unsigned i = 0; i < IBR->getNumSuccessors(); ++i) {
      // This is the target.
      if (IBR->getDestination(i) == T) {
        Succs[i] = true;
        return;
      }
    }

    // If we didn't find our destination in the IBR successor list, then we
    // have undefined behavior. It's ok to assume no successor is executable.
    return;
  }

  // In case of callbr, we pessimistically assume that all successors are
  // feasible.
  if (isa<CallBrInst>(&TI)) {
    Succs.assign(TI.getNumSuccessors(), true);
    return;
  }

  LLVM_DEBUG(dbgs() << "Unknown terminator instruction: " << TI << '\n');
  llvm_unreachable("SCCP: Don't know how to handle this terminator!");
}
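
// For example, if the solver has proven the condition of
//   br i1 %cond, label %then, label %else
// to be the constant true, only the %then successor is reported as feasible,
// so %else stays unreachable unless some other feasible edge leads to it.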

// isEdgeFeasible - Return true if the control flow edge from the 'From' basic
// block to the 'To' basic block is currently feasible.
bool SCCPSolver::isEdgeFeasible(BasicBlock *From, BasicBlock *To) {
  // Check if we've called markEdgeExecutable on the edge yet. (We could
  // be more aggressive and try to consider edges which haven't been marked
  // yet, but there isn't any need.)
  return KnownFeasibleEdges.count(Edge(From, To));
}

// visit Implementations - Something changed in this instruction, either an
// operand made a transition, or the instruction is newly executable. Change
// the value type of I to reflect these changes if appropriate. This method
// makes sure to do the following actions:
//
// 1. If a phi node merges two constants in, and has conflicting values coming
//    from different branches, or if the PHI node merges in an overdefined
//    value, then the PHI node becomes overdefined.
// 2. If a phi node merges only constants in, and they all agree on value, the
//    PHI node becomes a constant value equal to that.
// 3. If V <- x (op) y && isConstant(x) && isConstant(y) V = Constant
// 4. If V <- x (op) y && (isOverdefined(x) || isOverdefined(y)) V = Overdefined
// 5. If V <- MEM or V <- CALL or V <- (unknown) then V = Overdefined
// 6. If a conditional branch has a value that is constant, make the selected
//    destination executable
// 7. If a conditional branch has a value that is overdefined, make all
//    successors executable.
void SCCPSolver::visitPHINode(PHINode &PN) {
  // If this PN returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (PN.getType()->isStructTy())
    return (void)markOverdefined(&PN);

  if (getValueState(&PN).isOverdefined())
    return; // Quick exit

  // Super-extra-high-degree PHI nodes are unlikely to ever be marked constant,
  // and slow us down a lot. Just mark them overdefined.
  if (PN.getNumIncomingValues() > 64)
    return (void)markOverdefined(&PN);

  // Look at all of the executable operands of the PHI node. If any of them
  // are overdefined, the PHI becomes overdefined as well. If they are all
  // constant, and they agree with each other, the PHI becomes the identical
  // constant. If they are constant and don't agree, the PHI is overdefined.
  // If there are no executable operands, the PHI remains unknown.
  bool Changed = false;
  for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
    ValueLatticeElement IV = getValueState(PN.getIncomingValue(i));
    if (!isEdgeFeasible(PN.getIncomingBlock(i), PN.getParent()))
      continue;

    ValueLatticeElement &Res = getValueState(&PN);
    Changed |= Res.mergeIn(IV);
    if (Res.isOverdefined())
      break;
  }
  if (Changed)
    pushToWorkListMsg(ValueState[&PN], &PN);
}

void SCCPSolver::visitReturnInst(ReturnInst &I) {
  if (I.getNumOperands() == 0) return; // ret void

  Function *F = I.getParent()->getParent();
  Value *ResultOp = I.getOperand(0);

  // If we are tracking the return value of this function, merge it in.
  if (!TrackedRetVals.empty() && !ResultOp->getType()->isStructTy()) {
    auto TFRVI = TrackedRetVals.find(F);
    if (TFRVI != TrackedRetVals.end()) {
      mergeInValue(TFRVI->second, F, getValueState(ResultOp));
      return;
    }
  }

  // Handle functions that return multiple values.
  if (!TrackedMultipleRetVals.empty()) {
    if (auto *STy = dyn_cast<StructType>(ResultOp->getType()))
      if (MRVFunctionsTracked.count(F))
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
          mergeInValue(TrackedMultipleRetVals[std::make_pair(F, i)], F,
                       getStructValueState(ResultOp, i));
  }
}

void SCCPSolver::visitTerminator(Instruction &TI) {
  SmallVector<bool, 16> SuccFeasible;
  getFeasibleSuccessors(TI, SuccFeasible);

  BasicBlock *BB = TI.getParent();

  // Mark all feasible successors executable.
  for (unsigned i = 0, e = SuccFeasible.size(); i != e; ++i)
    if (SuccFeasible[i])
      markEdgeExecutable(BB, TI.getSuccessor(i));
}

void SCCPSolver::visitCastInst(CastInst &I) {
  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return;

  ValueLatticeElement OpSt = getValueState(I.getOperand(0));
  if (Constant *OpC = getConstant(OpSt)) {
    // Fold the constant as we build.
    Constant *C = ConstantFoldCastOperand(I.getOpcode(), OpC, I.getType(), DL);
    if (isa<UndefValue>(C))
      return;
    // Propagate constant value
    markConstant(&I, C);
  } else if (OpSt.isConstantRange() && I.getDestTy()->isIntegerTy()) {
    auto &LV = getValueState(&I);
    ConstantRange OpRange = OpSt.getConstantRange();
    Type *DestTy = I.getDestTy();
    ConstantRange Res =
        OpRange.castOp(I.getOpcode(), DL.getTypeSizeInBits(DestTy));
    mergeInValue(LV, &I, ValueLatticeElement::getRange(Res));
  } else if (!OpSt.isUnknownOrUndef())
    markOverdefined(&I);
}

void SCCPSolver::visitExtractValueInst(ExtractValueInst &EVI) {
  // If this returns a struct, mark all elements overdefined; we don't track
  // structs in structs.
  if (EVI.getType()->isStructTy())
    return (void)markOverdefined(&EVI);

  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&EVI].isOverdefined())
    return (void)markOverdefined(&EVI);

  // If this is extracting from more than one level of struct, we don't know.
  if (EVI.getNumIndices() != 1)
    return (void)markOverdefined(&EVI);

  Value *AggVal = EVI.getAggregateOperand();
  if (AggVal->getType()->isStructTy()) {
    unsigned i = *EVI.idx_begin();
    ValueLatticeElement EltVal = getStructValueState(AggVal, i);
    mergeInValue(getValueState(&EVI), &EVI, EltVal);
  } else {
    // Otherwise, must be extracting from an array.
    return (void)markOverdefined(&EVI);
  }
}

void SCCPSolver::visitInsertValueInst(InsertValueInst &IVI) {
  auto *STy = dyn_cast<StructType>(IVI.getType());
  if (!STy)
    return (void)markOverdefined(&IVI);

  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (isOverdefined(ValueState[&IVI]))
    return (void)markOverdefined(&IVI);

  // If this has more than one index, we can't handle it, drive all results to
  // undef.
  if (IVI.getNumIndices() != 1)
    return (void)markOverdefined(&IVI);

  Value *Aggr = IVI.getAggregateOperand();
  unsigned Idx = *IVI.idx_begin();

  // Compute the result based on what we're inserting.
  for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
    // This passes through all values that aren't the inserted element.
    if (i != Idx) {
      ValueLatticeElement EltVal = getStructValueState(Aggr, i);
      mergeInValue(getStructValueState(&IVI, i), &IVI, EltVal);
      continue;
    }

    Value *Val = IVI.getInsertedValueOperand();
    if (Val->getType()->isStructTy())
      // We don't track structs in structs.
      markOverdefined(getStructValueState(&IVI, i), &IVI);
    else {
      ValueLatticeElement InVal = getValueState(Val);
      mergeInValue(getStructValueState(&IVI, i), &IVI, InVal);
    }
  }
}

void SCCPSolver::visitSelectInst(SelectInst &I) {
  // If this select returns a struct, just mark the result overdefined.
  // TODO: We could do a lot better than this if code actually uses this.
  if (I.getType()->isStructTy())
    return (void)markOverdefined(&I);

  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  ValueLatticeElement CondValue = getValueState(I.getCondition());
  if (CondValue.isUnknownOrUndef())
    return;

  if (ConstantInt *CondCB = getConstantInt(CondValue)) {
    Value *OpVal = CondCB->isZero() ? I.getFalseValue() : I.getTrueValue();
    mergeInValue(&I, getValueState(OpVal));
    return;
  }

  // Otherwise, the condition is overdefined or a constant we can't evaluate.
  // See if we can produce something better than overdefined based on the T/F
  // value.
  ValueLatticeElement TVal = getValueState(I.getTrueValue());
  ValueLatticeElement FVal = getValueState(I.getFalseValue());

  bool Changed = ValueState[&I].mergeIn(TVal);
  Changed |= ValueState[&I].mergeIn(FVal);
  if (Changed)
    pushToWorkListMsg(ValueState[&I], &I);
}

// Handle Unary Operators.
void SCCPSolver::visitUnaryOperator(Instruction &I) {
  ValueLatticeElement V0State = getValueState(I.getOperand(0));

  ValueLatticeElement &IV = ValueState[&I];
  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (isOverdefined(IV))
    return (void)markOverdefined(&I);

  if (isConstant(V0State)) {
    Constant *C = ConstantExpr::get(I.getOpcode(), getConstant(V0State));

    // op Y -> undef.
    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // If something is undef, wait for it to resolve.
  if (!isOverdefined(V0State))
    return;

  markOverdefined(&I);
}

// Handle Binary Operators.
void SCCPSolver::visitBinaryOperator(Instruction &I) {
  ValueLatticeElement V1State = getValueState(I.getOperand(0));
  ValueLatticeElement V2State = getValueState(I.getOperand(1));

  ValueLatticeElement &IV = ValueState[&I];
  if (IV.isOverdefined())
    return;

  // If something is undef, wait for it to resolve.
  if (V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef())
    return;

  if (V1State.isOverdefined() && V2State.isOverdefined())
    return (void)markOverdefined(&I);

  // If either of the operands is a constant, try to fold it to a constant.
  // TODO: Use information from notconstant better.
  if ((V1State.isConstant() || V2State.isConstant())) {
    Value *V1 = isConstant(V1State) ? getConstant(V1State) : I.getOperand(0);
    Value *V2 = isConstant(V2State) ? getConstant(V2State) : I.getOperand(1);
    Value *R = SimplifyBinOp(I.getOpcode(), V1, V2, SimplifyQuery(DL));
    auto *C = dyn_cast_or_null<Constant>(R);
    if (C) {
      // X op Y -> undef.
      if (isa<UndefValue>(C))
        return;
      // Conservatively assume that the result may be based on operands that
      // may be undef. Note that we use mergeInValue to combine the constant
      // with the existing lattice value for I, as different constants might
      // be found after one of the operands goes to overdefined, e.g. due to
      // one operand being a special floating point value.
      ValueLatticeElement NewV;
      NewV.markConstant(C, /*MayIncludeUndef=*/true);
      return (void)mergeInValue(&I, NewV);
    }
  }

  // Only use ranges for binary operators on integers.
  if (!I.getType()->isIntegerTy())
    return markOverdefined(&I);

  // Try to simplify to a constant range.
  ConstantRange A = ConstantRange::getFull(I.getType()->getScalarSizeInBits());
  ConstantRange B = ConstantRange::getFull(I.getType()->getScalarSizeInBits());
  if (V1State.isConstantRange())
    A = V1State.getConstantRange();
  if (V2State.isConstantRange())
    B = V2State.getConstantRange();

  ConstantRange R = A.binaryOp(cast<BinaryOperator>(&I)->getOpcode(), B);
  mergeInValue(&I, ValueLatticeElement::getRange(R));

  // TODO: Currently we do not exploit special values that produce something
  // better than overdefined with an overdefined operand for vector or floating
  // point types, like and <4 x i32> overdefined, zeroinitializer.
}

// Handle ICmpInst instruction.
void SCCPSolver::visitCmpInst(CmpInst &I) {
  // Do not cache this lookup, getValueState calls later in the function might
  // invalidate the reference.
  if (isOverdefined(ValueState[&I]))
    return (void)markOverdefined(&I);

  Value *Op1 = I.getOperand(0);
  Value *Op2 = I.getOperand(1);

  // For parameters, use ParamState which includes constant range info if
  // available.
  auto V1State = getValueState(Op1);
  auto V2State = getValueState(Op2);

  Constant *C = V1State.getCompare(I.getPredicate(), I.getType(), V2State);
  if (C) {
    if (isa<UndefValue>(C))
      return;
    ValueLatticeElement CV;
    CV.markConstant(C);
    mergeInValue(&I, CV);
    return;
  }

  // If operands are still unknown, wait for them to resolve.
  if ((V1State.isUnknownOrUndef() || V2State.isUnknownOrUndef()) &&
      !isConstant(ValueState[&I]))
    return;

  markOverdefined(&I);
}

// Handle getelementptr instructions. If all operands are constants then we
// can turn this into a getelementptr ConstantExpr.
void SCCPSolver::visitGetElementPtrInst(GetElementPtrInst &I) {
  if (isOverdefined(ValueState[&I]))
    return (void)markOverdefined(&I);

  SmallVector<Constant *, 8> Operands;
  Operands.reserve(I.getNumOperands());

  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    ValueLatticeElement State = getValueState(I.getOperand(i));
    if (State.isUnknownOrUndef())
      return; // Operands are not resolved yet.

    if (isOverdefined(State))
      return (void)markOverdefined(&I);

    if (Constant *C = getConstant(State)) {
      Operands.push_back(C);
      continue;
    }

    return (void)markOverdefined(&I);
  }

  Constant *Ptr = Operands[0];
  auto Indices = makeArrayRef(Operands.begin() + 1, Operands.end());
  Constant *C =
      ConstantExpr::getGetElementPtr(I.getSourceElementType(), Ptr, Indices);
  if (isa<UndefValue>(C))
    return;
  markConstant(&I, C);
}

void SCCPSolver::visitStoreInst(StoreInst &SI) {
  // If this store is of a struct, ignore it.
  if (SI.getOperand(0)->getType()->isStructTy())
    return;

  if (TrackedGlobals.empty() || !isa<GlobalVariable>(SI.getOperand(1)))
    return;

  GlobalVariable *GV = cast<GlobalVariable>(SI.getOperand(1));
  auto I = TrackedGlobals.find(GV);
  if (I == TrackedGlobals.end())
    return;

  // Get the value we are storing into the global, then merge it.
  mergeInValue(I->second, GV, getValueState(SI.getOperand(0)),
               ValueLatticeElement::MergeOptions().setCheckWiden(false));
  if (I->second.isOverdefined())
    TrackedGlobals.erase(I); // No need to keep tracking this!
}

// Handle load instructions. If the operand is a constant pointer to a constant
// global, we can replace the load with the loaded constant value!
void SCCPSolver::visitLoadInst(LoadInst &I) {
  // If this load is of a struct, just mark the result overdefined.
  if (I.getType()->isStructTy())
    return (void)markOverdefined(&I);

  // ResolvedUndefsIn might mark I as overdefined. Bail out, even if we would
  // discover a concrete value later.
  if (ValueState[&I].isOverdefined())
    return (void)markOverdefined(&I);

  ValueLatticeElement PtrVal = getValueState(I.getOperand(0));
  if (PtrVal.isUnknownOrUndef())
    return; // The pointer is not resolved yet!

  ValueLatticeElement &IV = ValueState[&I];

  if (!isConstant(PtrVal) || I.isVolatile())
    return (void)markOverdefined(IV, &I);

  Constant *Ptr = getConstant(PtrVal);

  // load null is undefined.
  if (isa<ConstantPointerNull>(Ptr)) {
    if (NullPointerIsDefined(I.getFunction(), I.getPointerAddressSpace()))
      return (void)markOverdefined(IV, &I);
    else
      return;
  }

  // Transform load (constant global) into the value loaded.
  if (auto *GV = dyn_cast<GlobalVariable>(Ptr)) {
    if (!TrackedGlobals.empty()) {
      // If we are tracking this global, merge in the known value for it.
      auto It = TrackedGlobals.find(GV);
      if (It != TrackedGlobals.end()) {
        mergeInValue(IV, &I, It->second,
                     ValueLatticeElement::MergeOptions().setCheckWiden(false));
        return;
      }
    }
  }

  // Transform load from a constant into a constant if possible.
  if (Constant *C = ConstantFoldLoadFromConstPtr(Ptr, I.getType(), DL)) {
    if (isa<UndefValue>(C))
      return;
    return (void)markConstant(IV, &I, C);
  }

  // Otherwise we cannot say for certain what value this load will produce.
  // Bail out.
  markOverdefined(IV, &I);
}

void SCCPSolver::visitCallBase(CallBase &CB) {
  handleCallResult(CB);
  handleCallArguments(CB);
}

void SCCPSolver::handleCallOverdefined(CallBase &CB) {
  Function *F = CB.getCalledFunction();

  // Void return and not tracking callee, just bail.
  if (CB.getType()->isVoidTy())
    return;

  // Otherwise, if we have a single return value case, and if the function is
  // a declaration, maybe we can constant fold it.
  if (F && F->isDeclaration() && !CB.getType()->isStructTy() &&
      canConstantFoldCallTo(&CB, F)) {
    SmallVector<Constant *, 8> Operands;
    for (auto AI = CB.arg_begin(), E = CB.arg_end(); AI != E; ++AI) {
      if (AI->get()->getType()->isStructTy())
        return markOverdefined(&CB); // Can't handle struct args.
      ValueLatticeElement State = getValueState(*AI);

      if (State.isUnknownOrUndef())
        return; // Operands are not resolved yet.
      if (isOverdefined(State))
        return (void)markOverdefined(&CB);
      assert(isConstant(State) && "Unknown state!");
      Operands.push_back(getConstant(State));
    }

    if (isOverdefined(getValueState(&CB)))
      return (void)markOverdefined(&CB);

    // If we can constant fold this, mark the result of the call as a
    // constant.
    if (Constant *C = ConstantFoldCall(&CB, F, Operands, &GetTLI(*F))) {
      // call -> undef.
      if (isa<UndefValue>(C))
        return;
      return (void)markConstant(&CB, C);
    }
  }

  // Otherwise, we don't know anything about this call, mark it overdefined.
  return (void)markOverdefined(&CB);
}

void SCCPSolver::handleCallArguments(CallBase &CB) {
  Function *F = CB.getCalledFunction();
  // If this is a local function that doesn't have its address taken, mark its
  // entry block executable and merge in the actual arguments to the call into
  // the formal arguments of the function.
  if (!TrackingIncomingArguments.empty() &&
      TrackingIncomingArguments.count(F)) {
    MarkBlockExecutable(&F->front());

    // Propagate information from this call site into the callee.
    auto CAI = CB.arg_begin();
    for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
         ++AI, ++CAI) {
      // If this argument is byval, and if the function is not readonly, there
      // will be an implicit copy formed of the input aggregate.
      if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
        markOverdefined(&*AI);
        continue;
      }

      if (auto *STy = dyn_cast<StructType>(AI->getType())) {
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          ValueLatticeElement CallArg = getStructValueState(*CAI, i);
          mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg);
        }
      } else
        mergeInValue(&*AI, getValueState(*CAI),
                     ValueLatticeElement::MergeOptions().setCheckWiden(false));
    }
  }
}

void SCCPSolver::handleCallResult(CallBase &CB) {
  Function *F = CB.getCalledFunction();

  if (auto *II = dyn_cast<IntrinsicInst>(&CB)) {
    if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
      if (ValueState[&CB].isOverdefined())
        return;

      Value *CopyOf = CB.getOperand(0);
      auto *PI = getPredicateInfoFor(&CB);
      auto *PBranch = dyn_cast_or_null<PredicateBranch>(PI);
      ValueLatticeElement OriginalVal = getValueState(CopyOf);
      if (!PI || !PBranch) {
        mergeInValue(ValueState[&CB], &CB, OriginalVal);
        return;
      }

      // Everything below relies on the condition being a comparison.
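      // For example, given
      //   %c = icmp ult i32 %x, 10
      //   br i1 %c, label %bb.true, label %bb.false
      // PredicateInfo inserts ssa.copy calls for %x in the branch targets, and
      // for the copy on the true edge the code below narrows the known range
      // of %x by intersecting it with the range implied by the condition
      // (here "ult 10", i.e. [0, 10)).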
      auto *Cmp = dyn_cast<CmpInst>(PBranch->Condition);
      if (!Cmp) {
        mergeInValue(ValueState[&CB], &CB, OriginalVal);
        return;
      }

      Value *CmpOp0 = Cmp->getOperand(0);
      Value *CmpOp1 = Cmp->getOperand(1);
      if (CopyOf != CmpOp0 && CopyOf != CmpOp1) {
        mergeInValue(ValueState[&CB], &CB, OriginalVal);
        return;
      }

      auto Pred = Cmp->getPredicate();
      if (CmpOp0 != CopyOf) {
        std::swap(CmpOp0, CmpOp1);
        Pred = Cmp->getSwappedPredicate();
      }

      // Wait until CmpOp1 is resolved.
      if (getValueState(CmpOp1).isUnknown()) {
        addAdditionalUser(CmpOp1, &CB);
        return;
      }

      if (!PBranch->TrueEdge)
        Pred = CmpInst::getInversePredicate(Pred);

      ValueLatticeElement CondVal = getValueState(CmpOp1);
      ValueLatticeElement &IV = ValueState[&CB];
      if (CondVal.isConstantRange() || OriginalVal.isConstantRange()) {
        auto NewCR =
            ConstantRange::getFull(DL.getTypeSizeInBits(CopyOf->getType()));

        // Get the range imposed by the condition.
        if (CondVal.isConstantRange())
          NewCR = ConstantRange::makeAllowedICmpRegion(
              Pred, CondVal.getConstantRange());

        // Combine range info for the original value with the new range from
        // the condition.
        auto OriginalCR = OriginalVal.isConstantRange()
                              ? OriginalVal.getConstantRange()
                              : ConstantRange::getFull(
                                    DL.getTypeSizeInBits(CopyOf->getType()));
        NewCR = NewCR.intersectWith(OriginalCR);

        addAdditionalUser(CmpOp1, &CB);
        // TODO: Actually flip MayIncludeUndef for the created range to false,
        // once most places in the optimizer respect the "branches on
        // undef/poison are UB" rule. The reason why the new range cannot be
        // undef is as follows:
        // The new range is based on a branch condition. That guarantees that
        // neither of the compare operands can be undef in the branch targets,
        // unless we have conditions that are always true/false (e.g.
        // icmp ule i32 %a, i32_max). For the latter an overdefined/empty
        // range will be inferred, but the branch will get folded accordingly
        // anyways.
        mergeInValue(
            IV, &CB,
            ValueLatticeElement::getRange(NewCR, /*MayIncludeUndef=*/true));
        return;
      } else if (Pred == CmpInst::ICMP_EQ && CondVal.isConstant()) {
        // For non-integer values or integer constant expressions, only
        // propagate equal constants.
        addAdditionalUser(CmpOp1, &CB);
        mergeInValue(IV, &CB, CondVal);
        return;
      }

      return (void)mergeInValue(IV, &CB, OriginalVal);
    }
  }

  // The common case is that we aren't tracking the callee, either because we
  // are not doing interprocedural analysis or the callee is indirect, or is
  // external. Handle these cases first.
  if (!F || F->isDeclaration())
    return handleCallOverdefined(CB);

  // If this is a single/zero retval case, see if we're tracking the function.
  if (auto *STy = dyn_cast<StructType>(F->getReturnType())) {
    if (!MRVFunctionsTracked.count(F))
      return handleCallOverdefined(CB); // Not tracking this callee.

    // If we are tracking this callee, propagate the result of the function
    // into this call site.
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
      mergeInValue(getStructValueState(&CB, i), &CB,
                   TrackedMultipleRetVals[std::make_pair(F, i)]);
  } else {
    auto TFRVI = TrackedRetVals.find(F);
    if (TFRVI == TrackedRetVals.end())
      return handleCallOverdefined(CB); // Not tracking this callee.

    // If so, propagate the return value of the callee into this call result.
    mergeInValue(&CB, TFRVI->second);
  }
}

void SCCPSolver::Solve() {
  // Process the work lists until they are empty!
  while (!BBWorkList.empty() || !InstWorkList.empty() ||
         !OverdefinedInstWorkList.empty()) {
    // Process the overdefined instruction's work list first, which drives
    // other things to overdefined more quickly.
    while (!OverdefinedInstWorkList.empty()) {
      Value *I = OverdefinedInstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off OI-WL: " << *I << '\n');

      // "I" got into the work list because it either made the transition from
      // bottom to constant, or to overdefined.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined.
      // Update all of the users of this instruction's value.
      //
      markUsersAsChanged(I);
    }

    // Process the instruction work list.
    while (!InstWorkList.empty()) {
      Value *I = InstWorkList.pop_back_val();

      LLVM_DEBUG(dbgs() << "\nPopped off I-WL: " << *I << '\n');

      // "I" got into the work list because it made the transition from undef
      // to constant.
      //
      // Anything on this worklist that is overdefined need not be visited
      // since all of its users will have already been marked as overdefined.
      // Update all of the users of this instruction's value.
      //
      if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
        markUsersAsChanged(I);
    }

    // Process the basic block work list.
    while (!BBWorkList.empty()) {
      BasicBlock *BB = BBWorkList.back();
      BBWorkList.pop_back();

      LLVM_DEBUG(dbgs() << "\nPopped off BBWL: " << *BB << '\n');

      // Notify all instructions in this basic block that they are newly
      // executable.
      visit(BB);
    }
  }
}

/// ResolvedUndefsIn - While solving the dataflow for a function, we assume
/// that branches on undef values cannot reach any of their successors.
/// However, this is not a safe assumption. After we solve dataflow, this
/// method should be used to handle this. If this returns true, the solver
/// should be rerun.
///
/// This method handles this by finding an unresolved branch and marking one
/// of the edges from the block as being feasible, even though the condition
/// doesn't say it would otherwise be. This allows SCCP to find the rest of the
/// CFG and only slightly pessimizes the analysis results (by marking one,
/// potentially infeasible, edge feasible). This cannot usefully modify the
/// constraints on the condition of the branch, as that would impact other
/// users of the value.
///
/// This scan also checks for values that use undefs. It conservatively marks
/// them as overdefined.
bool SCCPSolver::ResolvedUndefsIn(Function &F) {
  for (BasicBlock &BB : F) {
    if (!BBExecutable.count(&BB))
      continue;

    for (Instruction &I : BB) {
      // Look for instructions which produce undef values.
      if (I.getType()->isVoidTy()) continue;

      if (auto *STy = dyn_cast<StructType>(I.getType())) {
        // Only a few things that can be structs matter for undef.

        // Tracked calls must never be marked overdefined in ResolvedUndefsIn.
        if (auto *CB = dyn_cast<CallBase>(&I))
          if (Function *F = CB->getCalledFunction())
            if (MRVFunctionsTracked.count(F))
              continue;

        // extractvalue and insertvalue don't need to be marked; they are
        // tracked as precisely as their operands.
        if (isa<ExtractValueInst>(I) || isa<InsertValueInst>(I))
          continue;
        // Send the results of everything else to overdefined. We could be
        // more precise than this but it isn't worth bothering.
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          ValueLatticeElement &LV = getStructValueState(&I, i);
          if (LV.isUnknownOrUndef())
            markOverdefined(LV, &I);
        }
        continue;
      }

      ValueLatticeElement &LV = getValueState(&I);
      if (!LV.isUnknownOrUndef())
        continue;

      // There are two reasons a call can have an undef result:
      // 1. It could be tracked.
      // 2. It could be constant-foldable.
      // Because of the way we solve return values, tracked calls must
      // never be marked overdefined in ResolvedUndefsIn.
      if (auto *CB = dyn_cast<CallBase>(&I))
        if (Function *F = CB->getCalledFunction())
          if (TrackedRetVals.count(F))
            continue;

      if (isa<LoadInst>(I)) {
        // A load here means one of two things: a load of undef from a global,
        // or a load from an unknown pointer. Either way, having it return
        // undef is okay.
        continue;
      }

      markOverdefined(&I);
      return true;
    }

    // Check to see if we have a branch or switch on an undefined value. If so
    // we force the branch to go one way or the other to make the successor
    // values live. It doesn't really matter which way we force it.
    Instruction *TI = BB.getTerminator();
    if (auto *BI = dyn_cast<BranchInst>(TI)) {
      if (!BI->isConditional()) continue;
      if (!getValueState(BI->getCondition()).isUnknownOrUndef())
        continue;

      // If the input to SCCP is actually a branch on undef, fix the undef to
      // false.
      if (isa<UndefValue>(BI->getCondition())) {
        BI->setCondition(ConstantInt::getFalse(BI->getContext()));
        markEdgeExecutable(&BB, TI->getSuccessor(1));
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef. Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: Distinguish between dead code and an LLVM "undef" value.
      BasicBlock *DefaultSuccessor = TI->getSuccessor(1);
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }

    if (auto *IBR = dyn_cast<IndirectBrInst>(TI)) {
      // Indirect branch with no successors? It's ok to assume it branches
      // to no target.
      if (IBR->getNumSuccessors() < 1)
        continue;

      if (!getValueState(IBR->getAddress()).isUnknownOrUndef())
        continue;

      // If the input to SCCP is actually a branch on undef, fix the undef to
      // the first successor of the indirect branch.
      if (isa<UndefValue>(IBR->getAddress())) {
        IBR->setAddress(BlockAddress::get(IBR->getSuccessor(0)));
        markEdgeExecutable(&BB, IBR->getSuccessor(0));
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef. Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: IndirectBr on "undef" doesn't actually need to go anywhere:
      // we can assume the branch has undefined behavior instead.
      BasicBlock *DefaultSuccessor = IBR->getSuccessor(0);
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }

    if (auto *SI = dyn_cast<SwitchInst>(TI)) {
      if (!SI->getNumCases() ||
          !getValueState(SI->getCondition()).isUnknownOrUndef())
        continue;

      // If the input to SCCP is actually a switch on undef, fix the undef to
      // the first constant.
      if (isa<UndefValue>(SI->getCondition())) {
        SI->setCondition(SI->case_begin()->getCaseValue());
        markEdgeExecutable(&BB, SI->case_begin()->getCaseSuccessor());
        return true;
      }

      // Otherwise, it is a branch on a symbolic value which is currently
      // considered to be undef. Make sure some edge is executable, so a
      // branch on "undef" always flows somewhere.
      // FIXME: Distinguish between dead code and an LLVM "undef" value.
      BasicBlock *DefaultSuccessor = SI->case_begin()->getCaseSuccessor();
      if (markEdgeExecutable(&BB, DefaultSuccessor))
        return true;

      continue;
    }
  }

  return false;
}

static bool tryToReplaceWithConstant(SCCPSolver &Solver, Value *V) {
  Constant *Const = nullptr;
  if (V->getType()->isStructTy()) {
    std::vector<ValueLatticeElement> IVs = Solver.getStructLatticeValueFor(V);
    if (any_of(IVs,
               [](const ValueLatticeElement &LV) { return isOverdefined(LV); }))
      return false;
    std::vector<Constant *> ConstVals;
    auto *ST = cast<StructType>(V->getType());
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      ValueLatticeElement V = IVs[i];
      ConstVals.push_back(isConstant(V)
                              ? Solver.getConstant(V)
                              : UndefValue::get(ST->getElementType(i)));
    }
    Const = ConstantStruct::get(ST, ConstVals);
  } else {
    const ValueLatticeElement &IV = Solver.getLatticeValueFor(V);
    if (isOverdefined(IV))
      return false;

    Const =
        isConstant(IV) ? Solver.getConstant(IV) : UndefValue::get(V->getType());
  }
  assert(Const && "Constant is nullptr here!");

  // Replacing a `musttail` call with a constant breaks the `musttail`
  // invariant unless the call itself can be removed.
  CallInst *CI = dyn_cast<CallInst>(V);
  if (CI && CI->isMustTailCall() && !CI->isSafeToRemove()) {
    Function *F = CI->getCalledFunction();

    // Don't zap returns of the callee
    if (F)
      Solver.AddMustTailCallee(F);

    LLVM_DEBUG(dbgs() << "  Can\'t treat the result of musttail call : " << *CI
                      << " as a constant\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "  Constant: " << *Const << " = " << *V << '\n');

  // Replaces all of the uses of a variable with uses of the constant.
  V->replaceAllUsesWith(Const);
  return true;
}

// runSCCP() - Run the Sparse Conditional Constant Propagation algorithm,
// and return true if the function was modified.
static bool runSCCP(Function &F, const DataLayout &DL,
                    const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SCCP on function '" << F.getName() << "'\n");
  SCCPSolver Solver(
      DL, [TLI](Function &F) -> const TargetLibraryInfo & { return *TLI; },
      F.getContext());

  // Mark the first block of the function as being executable.
  Solver.MarkBlockExecutable(&F.front());

  // Mark all arguments to the function as being overdefined.
  for (Argument &AI : F.args())
    Solver.markOverdefined(&AI);

  // Solve for constants.
  bool ResolvedUndefs = true;
  while (ResolvedUndefs) {
    Solver.Solve();
    LLVM_DEBUG(dbgs() << "RESOLVING UNDEFs\n");
    ResolvedUndefs = Solver.ResolvedUndefsIn(F);
  }

  bool MadeChanges = false;

  // If we decided that there are basic blocks that are dead in this function,
  // delete their contents now. Note that we cannot actually delete the blocks,
  // as we cannot modify the CFG of the function.

  for (BasicBlock &BB : F) {
    if (!Solver.isBlockExecutable(&BB)) {
      LLVM_DEBUG(dbgs() << "  BasicBlock Dead:" << BB);

      ++NumDeadBlocks;
      NumInstRemoved += removeAllNonTerminatorAndEHPadInstructions(&BB);

      MadeChanges = true;
      continue;
    }

    // Iterate over all of the instructions in a function, replacing them with
    // constants if we have found them to be of constant values.
    for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
      Instruction *Inst = &*BI++;
      if (Inst->getType()->isVoidTy() || Inst->isTerminator())
        continue;

      if (tryToReplaceWithConstant(Solver, Inst)) {
        if (isInstructionTriviallyDead(Inst))
          Inst->eraseFromParent();
        // Hey, we just changed something!
        MadeChanges = true;
        ++NumInstRemoved;
      }
    }
  }

  return MadeChanges;
}

PreservedAnalyses SCCPPass::run(Function &F, FunctionAnalysisManager &AM) {
  const DataLayout &DL = F.getParent()->getDataLayout();
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  if (!runSCCP(F, DL, &TLI))
    return PreservedAnalyses::all();

  auto PA = PreservedAnalyses();
  PA.preserve<GlobalsAA>();
  PA.preserveSet<CFGAnalyses>();
  return PA;
}

namespace {

//===--------------------------------------------------------------------===//
//
/// SCCP Class - This class uses the SCCPSolver to implement a per-function
/// Sparse Conditional Constant Propagator.
///
class SCCPLegacyPass : public FunctionPass {
public:
  // Pass identification, replacement for typeid
  static char ID;

  SCCPLegacyPass() : FunctionPass(ID) {
    initializeSCCPLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }

  // runOnFunction - Run the Sparse Conditional Constant Propagation
  // algorithm, and return true if the function was modified.
  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    const DataLayout &DL = F.getParent()->getDataLayout();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    return runSCCP(F, DL, TLI);
  }
};

} // end anonymous namespace

char SCCPLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(SCCPLegacyPass, "sccp",
                      "Sparse Conditional Constant Propagation", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(SCCPLegacyPass, "sccp",
                    "Sparse Conditional Constant Propagation", false, false)

// createSCCPPass - This is the public interface to this file.
FunctionPass *llvm::createSCCPPass() { return new SCCPLegacyPass(); }

static void findReturnsToZap(Function &F,
                             SmallVector<ReturnInst *, 8> &ReturnsToZap,
                             SCCPSolver &Solver) {
  // We can only do this if we know that nothing else can call the function.
  if (!Solver.isArgumentTrackedFunction(&F))
    return;

  // There is a non-removable musttail call site of this function. Zapping
  // returns is not allowed.
  if (Solver.isMustTailCallee(&F)) {
    LLVM_DEBUG(dbgs() << "Can't zap returns of the function : " << F.getName()
                      << " due to present musttail call of it\n");
    return;
  }

  assert(
      all_of(F.users(),
             [&Solver](User *U) {
               if (isa<Instruction>(U) &&
                   !Solver.isBlockExecutable(cast<Instruction>(U)->getParent()))
                 return true;
               // Non-callsite uses are not impacted by zapping. Also, constant
               // uses (like blockaddresses) could stick around without being
               // used in the underlying IR, meaning we do not have lattice
               // values for them.
               if (!isa<CallBase>(U))
                 return true;
               if (U->getType()->isStructTy()) {
                 return all_of(Solver.getStructLatticeValueFor(U),
                               [](const ValueLatticeElement &LV) {
                                 return !isOverdefined(LV);
                               });
               }
               return !isOverdefined(Solver.getLatticeValueFor(U));
             }) &&
      "We can only zap functions where all live users have a concrete value");

  for (BasicBlock &BB : F) {
    if (CallInst *CI = BB.getTerminatingMustTailCall()) {
      LLVM_DEBUG(dbgs() << "Can't zap return of the block due to present "
                        << "musttail call : " << *CI << "\n");
      (void)CI;
      return;
    }

    if (auto *RI = dyn_cast<ReturnInst>(BB.getTerminator()))
      if (!isa<UndefValue>(RI->getOperand(0)))
        ReturnsToZap.push_back(RI);
  }
}

// Update the condition for terminators that are branching on indeterminate
// values, forcing them to use a specific edge.
static void forceIndeterminateEdge(Instruction *I, SCCPSolver &Solver) {
  BasicBlock *Dest = nullptr;
  Constant *C = nullptr;
  if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
    if (!isa<ConstantInt>(SI->getCondition())) {
      // Indeterminate switch; use first case value.
      Dest = SI->case_begin()->getCaseSuccessor();
      C = SI->case_begin()->getCaseValue();
    }
  } else if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
    if (!isa<ConstantInt>(BI->getCondition())) {
      // Indeterminate branch; use false.
      Dest = BI->getSuccessor(1);
      C = ConstantInt::getFalse(BI->getContext());
    }
  } else if (IndirectBrInst *IBR = dyn_cast<IndirectBrInst>(I)) {
    if (!isa<BlockAddress>(IBR->getAddress()->stripPointerCasts())) {
      // Indeterminate indirectbr; use successor 0.
      Dest = IBR->getSuccessor(0);
      C = BlockAddress::get(IBR->getSuccessor(0));
    }
  } else {
    llvm_unreachable("Unexpected terminator instruction");
  }
  if (C) {
    assert(Solver.isEdgeFeasible(I->getParent(), Dest) &&
           "Didn't find feasible edge?");
    (void)Dest;

    I->setOperand(0, C);
  }
}

bool llvm::runIPSCCP(
    Module &M, const DataLayout &DL,
    std::function<const TargetLibraryInfo &(Function &)> GetTLI,
    function_ref<AnalysisResultsForFn(Function &)> getAnalysis) {
  SCCPSolver Solver(DL, GetTLI, M.getContext());

  // Loop over all functions, marking arguments to those with their addresses
  // taken or that are external as overdefined.
  for (Function &F : M) {
    if (F.isDeclaration())
      continue;

    Solver.addAnalysis(F, getAnalysis(F));

    // Determine if we can track the function's return values. If so, add the
    // function to the solver's set of return-tracked functions.
    if (canTrackReturnsInterprocedurally(&F))
      Solver.AddTrackedFunction(&F);

    // Determine if we can track the function's arguments. If so, add the
    // function to the solver's set of argument-tracked functions.
    if (canTrackArgumentsInterprocedurally(&F)) {
      Solver.AddArgumentTrackedFunction(&F);
      continue;
    }

    // Assume the function is called.
    Solver.MarkBlockExecutable(&F.front());

    // Assume nothing about the incoming arguments.
    for (Argument &AI : F.args())
      Solver.markOverdefined(&AI);
  }

  // Determine if we can track any of the module's global variables. If so, add
  // the global variables we can track to the solver's set of tracked global
  // variables.
  for (GlobalVariable &G : M.globals()) {
    G.removeDeadConstantUsers();
    if (canTrackGlobalVariableInterprocedurally(&G))
      Solver.TrackValueOfGlobalVariable(&G);
  }

  // Solve for constants.
  bool ResolvedUndefs = true;
  Solver.Solve();
  while (ResolvedUndefs) {
    LLVM_DEBUG(dbgs() << "RESOLVING UNDEFS\n");
    ResolvedUndefs = false;
    for (Function &F : M)
      if (Solver.ResolvedUndefsIn(F)) {
        // We run Solve() after we resolved an undef in a function, because
        // we might deduce a fact that eliminates an undef in another function.
        Solver.Solve();
        ResolvedUndefs = true;
      }
  }

  bool MadeChanges = false;

  // Iterate over all of the instructions in the module, replacing them with
  // constants if we have found them to be of constant values.
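  // (As an illustration: a function argument whose lattice value resolved to a
  // single constant has every use replaced with that constant by
  // tryToReplaceWithConstant() below, and trivially dead instructions are
  // erased afterwards.)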

  for (Function &F : M) {
    if (F.isDeclaration())
      continue;

    SmallVector<BasicBlock *, 512> BlocksToErase;

    if (Solver.isBlockExecutable(&F.front()))
      for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E;
           ++AI) {
        if (!AI->use_empty() && tryToReplaceWithConstant(Solver, &*AI)) {
          ++IPNumArgsElimed;
          continue;
        }
      }

    for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
      if (!Solver.isBlockExecutable(&*BB)) {
        LLVM_DEBUG(dbgs() << "  BasicBlock Dead:" << *BB);
        ++NumDeadBlocks;

        MadeChanges = true;

        if (&*BB != &F.front())
          BlocksToErase.push_back(&*BB);
        continue;
      }

      for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E;) {
        Instruction *Inst = &*BI++;
        if (Inst->getType()->isVoidTy())
          continue;
        if (tryToReplaceWithConstant(Solver, Inst)) {
          if (Inst->isSafeToRemove())
            Inst->eraseFromParent();
          // Hey, we just changed something!
          MadeChanges = true;
          ++IPNumInstRemoved;
        }
      }
    }

    DomTreeUpdater DTU = Solver.getDTU(F);
    // Change dead blocks to unreachable. We do it after replacing constants
    // in all executable blocks, because changeToUnreachable may remove PHI
    // nodes in executable blocks we found values for. The function's entry
    // block is not part of BlocksToErase, so we have to handle it separately.
    for (BasicBlock *BB : BlocksToErase) {
      NumInstRemoved +=
          changeToUnreachable(BB->getFirstNonPHI(), /*UseLLVMTrap=*/false,
                              /*PreserveLCSSA=*/false, &DTU);
    }
    if (!Solver.isBlockExecutable(&F.front()))
      NumInstRemoved += changeToUnreachable(F.front().getFirstNonPHI(),
                                            /*UseLLVMTrap=*/false,
                                            /*PreserveLCSSA=*/false, &DTU);

    // Now that all instructions in the function are constant folded,
    // use ConstantFoldTerminator to get rid of in-edges, record DT updates and
    // delete dead BBs.
    for (BasicBlock *DeadBB : BlocksToErase) {
      // If there are any PHI nodes in this successor, drop entries for BB now.
      for (Value::user_iterator UI = DeadBB->user_begin(),
                                UE = DeadBB->user_end();
           UI != UE;) {
        // Grab the user and then increment the iterator early, as the user
        // will be deleted. Step past all adjacent uses from the same user.
        auto *I = dyn_cast<Instruction>(*UI);
        do { ++UI; } while (UI != UE && *UI == I);

        // Ignore blockaddress users; BasicBlock's dtor will handle them.
        if (!I) continue;

        // If we have forced an edge for an indeterminate value, then force the
        // terminator to fold to that edge.
        forceIndeterminateEdge(I, Solver);
        BasicBlock *InstBB = I->getParent();
        bool Folded = ConstantFoldTerminator(InstBB,
                                             /*DeleteDeadConditions=*/false,
                                             /*TLI=*/nullptr, &DTU);
        assert(Folded &&
               "Expect TermInst on constantint or blockaddress to be folded");
        (void) Folded;
        // If we folded the terminator to an unconditional branch to another
        // dead block, replace it with Unreachable, to avoid trying to fold
        // that branch again.
        BranchInst *BI = cast<BranchInst>(InstBB->getTerminator());
        if (BI && BI->isUnconditional() &&
            !Solver.isBlockExecutable(BI->getSuccessor(0))) {
          InstBB->getTerminator()->eraseFromParent();
          new UnreachableInst(InstBB->getContext(), InstBB);
        }
      }
      // Mark dead BB for deletion.
      DTU.deleteBB(DeadBB);
    }

    // Remove the ssa_copy intrinsics that PredicateInfo inserted for the
    // solver; replace each with its original operand now that solving is done.
    for (BasicBlock &BB : F) {
      for (BasicBlock::iterator BI = BB.begin(), E = BB.end(); BI != E;) {
        Instruction *Inst = &*BI++;
        if (Solver.getPredicateInfoFor(Inst)) {
          if (auto *II = dyn_cast<IntrinsicInst>(Inst)) {
            if (II->getIntrinsicID() == Intrinsic::ssa_copy) {
              Value *Op = II->getOperand(0);
              Inst->replaceAllUsesWith(Op);
              Inst->eraseFromParent();
            }
          }
        }
      }
    }
  }

  // If we inferred constant or undef return values for a function, we replaced
  // all call uses with the inferred value. This means we don't need to bother
  // actually returning anything from the function. Replace all return
  // instructions with return undef.
  //
  // Do this in two stages: first identify the functions we should process,
  // then actually zap their returns. This is important because we can only do
  // this if the address of the function isn't taken. In cases where a return
  // is the last use of a function, the order of processing functions would
  // affect whether other functions are optimizable.
  SmallVector<ReturnInst *, 8> ReturnsToZap;

  for (const auto &I : Solver.getTrackedRetVals()) {
    Function *F = I.first;
    if (isOverdefined(I.second) || F->getReturnType()->isVoidTy())
      continue;
    findReturnsToZap(*F, ReturnsToZap, Solver);
  }

  for (auto F : Solver.getMRVFunctionsTracked()) {
    assert(F->getReturnType()->isStructTy() &&
           "The return type should be a struct");
    StructType *STy = cast<StructType>(F->getReturnType());
    if (Solver.isStructLatticeConstant(F, STy))
      findReturnsToZap(*F, ReturnsToZap, Solver);
  }

  // Zap all returns which we've identified as needing to be changed.
  for (unsigned i = 0, e = ReturnsToZap.size(); i != e; ++i) {
    Function *F = ReturnsToZap[i]->getParent()->getParent();
    ReturnsToZap[i]->setOperand(0, UndefValue::get(F->getReturnType()));
  }

  // If we inferred constant or undef values for global variables, we can
  // delete the global and any stores that remain to it.
  for (auto &I : make_early_inc_range(Solver.getTrackedGlobals())) {
    GlobalVariable *GV = I.first;
    if (isOverdefined(I.second))
      continue;
    LLVM_DEBUG(dbgs() << "Found that GV '" << GV->getName()
                      << "' is constant!\n");
    while (!GV->use_empty()) {
      StoreInst *SI = cast<StoreInst>(GV->user_back());
      SI->eraseFromParent();
    }
    M.getGlobalList().erase(GV);
    ++IPNumGlobalConst;
  }

  return MadeChanges;
}