//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

namespace {
/// Contains expression pattern combiner logic.
/// This class provides the logic to match expression patterns and combine
/// them. It differs from the InstCombiner class in that each pattern combiner
/// runs only once, as opposed to InstCombine's multi-iteration scheme; this
/// allows a pattern combiner to have higher complexity than the O(1) required
/// by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given \p F function.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a
/// chain of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
        MatchAndChain(MatchAnds), FoundAnd1(false) {}
};

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common
/// source value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
///   returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
///   returns { X, 0x12 }
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
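    // For example, in "and (and (X >> 1), 1), (X >> 4)" (the second example
    // above), visiting the inner "and ..., 1" sets FoundAnd1. That guarantees
    // the chain's value is 0 or 1, matching the zext'd compare that
    // foldAnyOrAllBitsSet() creates as the replacement.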
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  uint64_t BitIndex = 0;
  if (!match(V, m_LShr(m_Value(Candidate), m_ConstantInt(BitIndex))))
    Candidate = V;

  // Initialize the result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // If the shift constant is out-of-range, then this expression has not been
  // simplified yet; bail out.
  if (BitIndex >= MOps.Mask.getBitWidth())
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
///   and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
///   and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these
/// patterns that differ only with a final 'not' of the result. We expect that
/// final 'not' to be folded with the compare that we create here (by inverting
/// the predicate).
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // "and X, 1" must be the last instruction in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  return true;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated here because they are not expected to
/// occur frequently and/or require more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions in this loop; that would invalidate the
    // iterator. Walk the block backwards for efficiency: we're matching a
    // chain of use->defs, so we're more likely to succeed by starting from
    // the bottom.
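    // For example, an "any-bits-set" chain might appear in the IR as:
    //   %s1 = lshr i32 %x, 3
    //   %s2 = lshr i32 %x, 5
    //   %o1 = or i32 %s1, %s2
    //   %r = and i32 %o1, 1
    // The root "and %o1, 1" is the last instruction, so a backwards walk
    // reaches it first, and a single match can then fold the whole chain to
    // "(%x & 0x28) != 0".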
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : make_range(BB.rbegin(), BB.rend()))
      MadeChange |= foldAnyOrAllBitsSet(I);
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return runImpl(F, TLI, DT);
}

PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }
  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}
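
// A minimal usage sketch for the C API binding above (hypothetical driver
// code; it assumes an existing LLVMModuleRef M):
//
//   LLVMPassManagerRef FPM = LLVMCreateFunctionPassManagerForModule(M);
//   LLVMAddAggressiveInstCombinerPass(FPM);
//   LLVMInitializeFunctionPassManager(FPM);
//   for (LLVMValueRef F = LLVMGetFirstFunction(M); F;
//        F = LLVMGetNextFunction(F))
//     LLVMRunFunctionPassManager(FPM, F);
//   LLVMFinalizeFunctionPassManager(FPM);
//   LLVMDisposePassManager(FPM);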