//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression into
// a form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
// (a + b) has already been computed.
//
// NaryReassociate works as follows. For every instruction in the form of (a +
// b) + c, it checks whether a + c or b + c is already computed by a dominating
// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
// c) + a and removes the redundancy accordingly. To efficiently look up whether
// an expression has been computed before, we store each instruction seen,
// keyed by its SCEV, in an SCEV-to-instruction map.
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the function
// in depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to be run for multiple
// iterations before emitting optimal code. One reason is that we only split an
// operand when it is used only once. The algorithm can eliminate an instruction
// and thereby decrease the use count of its operands. As a result, an
// instruction that previously had multiple uses may become a single-use
// instruction and thus become eligible for splitting. For example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac+b because ab is used
// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
// result, ab2 becomes dead and ab will be used only once in the second
// iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds, muls, and GEPs for now. This should be
//    extended and generalized.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {
class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};
} // anonymous namespace

char NaryReassociateLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  bool Changed = runImpl(F, AC, DT, SE, TLI, TTI);

  // FIXME: We need to invalidate this to avoid PR28400. Is there a better
  // solution?
  AM.invalidate<ScalarEvolutionAnalysis>(F);

  if (!Changed)
    return PreservedAnalyses::all();

  // FIXME: This should also 'preserve the CFG'.
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<ScalarEvolutionAnalysis>();
  PA.preserve<TargetLibraryAnalysis>();
  return PA;
}

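// Entry point shared by both pass managers. The rewriting is iterated until a
// fixed point: removing a reassociated instruction can turn a multi-use
// operand into a single-use one and expose further opportunities (see the
// example in the file header).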
bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

// Whitelist the instruction types NaryReassociate handles for now.
static bool isPotentiallyNaryReassociable(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::GetElementPtr:
  case Instruction::Mul:
    return true;
  default:
    return false;
  }
}

bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth-first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are in Candidates
  // when we process it.
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ++I) {
      if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(&*I)) {
        const SCEV *OldSCEV = SE->getSCEV(&*I);
        if (Instruction *NewI = tryReassociate(&*I)) {
          Changed = true;
          SE->forgetValue(&*I);
          I->replaceAllUsesWith(NewI);
          // If SeenExprs contains I's WeakVH, that entry will be replaced with
          // nullptr.
          RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
          I = NewI->getIterator();
        }
        // Add the rewritten instruction to SeenExprs; the original instruction
        // is deleted.
        const SCEV *NewSCEV = SE->getSCEV(&*I);
        SeenExprs[NewSCEV].push_back(WeakVH(&*I));
        // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw causing NewSCEV not to equal OldSCEV. For example, suppose
        // we reassociate
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we add I to SeenExprs[OldSCEV] as well so that we can
        // map both SCEVs, before and after tryReassociate(I), to I.
        //
        // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
        if (NewSCEV != OldSCEV)
          SeenExprs[OldSCEV].push_back(WeakVH(&*I));
      }
    }
  }
  return Changed;
}

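// Dispatch on the opcode of the candidate instruction. For the add/mul case,
// the rewrite reuses a dominating expression; e.g. (IR names here are
// illustrative only), given a dominating
//   %ab = add i32 %a, %b
// the pair
//   %ac  = add i32 %a, %c
//   %acb = add i32 %ac, %b       ; (a + c) + b
// is rewritten so that %acb becomes "add i32 %ab, %c", after which %ac may
// become dead.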
Instruction *NaryReassociatePass::tryReassociate(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    llvm_unreachable("should be filtered out by isPotentiallyNaryReassociable");
  }
}

static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value*, 4> Indices;
  for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
    Indices.push_back(*I);
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
    if (isa<SequentialType>(*GTI++)) {
      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
        return NewGEP;
      }
    }
  }
  return nullptr;
}

bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
  unsigned PointerSizeInBits =
      DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    //   sext(LHS + RHS) != sext(LHS) + sext(RHS).
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}

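// Try to rewrite GEP so that its I-th index becomes RHS alone, by finding a
// dominating GEP whose I-th index computes LHS. For example (IR names here are
// illustrative only), given a dominating
//   %p = getelementptr inbounds float, float* %a, i64 %i
// and a candidate
//   %sum = add nsw i64 %i, %j
//   %q   = getelementptr inbounds float, float* %a, i64 %sum
// splitting %sum as LHS = %i and RHS = %j finds %p, and %q is rebuilt as
//   getelementptr inbounds float, float* %p, i64 %j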
GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()) <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do that
    // consistently so that CandidateExpr is more likely to have been seen
    // before. See @reassociate_gep_assume for an example of this
    // canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr = SE->getGEPExpr(
      GEP->getSourceElementType(), SE->getSCEV(GEP->getPointerOperand()),
      IndexExprs, GEP->isInBounds());

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // A complication: because I is not necessarily the last index of the GEP,
  // the size of the type at the I-th index (IndexedSize) is not necessarily
  // divisible by ElementSize. For example,
  //
  //   #pragma pack(1)
  //   struct S {
  //     int a[3];
  //     int64 b[8];
  //   };
  //   #pragma pack()
  //
  // sizeof(S) = 76 is indivisible by sizeof(int64) = 8.
  //
  // TODO: bail out on this case for now. We could emit uglygep.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP =
      cast<GetElementPtrInst>(Builder.CreateGEP(Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of
  // (A op B).
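  // Otherwise, (A op B) stays live after the rewrite, and we would merely grow
  // the code instead of exposing a redundancy.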
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}

bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}

const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}

Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    // Candidates stores WeakVHs, so a candidate can be nullptr if it's removed
    // during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}