//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression into
// a form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2, because it detects
// that (a + b) has already been computed.
//
// NaryReassociate works as follows. For every instruction in the form of
// (a + b) + c, it checks whether a + c or b + c is already computed by a
// dominating instruction. If so, it reassociates (a + b) + c into (a + c) + b
// or (b + c) + a and removes the redundancy accordingly. To efficiently look up
// whether an expression has been computed before, we store each instruction
// seen and its SCEV in an SCEV-to-instruction map.
//
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the
// function in depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to run for multiple
// iterations before emitting optimal code. One reason is that we only split an
// operand when it has a single use. The algorithm can eliminate an instruction
// and thereby decrease the use count of its operands; as a result, an
// instruction that previously had multiple uses may become single-use and thus
// eligible for splitting. For example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac + b because ab is
// used twice. However, we can reassociate ab2c to abc + b in the first
// iteration. As a result, ab2 becomes dead and ab is used only once in the
// second iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds and muls for now. This should be extended
//    and generalized.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {

class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override {
    return false;
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};

} // end anonymous namespace

char NaryReassociateLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  if (!runImpl(F, AC, DT, SE, TLI, TTI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}

bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth-first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are in Candidates
  // when we process it.
  SmallVector<WeakTrackingVH, 16> DeadInsts;
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ++I) {
      Instruction *OrigI = &*I;
      const SCEV *OrigSCEV = nullptr;
      if (Instruction *NewI = tryReassociate(OrigI, OrigSCEV)) {
        Changed = true;
        OrigI->replaceAllUsesWith(NewI);

        // Add 'OrigI' to the list of dead instructions.
        DeadInsts.push_back(WeakTrackingVH(OrigI));
        // Add the rewritten instruction to SeenExprs; the original
        // instruction is deleted.
        const SCEV *NewSCEV = SE->getSCEV(NewI);
        SeenExprs[NewSCEV].push_back(WeakTrackingVH(NewI));

        // Ideally, NewSCEV should equal OrigSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw, causing NewSCEV not to equal OrigSCEV. For example,
        // suppose we reassociate
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we also map OrigSCEV to NewI, so that both the SCEV
        // before and after tryReassociate(I) point to the surviving
        // instruction.
        //
        // This improvement is exercised in @reassociate_gep_nsw in
        // nary-gep.ll.
        if (NewSCEV != OrigSCEV)
          SeenExprs[OrigSCEV].push_back(WeakTrackingVH(NewI));
      } else if (OrigSCEV)
        SeenExprs[OrigSCEV].push_back(WeakTrackingVH(OrigI));
    }
  }
  // Delete all dead instructions from 'DeadInsts'.
  // Please note ScalarEvolution is updated along the way.
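  // The callback passed below is invoked for each value that is about to be
  // erased; it tells ScalarEvolution to forget that value so later queries do
  // not return cached SCEVs for freed instructions.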
  RecursivelyDeleteTriviallyDeadInstructionsPermissive(
      DeadInsts, TLI, nullptr, [this](Value *V) { SE->forgetValue(V); });

  return Changed;
}

Instruction *NaryReassociatePass::tryReassociate(Instruction *I,
                                                 const SCEV *&OrigSCEV) {
  if (!SE->isSCEVable(I->getType()))
    return nullptr;

  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    OrigSCEV = SE->getSCEV(I);
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    OrigSCEV = SE->getSCEV(I);
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    return nullptr;
  }

  llvm_unreachable("should not be reached");
  return nullptr;
}

static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value *, 4> Indices;
  for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
    Indices.push_back(*I);
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
  }
  return nullptr;
}

bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
  unsigned PointerSizeInBits =
      DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    //   sext(LHS + RHS) != sext(LHS) + sext(RHS).
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
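    // Skip the swapped form when both operands are the same value; it would
    // build an identical candidate expression and repeat the same lookup.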
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}

GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()).getFixedSize() <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType()).getFixedSize()) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do that
    // consistently so that CandidateExpr is more likely to have appeared
    // before. See @reassociate_gep_assume for an example of this
    // canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
                                             IndexExprs);

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // Because I is not necessarily the last index of the GEP, the size of the
  // type at the I-th index (IndexedSize) is not necessarily divisible by
  // ElementSize. For example,
  //
  //   #pragma pack(1)
  //   struct S {
  //     int a[3];
  //     int64 b[8];
  //   };
  //   #pragma pack()
  //
  //   sizeof(S) = 76 is not divisible by sizeof(int64) = 8.
  //
  // We bail out on this case for now; TODO: emit an uglygep instead.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP = cast<GetElementPtrInst>(
      Builder.CreateGEP(GEP->getResultElementType(), Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  // There is no need to reassociate 0.
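  // Bail out below when the whole expression folds to zero; otherwise try to
  // split LHS first and then RHS as the inner (A op B) term.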
  if (SE->getSCEV(I)->isZero())
    return nullptr;
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of
  // (A op B).
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}

bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}

const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}

Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it's
    // removed during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}