//===- NaryReassociate.cpp - Reassociate n-ary expressions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates n-ary add expressions and eliminates the redundancy
// exposed by the reassociation.
//
// A motivating example:
//
//   void foo(int a, int b) {
//     bar(a + b);
//     bar((a + 2) + b);
//   }
//
// An ideal compiler should reassociate (a + 2) + b to (a + b) + 2 and simplify
// the above code to
//
//   int t = a + b;
//   bar(t);
//   bar(t + 2);
//
// However, the Reassociate pass is unable to do that because it processes each
// instruction individually and believes (a + 2) + b is the best form according
// to its rank system.
//
// To address this limitation, NaryReassociate reassociates an expression in a
// form that reuses existing instructions. As a result, NaryReassociate can
// reassociate (a + 2) + b in the example to (a + b) + 2 because it detects that
// (a + b) is computed before.
//
// NaryReassociate works as follows. For every instruction in the form of (a +
// b) + c, it checks whether a + c or b + c is already computed by a dominating
// instruction. If so, it then reassociates (a + b) + c into (a + c) + b or (b +
// c) + a and removes the redundancy accordingly. To efficiently look up whether
// an expression is computed before, we store each instruction seen and its SCEV
// into an SCEV-to-instruction map.
//
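// For illustration, here is a hypothetical IR-level view of the motivating
// example (value names invented):
//
//   %ab = add i32 %a, %b    ; SeenExprs[SCEV(a + b)] now records %ab
//   %a2 = add i32 %a, 2
//   %t  = add i32 %a2, %b   ; matches (a + 2) + b; a + b is already in the
//                           ; map and dominates, so this is rewritten to
//                           ;   %t = add i32 %ab, 2
//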
// Although the algorithm pattern-matches only ternary additions, it
// automatically handles many >3-ary expressions by walking through the function
// in depth-first order. For example, given
//
//   (a + c) + d
//   ((a + b) + c) + d
//
// NaryReassociate first rewrites (a + b) + c to (a + c) + b, and then rewrites
// ((a + c) + b) + d into ((a + c) + d) + b.
//
// Finally, the above dominator-based algorithm may need to run for multiple
// iterations before emitting optimal code. One source of this need is that we
// only split an operand when it has a single use. The above algorithm can
// eliminate an instruction and decrease the use count of its operands. As a
// result, an instruction that previously had multiple uses may become a
// single-use instruction and thus eligible for splitting. For example,
//
//   ac = a + c
//   ab = a + b
//   abc = ab + c
//   ab2 = ab + b
//   ab2c = ab2 + c
//
// In the first iteration, we cannot reassociate abc to ac+b because ab is used
// twice. However, we can reassociate ab2c to abc+b in the first iteration. As a
// result, ab2 becomes dead and ab will be used only once in the second
// iteration.
//
// Limitations and TODO items:
//
// 1) We only consider n-ary adds and muls for now. This should be extended
// and generalized.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NaryReassociate.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "nary-reassociate"

namespace {
class NaryReassociateLegacyPass : public FunctionPass {
public:
  static char ID;

  NaryReassociateLegacyPass() : FunctionPass(ID) {
    initializeNaryReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override { return false; }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
    AU.addPreserved<TargetLibraryInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }

private:
  NaryReassociatePass Impl;
};
} // anonymous namespace

char NaryReassociateLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(NaryReassociateLegacyPass, "nary-reassociate",
                      "Nary reassociation", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(NaryReassociateLegacyPass, "nary-reassociate",
                    "Nary reassociation", false, false)

FunctionPass *llvm::createNaryReassociatePass() {
  return new NaryReassociateLegacyPass();
}

bool NaryReassociateLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
  auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

  return Impl.runImpl(F, AC, DT, SE, TLI, TTI);
}

PreservedAnalyses NaryReassociatePass::run(Function &F,
                                           FunctionAnalysisManager &AM) {
  auto *AC = &AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  auto *SE = &AM.getResult<ScalarEvolutionAnalysis>(F);
  auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
  auto *TTI = &AM.getResult<TargetIRAnalysis>(F);

  if (!runImpl(F, AC, DT, SE, TLI, TTI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}

bool NaryReassociatePass::runImpl(Function &F, AssumptionCache *AC_,
                                  DominatorTree *DT_, ScalarEvolution *SE_,
                                  TargetLibraryInfo *TLI_,
                                  TargetTransformInfo *TTI_) {
  AC = AC_;
  DT = DT_;
  SE = SE_;
  TLI = TLI_;
  TTI = TTI_;
  DL = &F.getParent()->getDataLayout();

  bool Changed = false, ChangedInThisIteration;
  do {
    ChangedInThisIteration = doOneIteration(F);
    Changed |= ChangedInThisIteration;
  } while (ChangedInThisIteration);
  return Changed;
}

// Whitelist the instruction types NaryReassociate handles for now.
static bool isPotentiallyNaryReassociable(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::GetElementPtr:
  case Instruction::Mul:
    return true;
  default:
    return false;
  }
}

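// Runs one round of reassociation over F: visits basic blocks in a pre-order
// (depth-first) walk of the dominator tree and instructions in program order
// within each block, rewriting the ones that have a profitable reassociated
// form. Returns true if anything changed. SeenExprs is rebuilt from scratch
// each round.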
bool NaryReassociatePass::doOneIteration(Function &F) {
  bool Changed = false;
  SeenExprs.clear();
  // Process the basic blocks in a depth-first traversal of the dominator
  // tree. This order ensures that all bases of a candidate are in SeenExprs
  // when we process it.
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ++I) {
      if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(&*I)) {
        const SCEV *OldSCEV = SE->getSCEV(&*I);
        if (Instruction *NewI = tryReassociate(&*I)) {
          Changed = true;
          SE->forgetValue(&*I);
          I->replaceAllUsesWith(NewI);
          // If SeenExprs contains I's WeakTrackingVH, that entry will be
          // replaced with nullptr.
          RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
          I = NewI->getIterator();
        }
        // Add the rewritten instruction to SeenExprs; the original instruction
        // is deleted.
        const SCEV *NewSCEV = SE->getSCEV(&*I);
        SeenExprs[NewSCEV].push_back(WeakTrackingVH(&*I));
        // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
        // is equivalent to I. However, ScalarEvolution::getSCEV may
        // weaken nsw, causing NewSCEV not to equal OldSCEV. For example, suppose
        // we reassociate
        //   I = &a[sext(i +nsw j)] // assuming sizeof(a[0]) = 4
        // to
        //   NewI = &a[sext(i)] + sext(j).
        //
        // ScalarEvolution computes
        //   getSCEV(I)    = a + 4 * sext(i + j)
        //   getSCEV(NewI) = a + 4 * sext(i) + 4 * sext(j)
        // which are different SCEVs.
        //
        // To alleviate this issue of ScalarEvolution not always capturing
        // equivalence, we add I to SeenExprs[OldSCEV] as well, so that we can
        // map both the SCEV before and the SCEV after tryReassociate(I) to I.
        //
        // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
        if (NewSCEV != OldSCEV)
          SeenExprs[OldSCEV].push_back(WeakTrackingVH(&*I));
      }
    }
  }
  return Changed;
}

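// Dispatches I to the opcode-specific routine. Returns the replacement
// instruction, or nullptr if no profitable rewrite was found.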
Instruction *NaryReassociatePass::tryReassociate(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Mul:
    return tryReassociateBinaryOp(cast<BinaryOperator>(I));
  case Instruction::GetElementPtr:
    return tryReassociateGEP(cast<GetElementPtrInst>(I));
  default:
    llvm_unreachable("should be filtered out by isPotentiallyNaryReassociable");
  }
}

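// Returns true if TTI reports this GEP as free, which typically means the
// target can fold the entire address computation into the addressing mode of
// a memory instruction.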
static bool isGEPFoldable(GetElementPtrInst *GEP,
                          const TargetTransformInfo *TTI) {
  SmallVector<const Value *, 4> Indices;
  for (auto I = GEP->idx_begin(); I != GEP->idx_end(); ++I)
    Indices.push_back(*I);
  return TTI->getGEPCost(GEP->getSourceElementType(), GEP->getPointerOperand(),
                         Indices) == TargetTransformInfo::TCC_Free;
}

Instruction *NaryReassociatePass::tryReassociateGEP(GetElementPtrInst *GEP) {
  // Not worth reassociating GEP if it is foldable.
  if (isGEPFoldable(GEP, TTI))
    return nullptr;

  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
                                                  GTI.getIndexedType())) {
        return NewGEP;
      }
    }
  }
  return nullptr;
}

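// Returns true if Index is narrower than the pointer width of GEP's address
// space, in which case GEP semantics sign-extend the index to pointer width.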
bool NaryReassociatePass::requiresSignExtension(Value *Index,
                                                GetElementPtrInst *GEP) {
  unsigned PointerSizeInBits =
      DL->getPointerSizeInBits(GEP->getType()->getPointerAddressSpace());
  return cast<IntegerType>(Index->getType())->getBitWidth() < PointerSizeInBits;
}

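// Tries to reassociate GEP around its I-th index. If that index (possibly
// behind a sext, or a zext of a known-non-negative value) is an add, splits
// it as LHS + RHS and tries the rewrite with both operand orders.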
GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Type *IndexedType) {
  Value *IndexToSplit = GEP->getOperand(I + 1);
  if (SExtInst *SExt = dyn_cast<SExtInst>(IndexToSplit)) {
    IndexToSplit = SExt->getOperand(0);
  } else if (ZExtInst *ZExt = dyn_cast<ZExtInst>(IndexToSplit)) {
    // zext can be treated as sext if the source is non-negative.
    if (isKnownNonNegative(ZExt->getOperand(0), *DL, 0, AC, GEP, DT))
      IndexToSplit = ZExt->getOperand(0);
  }

  if (AddOperator *AO = dyn_cast<AddOperator>(IndexToSplit)) {
    // If the I-th index needs sext and the underlying add is not equipped with
    // nsw, we cannot split the add because
    //   sext(LHS + RHS) != sext(LHS) + sext(RHS).
    if (requiresSignExtension(IndexToSplit, GEP) &&
        computeOverflowForSignedAdd(AO, *DL, AC, GEP, DT) !=
            OverflowResult::NeverOverflows)
      return nullptr;

    Value *LHS = AO->getOperand(0), *RHS = AO->getOperand(1);
    // IndexToSplit = LHS + RHS.
    if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I, LHS, RHS, IndexedType))
      return NewGEP;
    // Symmetrically, try IndexToSplit = RHS + LHS.
    if (LHS != RHS) {
      if (auto *NewGEP =
              tryReassociateGEPAtIndex(GEP, I, RHS, LHS, IndexedType))
        return NewGEP;
    }
  }
  return nullptr;
}

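// Given that GEP's I-th index equals LHS + RHS, looks for a dominating
// instruction that computes GEP with the I-th index replaced by LHS; if one
// exists, rebuilds GEP as that candidate advanced by RHS elements.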
GetElementPtrInst *
NaryReassociatePass::tryReassociateGEPAtIndex(GetElementPtrInst *GEP,
                                              unsigned I, Value *LHS,
                                              Value *RHS, Type *IndexedType) {
  // Look for GEP's closest dominator that has the same SCEV as GEP except that
  // the I-th index is replaced with LHS.
  SmallVector<const SCEV *, 4> IndexExprs;
  for (auto Index = GEP->idx_begin(); Index != GEP->idx_end(); ++Index)
    IndexExprs.push_back(SE->getSCEV(*Index));
  // Replace the I-th index with LHS.
  IndexExprs[I] = SE->getSCEV(LHS);
  if (isKnownNonNegative(LHS, *DL, 0, AC, GEP, DT) &&
      DL->getTypeSizeInBits(LHS->getType()) <
          DL->getTypeSizeInBits(GEP->getOperand(I)->getType())) {
    // Zero-extend LHS if it is non-negative. InstCombine canonicalizes sext to
    // zext if the source operand is proved non-negative. We should do the same
    // here, so that CandidateExpr is more likely to match an expression
    // computed before. See @reassociate_gep_assume for an example of this
    // canonicalization.
    IndexExprs[I] =
        SE->getZeroExtendExpr(IndexExprs[I], GEP->getOperand(I)->getType());
  }
  const SCEV *CandidateExpr = SE->getGEPExpr(cast<GEPOperator>(GEP),
                                             IndexExprs);

  Value *Candidate = findClosestMatchingDominator(CandidateExpr, GEP);
  if (Candidate == nullptr)
    return nullptr;

  IRBuilder<> Builder(GEP);
  // Candidate does not necessarily have the same pointer type as GEP. Use
  // bitcast or pointer cast to make sure they have the same type, so that the
  // later RAUW doesn't complain.
  Candidate = Builder.CreateBitOrPointerCast(Candidate, GEP->getType());
  assert(Candidate->getType() == GEP->getType());

  // NewGEP = (char *)Candidate + RHS * sizeof(IndexedType)
  uint64_t IndexedSize = DL->getTypeAllocSize(IndexedType);
  Type *ElementType = GEP->getResultElementType();
  uint64_t ElementSize = DL->getTypeAllocSize(ElementType);
  // A complication: because I is not necessarily the last index of the GEP,
  // the size of the type at the I-th index (IndexedSize) is not necessarily
  // divisible by ElementSize. For example,
  //
  // #pragma pack(1)
  // struct S {
  //   int a[3];
  //   int64 b[8];
  // };
  // #pragma pack()
  //
  // sizeof(S) = 76 (assuming 4-byte int) is indivisible by sizeof(int64) = 8.
  //
  // We bail out on this case for now. TODO: we could emit uglygep instead.
  if (IndexedSize % ElementSize != 0)
    return nullptr;

  // NewGEP = &Candidate[RHS * (sizeof(IndexedType) / sizeof(Candidate[0]))]
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (RHS->getType() != IntPtrTy)
    RHS = Builder.CreateSExtOrTrunc(RHS, IntPtrTy);
  if (IndexedSize != ElementSize) {
    RHS = Builder.CreateMul(
        RHS, ConstantInt::get(IntPtrTy, IndexedSize / ElementSize));
  }
  GetElementPtrInst *NewGEP =
      cast<GetElementPtrInst>(Builder.CreateGEP(Candidate, RHS));
  NewGEP->setIsInBounds(GEP->isInBounds());
  NewGEP->takeName(GEP);
  return NewGEP;
}

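// Tries both operand orders of I: either operand may be the (A op B) half
// that gets split, so we try I = LHS op RHS first and then I = RHS op LHS.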
Instruction *NaryReassociatePass::tryReassociateBinaryOp(BinaryOperator *I) {
  Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
  if (auto *NewI = tryReassociateBinaryOp(LHS, RHS, I))
    return NewI;
  if (auto *NewI = tryReassociateBinaryOp(RHS, LHS, I))
    return NewI;
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociateBinaryOp(Value *LHS, Value *RHS,
                                                         BinaryOperator *I) {
  Value *A = nullptr, *B = nullptr;
  // To be conservative, we reassociate I only when it is the only user of
  // (A op B).
  if (LHS->hasOneUse() && matchTernaryOp(I, LHS, A, B)) {
    // I = (A op B) op RHS
    //   = (A op RHS) op B or (B op RHS) op A
    const SCEV *AExpr = SE->getSCEV(A), *BExpr = SE->getSCEV(B);
    const SCEV *RHSExpr = SE->getSCEV(RHS);
    if (BExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, AExpr, RHSExpr), B, I))
        return NewI;
    }
    if (AExpr != RHSExpr) {
      if (auto *NewI =
              tryReassociatedBinaryOp(getBinarySCEV(I, BExpr, RHSExpr), A, I))
        return NewI;
    }
  }
  return nullptr;
}

Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
                                                          Value *RHS,
                                                          BinaryOperator *I) {
  // Look for the closest dominator LHS of I that computes LHSExpr, and replace
  // I with LHS op RHS.
  auto *LHS = findClosestMatchingDominator(LHSExpr, I);
  if (LHS == nullptr)
    return nullptr;

  Instruction *NewI = nullptr;
  switch (I->getOpcode()) {
  case Instruction::Add:
    NewI = BinaryOperator::CreateAdd(LHS, RHS, "", I);
    break;
  case Instruction::Mul:
    NewI = BinaryOperator::CreateMul(LHS, RHS, "", I);
    break;
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  NewI->takeName(I);
  return NewI;
}

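// If V is an expression of the same opcode as I (add or mul), binds its two
// operands to Op1 and Op2 and returns true.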
bool NaryReassociatePass::matchTernaryOp(BinaryOperator *I, Value *V,
                                         Value *&Op1, Value *&Op2) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return match(V, m_Add(m_Value(Op1), m_Value(Op2)));
  case Instruction::Mul:
    return match(V, m_Mul(m_Value(Op1), m_Value(Op2)));
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return false;
}

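// Builds the SCEV expression for "LHS op RHS", where op is I's opcode.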
const SCEV *NaryReassociatePass::getBinarySCEV(BinaryOperator *I,
                                               const SCEV *LHS,
                                               const SCEV *RHS) {
  switch (I->getOpcode()) {
  case Instruction::Add:
    return SE->getAddExpr(LHS, RHS);
  case Instruction::Mul:
    return SE->getMulExpr(LHS, RHS);
  default:
    llvm_unreachable("Unexpected instruction.");
  }
  return nullptr;
}

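// Among the instructions recorded in SeenExprs that compute CandidateExpr,
// returns the closest one that dominates Dominatee, or nullptr if none does.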
Instruction *
NaryReassociatePass::findClosestMatchingDominator(const SCEV *CandidateExpr,
                                                  Instruction *Dominatee) {
  auto Pos = SeenExprs.find(CandidateExpr);
  if (Pos == SeenExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    // Candidates stores WeakTrackingVHs, so a candidate can be nullptr if it's
    // removed during rewriting.
    if (Value *Candidate = Candidates.back()) {
      Instruction *CandidateInstruction = cast<Instruction>(Candidate);
      if (DT->dominates(CandidateInstruction, Dominatee))
        return CandidateInstruction;
    }
    Candidates.pop_back();
  }
  return nullptr;
}