1 //===- TruncInstCombine.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // TruncInstCombine - looks for expression dags post-dominated by TruncInst and
10 // for each eligible dag, it will create a reduced bit-width expression, replace
11 // the old expression with this new one and remove the old expression.
12 // Eligible expression dag is such that:
13 //   1. Contains only supported instructions.
14 //   2. Supported leaves: ZExtInst, SExtInst, TruncInst and Constant value.
15 //   3. Can be evaluated into type with reduced legal bit-width.
16 //   4. All instructions in the dag must not have users outside the dag.
17 //      The only exception is for {ZExt, SExt}Inst with operand type equal to
18 //      the new reduced type evaluated in (3).
19 //
20 // The motivation for this optimization is that evaluating an expression using
21 // smaller bit-width is preferable, especially for vectorization where we can
22 // fit more values in one vectorized instruction. In addition, this optimization
23 // may decrease the number of cast instructions, but will not increase it.
24 //
25 //===----------------------------------------------------------------------===//
26 
27 #include "AggressiveInstCombineInternal.h"
28 #include "llvm/ADT/STLExtras.h"
29 #include "llvm/ADT/Statistic.h"
30 #include "llvm/Analysis/ConstantFolding.h"
31 #include "llvm/Analysis/TargetLibraryInfo.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/IRBuilder.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/Support/KnownBits.h"
38 
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "aggressive-instcombine"
42 
43 STATISTIC(
44     NumDAGsReduced,
45     "Number of truncations eliminated by reducing bit width of expression DAG");
46 STATISTIC(NumInstrsReduced,
47           "Number of instructions whose bit width was reduced");
48 
49 /// Given an instruction and a container, it fills all the relevant operands of
50 /// that instruction, with respect to the Trunc expression dag optimizaton.
51 static void getRelevantOperands(Instruction *I, SmallVectorImpl<Value *> &Ops) {
52   unsigned Opc = I->getOpcode();
53   switch (Opc) {
54   case Instruction::Trunc:
55   case Instruction::ZExt:
56   case Instruction::SExt:
57     // These CastInst are considered leaves of the evaluated expression, thus,
58     // their operands are not relevent.
59     break;
60   case Instruction::Add:
61   case Instruction::Sub:
62   case Instruction::Mul:
63   case Instruction::And:
64   case Instruction::Or:
65   case Instruction::Xor:
66   case Instruction::Shl:
67   case Instruction::LShr:
68   case Instruction::AShr:
69     Ops.push_back(I->getOperand(0));
70     Ops.push_back(I->getOperand(1));
71     break;
72   case Instruction::Select:
73     Ops.push_back(I->getOperand(1));
74     Ops.push_back(I->getOperand(2));
75     break;
76   default:
77     llvm_unreachable("Unreachable!");
78   }
79 }
80 
/// Walk the expression dag rooted at the current trunc instruction's operand
/// using an iterative post-order DFS (Worklist drives the traversal, Stack
/// tracks the instructions whose operands are still being processed) and
/// record every instruction of the dag into InstInfoMap.
/// \returns false if the dag contains an unsupported value (a non-constant,
/// non-instruction value such as a function argument) or an unsupported
/// instruction; in that case the dag cannot be reduced.
bool TruncInstCombine::buildTruncExpressionDag() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;
  // Clear old expression dag.
  InstInfoMap.clear();

  Worklist.push_back(CurrentTruncInst->getOperand(0));

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    // Constants are always valid leaves of the dag; nothing to record.
    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    // A value that is neither a constant nor an instruction (e.g. a function
    // argument) cannot be re-evaluated in a reduced bit-width.
    auto *I = dyn_cast<Instruction>(Curr);
    if (!I)
      return false;

    if (!Stack.empty() && Stack.back() == I) {
      // Already handled all instruction operands, can remove it from both the
      // Worklist and the Stack, and add it to the instruction info map.
      Worklist.pop_back();
      Stack.pop_back();
      // Insert I to the Info map.
      InstInfoMap.insert(std::make_pair(I, Info()));
      continue;
    }

    // Skip an instruction that was already recorded via another dag path.
    if (InstInfoMap.count(I)) {
      Worklist.pop_back();
      continue;
    }

    // Add the instruction to the stack before start handling its operands.
    Stack.push_back(I);

    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // trunc(trunc(x)) -> trunc(x)
      // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
      // trunc(ext(x)) -> trunc(x) if the source type is larger than the new
      // dest
      break;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::Select: {
      // Queue the relevant operands so they are visited before I is retired
      // from the Stack (post-order).
      SmallVector<Value *, 2> Operands;
      getRelevantOperands(I, Operands);
      append_range(Worklist, Operands);
      break;
    }
    default:
      // TODO: Can handle more cases here:
      // 1. shufflevector, extractelement, insertelement
      // 2. udiv, urem
      // 3. phi node(and loop handling)
      // ...
      return false;
    }
  }
  return true;
}
155 
/// Compute the minimum bit-width in which the expression dag (previously
/// collected into InstInfoMap) can be evaluated. The walk propagates the
/// required ValidBitWidth from the trunc down to the operands and then folds
/// the resulting MinBitWidth back up in post-order. The returned width is
/// adjusted for type legality; returning the original bit-width signals that
/// no reduction should be performed.
unsigned TruncInstCombine::getMinBitWidth() {
  SmallVector<Value *, 8> Worklist;
  SmallVector<Instruction *, 8> Stack;

  Value *Src = CurrentTruncInst->getOperand(0);
  Type *DstTy = CurrentTruncInst->getType();
  unsigned TruncBitWidth = DstTy->getScalarSizeInBits();
  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  // A constant source can always be evaluated in the destination width.
  if (isa<Constant>(Src))
    return TruncBitWidth;

  // Seed the walk: the trunc itself only needs TruncBitWidth valid bits of
  // its operand.
  Worklist.push_back(Src);
  InstInfoMap[cast<Instruction>(Src)].ValidBitWidth = TruncBitWidth;

  while (!Worklist.empty()) {
    Value *Curr = Worklist.back();

    // Constants impose no bit-width requirement of their own.
    if (isa<Constant>(Curr)) {
      Worklist.pop_back();
      continue;
    }

    // Otherwise, it must be an instruction.
    auto *I = cast<Instruction>(Curr);

    auto &Info = InstInfoMap[I];

    SmallVector<Value *, 2> Operands;
    getRelevantOperands(I, Operands);

    if (!Stack.empty() && Stack.back() == I) {
      // Already handled all instruction operands, can remove it from both, the
      // Worklist and the Stack, and update MinBitWidth.
      Worklist.pop_back();
      Stack.pop_back();
      for (auto *Operand : Operands)
        if (auto *IOp = dyn_cast<Instruction>(Operand))
          Info.MinBitWidth =
              std::max(Info.MinBitWidth, InstInfoMap[IOp].MinBitWidth);
      continue;
    }

    // Add the instruction to the stack before start handling its operands.
    Stack.push_back(I);
    unsigned ValidBitWidth = Info.ValidBitWidth;

    // Update minimum bit-width before handling its operands. This is required
    // when the instruction is part of a loop.
    Info.MinBitWidth = std::max(Info.MinBitWidth, Info.ValidBitWidth);

    for (auto *Operand : Operands)
      if (auto *IOp = dyn_cast<Instruction>(Operand)) {
        // If we already calculated the minimum bit-width for this valid
        // bit-width, or for a smaller valid bit-width, then just keep the
        // answer we already calculated.
        unsigned IOpBitwidth = InstInfoMap.lookup(IOp).ValidBitWidth;
        if (IOpBitwidth >= ValidBitWidth)
          continue;
        InstInfoMap[IOp].ValidBitWidth = ValidBitWidth;
        Worklist.push_back(IOp);
      }
  }
  unsigned MinBitWidth = InstInfoMap.lookup(cast<Instruction>(Src)).MinBitWidth;
  assert(MinBitWidth >= TruncBitWidth);

  if (MinBitWidth > TruncBitWidth) {
    // In this case reducing expression with vector type might generate a new
    // vector type, which is not preferable as it might result in generating
    // sub-optimal code.
    if (DstTy->isVectorTy())
      return OrigBitWidth;
    // Use the smallest integer type in the range [MinBitWidth, OrigBitWidth).
    Type *Ty = DL.getSmallestLegalIntType(DstTy->getContext(), MinBitWidth);
    // Update minimum bit-width with the new destination type bit-width if
    // succeeded to find such, otherwise, with original bit-width.
    MinBitWidth = Ty ? Ty->getScalarSizeInBits() : OrigBitWidth;
  } else { // MinBitWidth == TruncBitWidth
    // In this case the expression can be evaluated with the trunc instruction
    // destination type, and trunc instruction can be omitted. However, we
    // should not perform the evaluation if the original type is a legal scalar
    // type and the target type is illegal.
    bool FromLegal = MinBitWidth == 1 || DL.isLegalInteger(OrigBitWidth);
    bool ToLegal = MinBitWidth == 1 || DL.isLegalInteger(MinBitWidth);
    if (!DstTy->isVectorTy() && FromLegal && !ToLegal)
      return OrigBitWidth;
  }
  return MinBitWidth;
}
246 
/// Decide the scalar integer type the current trunc's expression dag should
/// be evaluated in. Builds the dag, checks user/profitability constraints,
/// seeds shift-specific bit-width lower bounds, and finally queries
/// getMinBitWidth().
/// \returns the reduced integer type, or nullptr if the dag cannot (or
/// should not) be reduced.
Type *TruncInstCombine::getBestTruncatedType() {
  if (!buildTruncExpressionDag())
    return nullptr;

  // We don't want to duplicate instructions, which isn't profitable. Thus, we
  // can't shrink something that has multiple users, unless all users are
  // post-dominated by the trunc instruction, i.e., were visited during the
  // expression evaluation.
  unsigned DesiredBitWidth = 0;
  for (auto Itr : InstInfoMap) {
    Instruction *I = Itr.first;
    if (I->hasOneUse())
      continue;
    bool IsExtInst = (isa<ZExtInst>(I) || isa<SExtInst>(I));
    for (auto *U : I->users())
      if (auto *UI = dyn_cast<Instruction>(U))
        if (UI != CurrentTruncInst && !InstInfoMap.count(UI)) {
          if (!IsExtInst)
            return nullptr;
          // If this is an extension from the dest type, we can eliminate it,
          // even if it has multiple users. Thus, update the DesiredBitWidth
          // and validate that all extension instructions agree on the same
          // DesiredBitWidth.
          unsigned ExtInstBitWidth =
              I->getOperand(0)->getType()->getScalarSizeInBits();
          if (DesiredBitWidth && DesiredBitWidth != ExtInstBitWidth)
            return nullptr;
          DesiredBitWidth = ExtInstBitWidth;
        }
  }

  unsigned OrigBitWidth =
      CurrentTruncInst->getOperand(0)->getType()->getScalarSizeInBits();

  // Initialize MinBitWidth for shift instructions with the minimum number
  // that is greater than shift amount (i.e. shift amount + 1).
  // For `lshr` adjust MinBitWidth so that all potentially truncated
  // bits of the value-to-be-shifted are zeros.
  // For `ashr` adjust MinBitWidth so that all potentially truncated
  // bits of the value-to-be-shifted are sign bits (all zeros or ones)
  // and even one (first) untruncated bit is sign bit.
  // Exit early if MinBitWidth is not less than original bitwidth.
  for (auto &Itr : InstInfoMap) {
    Instruction *I = Itr.first;
    if (I->isShift()) {
      KnownBits KnownRHS = computeKnownBits(I->getOperand(1), DL);
      unsigned MinBitWidth = KnownRHS.getMaxValue()
                                 .uadd_sat(APInt(OrigBitWidth, 1))
                                 .getLimitedValue(OrigBitWidth);
      if (MinBitWidth == OrigBitWidth)
        return nullptr;
      if (I->getOpcode() == Instruction::LShr) {
        KnownBits KnownLHS = computeKnownBits(I->getOperand(0), DL);
        MinBitWidth =
            std::max(MinBitWidth, KnownLHS.getMaxValue().getActiveBits());
      }
      if (I->getOpcode() == Instruction::AShr) {
        unsigned NumSignBits = ComputeNumSignBits(I->getOperand(0), DL);
        MinBitWidth = std::max(MinBitWidth, OrigBitWidth - NumSignBits + 1);
      }
      if (MinBitWidth >= OrigBitWidth)
        return nullptr;
      Itr.second.MinBitWidth = MinBitWidth;
    }
  }

  // Calculate minimum allowed bit-width allowed for shrinking the currently
  // visited truncate's operand.
  unsigned MinBitWidth = getMinBitWidth();

  // Check that we can shrink to smaller bit-width than original one and that
  // it matches the DesiredBitWidth, if such exists.
  if (MinBitWidth >= OrigBitWidth ||
      (DesiredBitWidth && DesiredBitWidth != MinBitWidth))
    return nullptr;

  return IntegerType::get(CurrentTruncInst->getContext(), MinBitWidth);
}
324 
325 /// Given a reduced scalar type \p Ty and a \p V value, return a reduced type
326 /// for \p V, according to its type, if it vector type, return the vector
327 /// version of \p Ty, otherwise return \p Ty.
328 static Type *getReducedType(Value *V, Type *Ty) {
329   assert(Ty && !Ty->isVectorTy() && "Expect Scalar Type");
330   if (auto *VTy = dyn_cast<VectorType>(V->getType()))
331     return VectorType::get(Ty, VTy->getElementCount());
332   return Ty;
333 }
334 
335 Value *TruncInstCombine::getReducedOperand(Value *V, Type *SclTy) {
336   Type *Ty = getReducedType(V, SclTy);
337   if (auto *C = dyn_cast<Constant>(V)) {
338     C = ConstantExpr::getIntegerCast(C, Ty, false);
339     // If we got a constantexpr back, try to simplify it with DL info.
340     return ConstantFoldConstant(C, DL, &TLI);
341   }
342 
343   auto *I = cast<Instruction>(V);
344   Info Entry = InstInfoMap.lookup(I);
345   assert(Entry.NewValue);
346   return Entry.NewValue;
347 }
348 
/// Materialize the reduced expression dag in the scalar type \p SclTy:
/// re-create each recorded instruction in the narrower type (recording the
/// replacement in Info::NewValue), rewire the current trunc's users to the
/// reduced value, then erase the now-dead original dag.
void TruncInstCombine::ReduceExpressionDag(Type *SclTy) {
  NumInstrsReduced += InstInfoMap.size();
  // InstInfoMap is iterated in insertion (post)order, so operands are
  // evaluated before their users.
  for (auto &Itr : InstInfoMap) { // Forward
    Instruction *I = Itr.first;
    TruncInstCombine::Info &NodeInfo = Itr.second;

    assert(!NodeInfo.NewValue && "Instruction has been evaluated");

    IRBuilder<> Builder(I);
    Value *Res = nullptr;
    unsigned Opc = I->getOpcode();
    switch (Opc) {
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt: {
      Type *Ty = getReducedType(I, SclTy);
      // If the source type of the cast is the type we're trying for then we can
      // just return the source.  There's no need to insert it because it is not
      // new.
      if (I->getOperand(0)->getType() == Ty) {
        assert(!isa<TruncInst>(I) && "Cannot reach here with TruncInst");
        NodeInfo.NewValue = I->getOperand(0);
        continue;
      }
      // Otherwise, must be the same type of cast, so just reinsert a new one.
      // This also handles the case of zext(trunc(x)) -> zext(x).
      Res = Builder.CreateIntCast(I->getOperand(0), Ty,
                                  Opc == Instruction::SExt);

      // Update Worklist entries with new value if needed.
      // There are three possible changes to the Worklist:
      // 1. Update Old-TruncInst -> New-TruncInst.
      // 2. Remove Old-TruncInst (if New node is not TruncInst).
      // 3. Add New-TruncInst (if Old node was not TruncInst).
      auto *Entry = find(Worklist, I);
      if (Entry != Worklist.end()) {
        if (auto *NewCI = dyn_cast<TruncInst>(Res))
          *Entry = NewCI;
        else
          Worklist.erase(Entry);
      } else if (auto *NewCI = dyn_cast<TruncInst>(Res))
          Worklist.push_back(NewCI);
      break;
    }
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *LHS = getReducedOperand(I->getOperand(0), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(1), SclTy);
      Res = Builder.CreateBinOp((Instruction::BinaryOps)Opc, LHS, RHS);
      // Preserve `exact` flag since truncation doesn't change exactness
      if (auto *PEO = dyn_cast<PossiblyExactOperator>(I))
        if (auto *ResI = dyn_cast<Instruction>(Res))
          ResI->setIsExact(PEO->isExact());
      break;
    }
    case Instruction::Select: {
      // The condition operand keeps its original type; only the selected
      // values are reduced.
      Value *Op0 = I->getOperand(0);
      Value *LHS = getReducedOperand(I->getOperand(1), SclTy);
      Value *RHS = getReducedOperand(I->getOperand(2), SclTy);
      Res = Builder.CreateSelect(Op0, LHS, RHS);
      break;
    }
    default:
      llvm_unreachable("Unhandled instruction");
    }

    NodeInfo.NewValue = Res;
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(I);
  }

  // Cast the reduced root back to the trunc's destination type if needed,
  // then replace every use of the trunc with it.
  Value *Res = getReducedOperand(CurrentTruncInst->getOperand(0), SclTy);
  Type *DstTy = CurrentTruncInst->getType();
  if (Res->getType() != DstTy) {
    IRBuilder<> Builder(CurrentTruncInst);
    Res = Builder.CreateIntCast(Res, DstTy, false);
    if (auto *ResI = dyn_cast<Instruction>(Res))
      ResI->takeName(CurrentTruncInst);
  }
  CurrentTruncInst->replaceAllUsesWith(Res);

  // Erase old expression dag, which was replaced by the reduced expression dag.
  // We iterate backward, which means we visit the instruction before we visit
  // any of its operands, this way, when we get to the operand, we already
  // removed the instructions (from the expression dag) that uses it.
  CurrentTruncInst->eraseFromParent();
  for (auto I = InstInfoMap.rbegin(), E = InstInfoMap.rend(); I != E; ++I) {
    // We still need to check that the instruction has no users before we erase
    // it, because {SExt, ZExt}Inst Instruction might have other users that was
    // not reduced, in such case, we need to keep that instruction.
    if (I->first->use_empty())
      I->first->eraseFromParent();
  }
}
450 
451 bool TruncInstCombine::run(Function &F) {
452   bool MadeIRChange = false;
453 
454   // Collect all TruncInst in the function into the Worklist for evaluating.
455   for (auto &BB : F) {
456     // Ignore unreachable basic block.
457     if (!DT.isReachableFromEntry(&BB))
458       continue;
459     for (auto &I : BB)
460       if (auto *CI = dyn_cast<TruncInst>(&I))
461         Worklist.push_back(CI);
462   }
463 
464   // Process all TruncInst in the Worklist, for each instruction:
465   //   1. Check if it dominates an eligible expression dag to be reduced.
466   //   2. Create a reduced expression dag and replace the old one with it.
467   while (!Worklist.empty()) {
468     CurrentTruncInst = Worklist.pop_back_val();
469 
470     if (Type *NewDstSclTy = getBestTruncatedType()) {
471       LLVM_DEBUG(
472           dbgs() << "ICE: TruncInstCombine reducing type of expression dag "
473                     "dominated by: "
474                  << CurrentTruncInst << '\n');
475       ReduceExpressionDag(NewDstSclTy);
476       ++NumDAGsReduced;
477       MadeIRChange = true;
478     }
479   }
480 
481   return MadeIRChange;
482 }
483