1 //===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains logic for simplifying instructions based on information
11 // about how they are used.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "InstCombineInternal.h"
16 #include "llvm/Analysis/ValueTracking.h"
17 #include "llvm/IR/IntrinsicInst.h"
18 #include "llvm/IR/PatternMatch.h"
19 
20 using namespace llvm;
21 using namespace llvm::PatternMatch;
22 
23 #define DEBUG_TYPE "instcombine"
24 
25 /// Check to see if the specified operand of the specified instruction is a
26 /// constant integer. If so, check to see if there are any bits set in the
27 /// constant that are not demanded. If so, shrink the constant and return true.
28 static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
29                                    const APInt &Demanded) {
30   assert(I && "No instruction?");
31   assert(OpNo < I->getNumOperands() && "Operand index too large");
32 
33   // The operand must be a constant integer or splat integer.
34   Value *Op = I->getOperand(OpNo);
35   const APInt *C;
36   if (!match(Op, m_APInt(C)))
37     return false;
38 
39   // If there are no bits set that aren't demanded, nothing to do.
40   if (C->isSubsetOf(Demanded))
41     return false;
42 
43   // This instruction is producing bits that are not demanded. Shrink the RHS.
44   I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));
45 
46   return true;
47 }
48 
49 
50 
51 /// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
52 /// the instruction has any properties that allow us to simplify its operands.
53 bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
54   unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
55   APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
56   APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
57 
58   Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, KnownZero, KnownOne,
59                                      0, &Inst);
60   if (!V) return false;
61   if (V == &Inst) return true;
62   replaceInstUsesWith(Inst, V);
63   return true;
64 }
65 
66 /// This form of SimplifyDemandedBits simplifies the specified instruction
67 /// operand if possible, updating it in place. It returns true if it made any
68 /// change and false otherwise.
69 bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
70                                         const APInt &DemandedMask,
71                                         APInt &KnownZero, APInt &KnownOne,
72                                         unsigned Depth) {
73   Use &U = I->getOperandUse(OpNo);
74   Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, KnownZero,
75                                           KnownOne, Depth, I);
76   if (!NewVal) return false;
77   U = NewVal;
78   return true;
79 }
80 
81 
82 /// This function attempts to replace V with a simpler value based on the
83 /// demanded bits. When this function is called, it is known that only the bits
84 /// set in DemandedMask of the result of V are ever used downstream.
85 /// Consequently, depending on the mask and V, it may be possible to replace V
86 /// with a constant or one of its operands. In such cases, this function does
/// the replacement and returns true. In all other cases, it returns false after
/// analyzing the expression, setting KnownOne to the bits known to be one in
/// the expression and KnownZero to all the bits that are known to be zero in the
90 /// expression. These are provided to potentially allow the caller (which might
91 /// recursively be SimplifyDemandedBits itself) to simplify the expression.
92 /// KnownOne and KnownZero always follow the invariant that:
93 ///   KnownOne & KnownZero == 0.
94 /// That is, a bit can't be both 1 and 0. Note that the bits in KnownOne and
95 /// KnownZero may only be accurate for those bits set in DemandedMask. Note also
96 /// that the bitwidth of V, DemandedMask, KnownZero and KnownOne must all be the
97 /// same.
98 ///
99 /// This returns null if it did not change anything and it permits no
100 /// simplification.  This returns V itself if it did some simplification of V's
101 /// operands based on the information about what bits are demanded. This returns
102 /// some other non-null value if it found out that V is equal to another value
103 /// in the context where the specified bits are demanded, but not for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             APInt &KnownZero, APInt &KnownOne,
                                             unsigned Depth,
                                             Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      KnownZero.getBitWidth() == BitWidth &&
      KnownOne.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask, KnownZero and KnownOne "
      "must have same BitWidth");

  // Constants cannot be simplified further; just report their known bits.
  if (isa<Constant>(V)) {
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    return nullptr;
  }

  KnownZero.clearAllBits();
  KnownOne.clearAllBits();
  if (DemandedMask == 0)     // Not demanding any bits from V.
    return UndefValue::get(VTy);

  if (Depth == 6)        // Limit search depth.
    return nullptr;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    return nullptr;        // Only analyze instructions.
  }

  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse()) {
    return SimplifyMultipleUseDemandedBits(I, DemandedMask, KnownZero, KnownOne,
                                           Depth, CxtI);
  }

  // Scratch known-bits for the operands of the binary operators handled below.
  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands.  This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask.setAllBits();

  switch (I->getOpcode()) {
  default:
    // Opcode not handled specially: fall back to generic known-bits analysis.
    computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownZero, LHSKnownZero,
                             LHSKnownOne, Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    APInt IKnownZero = RHSKnownZero | LHSKnownZero;
    // Output known-1 bits are only known if set in both the LHS & RHS.
    APInt IKnownOne = RHSKnownOne & LHSKnownOne;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
      return I;

    KnownZero = std::move(IKnownZero);
    KnownOne  = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownOne, LHSKnownZero,
                             LHSKnownOne, Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    APInt IKnownZero = RHSKnownZero & LHSKnownZero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    APInt IKnownOne = RHSKnownOne | LHSKnownOne;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    KnownZero = std::move(IKnownZero);
    KnownOne  = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnownZero, LHSKnownOne,
                             Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
                       (RHSKnownOne & LHSKnownOne);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    APInt IKnownOne =  (RHSKnownZero & LHSKnownOne) |
                       (RHSKnownOne & LHSKnownZero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedMask.isSubsetOf(RHSKnownZero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnownZero))
      return I->getOperand(1);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownZero)) {
      Instruction *Or =
        BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                 I->getName());
      return InsertNewInstWith(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if (DemandedMask.isSubsetOf(RHSKnownZero|RHSKnownOne) &&
        RHSKnownOne.isSubsetOf(LHSKnownOne)) {
      Constant *AndC = Constant::getIntegerValue(VTy,
                                                 ~RHSKnownOne & DemandedMask);
      Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
      return InsertNewInstWith(And, *I);
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // If our LHS is an 'and' and if it has one use, and if any of the bits we
    // are flipping are known to be set, then the xor is just resetting those
    // bits to zero.  We can just knock out bits from the 'and' and the 'xor',
    // simplifying both of them.
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          isa<ConstantInt>(I->getOperand(1)) &&
          isa<ConstantInt>(LHSInst->getOperand(1)) &&
          (LHSKnownOne & RHSKnownOne & DemandedMask) != 0) {
        ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
        ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
        // Bits that are set in both constants (and demanded) can be dropped
        // from both the 'and' mask and the 'xor' mask.
        APInt NewMask = ~(LHSKnownOne & RHSKnownOne & DemandedMask);

        Constant *AndC =
          ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        InsertNewInstWith(NewAnd, *I);

        Constant *XorC =
          ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
        return InsertNewInstWith(NewXor, *I);
      }

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZero = std::move(IKnownZero);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne  = std::move(IKnownOne);
    break;
  }
  case Instruction::Select:
    // If this is a select as part of a min/max pattern, don't simplify any
    // further in case we break the structure.
    Value *LHS, *RHS;
    if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

    // Operand 2 is the false arm, operand 1 the true arm; both feed the result
    // directly, so both see the full demanded mask.
    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnownZero, LHSKnownOne,
                             Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    KnownOne = RHSKnownOne & LHSKnownOne;
    KnownZero = RHSKnownZero & LHSKnownZero;
    break;
  case Instruction::Trunc: {
    // Widen the demanded mask and known bits to the source width before
    // recursing, then narrow them back to the result width afterwards.
    unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
    DemandedMask = DemandedMask.zext(truncBf);
    KnownZero = KnownZero.zext(truncBf);
    KnownOne = KnownOne.zext(truncBf);
    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    DemandedMask = DemandedMask.trunc(BitWidth);
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      return nullptr;  // vector->int or fp->int?

    if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (VectorType *SrcVTy =
            dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return nullptr;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return nullptr;
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // Don't touch a vector-to-scalar bitcast.
      return nullptr;

    // Same total bit width and compatible element counts: bits map 1:1, so
    // the mask and known bits carry through unchanged.
    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    break;
  case Instruction::ZExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();

    DemandedMask = DemandedMask.trunc(SrcBitWidth);
    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    DemandedMask = DemandedMask.zext(BitWidth);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    // The top bits are known to be zero.
    KnownZero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth =I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask &
                              APInt::getLowBitsSet(BitWidth, SrcBitWidth);

    APInt NewBits(APInt::getBitsSetFrom(BitWidth, SrcBitWidth));
    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if ((NewBits & DemandedMask) != 0)
      InputDemandedBits.setBit(SrcBitWidth-1);

    InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    InputDemandedBits = InputDemandedBits.zext(BitWidth);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, or if the NewBits are not demanded
    // convert this into a zero extension.
    if (KnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
      // Convert to ZExt cast
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    } else if (KnownOne[SrcBitWidth-1]) {    // Input sign bit known set
      KnownOne |= NewBits;
    }
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    /// If the high-bits of an ADD/SUB are not demanded, then we do not care
    /// about the high bits of the operands.
    unsigned NLZ = DemandedMask.countLeadingZeros();
    if (NLZ > 0) {
      // Right fill the mask of bits for this ADD/SUB to demand the most
      // significant bit and all those below it.
      APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
      if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
          SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnownZero, LHSKnownOne,
                               Depth + 1) ||
          ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
          SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnownZero, RHSKnownOne,
                               Depth + 1)) {
        // Disable the nsw and nuw flags here: We can no longer guarantee that
        // we won't wrap after simplification. Removing the nsw/nuw flags is
        // legal here because the top bit is not demanded.
        BinaryOperator &BinOP = *cast<BinaryOperator>(I);
        BinOP.setHasNoSignedWrap(false);
        BinOP.setHasNoUnsignedWrap(false);
        return I;
      }

      // If we are known to be adding/subtracting zeros to every bit below
      // the highest demanded bit, we just return the other side.
      if ((DemandedFromOps & RHSKnownZero) == DemandedFromOps)
        return I->getOperand(0);
      // We can't do this with the LHS for subtraction.
      if (I->getOpcode() == Instruction::Add &&
          (DemandedFromOps & LHSKnownZero) == DemandedFromOps)
        return I->getOperand(1);
    }

    // Otherwise just hand the add/sub off to computeKnownBits to fill in
    // the known zeros and ones.
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    break;
  }
  case Instruction::Shl: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      const APInt *ShrAmt;
      // If this shl consumes a constant shift-right, the pair may fold to a
      // single shift/mask; try that first.
      if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt)))) {
        Instruction *Shr = cast<Instruction>(I->getOperand(0));
        if (Value *R = simplifyShrShlDemandedBits(
                Shr, *ShrAmt, I, *SA, DemandedMask, KnownZero, KnownOne))
          return R;
      }

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, then it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt+1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      // low bits known zero.
      if (ShiftAmt)
        KnownZero.setLowBits(ShiftAmt);
    }
    break;
  }
  case Instruction::LShr: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (cast<LShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      KnownZero.lshrInPlace(ShiftAmt);
      KnownOne.lshrInPlace(ShiftAmt);
      if (ShiftAmt)
        KnownZero.setHighBits(ShiftAmt);  // high bits known zero.
    }
    break;
  }
  case Instruction::AShr: {
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable.  The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedMask == 1) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
                        I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignMask())
      return I->getOperand(0);

    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the high bits are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.setSignBit();

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (cast<AShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;

      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      KnownZero.lshrInPlace(ShiftAmt);
      KnownOne.lshrInPlace(ShiftAmt);

      // Handle the sign bits.
      APInt SignMask(APInt::getSignMask(BitWidth));
      // Adjust to where it is now in the mask.
      SignMask.lshrInPlace(ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
          !DemandedMask.intersects(HighBits)) {
        BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
                                                          I->getOperand(1));
        LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
        return InsertNewInstWith(LShr, *I);
      } else if (KnownOne.intersects(SignMask)) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // X % -1 demands all the bits because we don't want to introduce
      // INT_MIN % -1 (== undef) by accident.
      if (Rem->isAllOnesValue())
        break;
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA))    // srem won't affect demanded bits
          return I->getOperand(0);

        // Demand the low bits plus the sign bit of the LHS; the sign decides
        // how the upper result bits are filled in below.
        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnownZero, LHSKnownOne,
                                 Depth + 1))
          return I;

        // The low bits of LHS are unchanged by the srem.
        KnownZero = LHSKnownZero & LowBits;
        KnownOne = LHSKnownOne & LowBits;

        // If LHS is non-negative or has all low bits zero, then the upper bits
        // are all zero.
        if (LHSKnownZero.isSignBitSet() || LowBits.isSubsetOf(LHSKnownZero))
          KnownZero |= ~LowBits;

        // If LHS is negative and not all low bits are zero, then the upper bits
        // are all one.
        if (LHSKnownOne.isSignBitSet() && LowBits.intersects(LHSKnownOne))
          KnownOne |= ~LowBits;

        assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (DemandedMask.isSignBitSet()) {
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       CxtI);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isSignBitSet())
        KnownZero.setSignBit();
    }
    break;
  case Instruction::URem: {
    APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, KnownZero2, KnownOne2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, KnownZero2, KnownOne2, Depth + 1))
      return I;

    // After the second call KnownZero2 holds the divisor's known bits; the
    // remainder is smaller than the divisor, so it has at least as many
    // leading zeros.
    unsigned Leaders = KnownZero2.countLeadingOnes();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte.  If we have 11 trailing zeros, then
        // we need all the bits down to bit 8.  Likewise, round NLZ.  If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte into
          // the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
        break;
      }
      case Intrinsic::x86_mmx_pmovmskb:
      case Intrinsic::x86_sse_movmsk_ps:
      case Intrinsic::x86_sse2_movmsk_pd:
      case Intrinsic::x86_sse2_pmovmskb_128:
      case Intrinsic::x86_avx_movmsk_ps_256:
      case Intrinsic::x86_avx_movmsk_pd_256:
      case Intrinsic::x86_avx2_pmovmskb: {
        // MOVMSK copies the vector elements' sign bits to the low bits
        // and zeros the high bits.
        unsigned ArgWidth;
        if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
          ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
        } else {
          auto Arg = II->getArgOperand(0);
          auto ArgType = cast<VectorType>(Arg->getType());
          ArgWidth = ArgType->getNumElements();
        }

        // If we don't need any of low bits then return zero,
        // we know that DemandedMask is non-zero already.
        APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
        if (DemandedElts == 0)
          return ConstantInt::getNullValue(VTy);

        // We know that the upper bits are set to zero.
        KnownZero.setBitsFrom(ArgWidth);
        return nullptr;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        // crc32 produces a 32-bit result even in the 64-bit variant, so the
        // upper 32 bits are known zero.
        KnownZero.setBitsFrom(32);
        return nullptr;
      }
    }
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if (DemandedMask.isSubsetOf(KnownZero|KnownOne))
    return Constant::getIntegerValue(VTy, KnownOne);
  return nullptr;
}
725 
726 /// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
727 /// bits. It also tries to handle simplifications that can be done based on
728 /// DemandedMask, but without modifying the Instruction.
729 Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
730                                                      const APInt &DemandedMask,
731                                                      APInt &KnownZero,
732                                                      APInt &KnownOne,
733                                                      unsigned Depth,
734                                                      Instruction *CxtI) {
735   unsigned BitWidth = DemandedMask.getBitWidth();
736   Type *ITy = I->getType();
737 
738   APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
739   APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);
740 
741   // Despite the fact that we can't simplify this instruction in all User's
742   // context, we can at least compute the knownzero/knownone bits, and we can
743   // do simplifications that apply to *just* the one user if we know that
744   // this instruction has a simpler value in that context.
745   switch (I->getOpcode()) {
746   case Instruction::And: {
747     // If either the LHS or the RHS are Zero, the result is zero.
748     computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
749                      CxtI);
750     computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
751                      CxtI);
752 
753     // Output known-0 are known to be clear if zero in either the LHS | RHS.
754     APInt IKnownZero = RHSKnownZero | LHSKnownZero;
755     // Output known-1 bits are only known if set in both the LHS & RHS.
756     APInt IKnownOne = RHSKnownOne & LHSKnownOne;
757 
758     // If the client is only demanding bits that we know, return the known
759     // constant.
760     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
761       return Constant::getIntegerValue(ITy, IKnownOne);
762 
763     // If all of the demanded bits are known 1 on one side, return the other.
764     // These bits cannot contribute to the result of the 'and' in this
765     // context.
766     if (DemandedMask.isSubsetOf(LHSKnownZero | RHSKnownOne))
767       return I->getOperand(0);
768     if (DemandedMask.isSubsetOf(RHSKnownZero | LHSKnownOne))
769       return I->getOperand(1);
770 
771     KnownZero = std::move(IKnownZero);
772     KnownOne  = std::move(IKnownOne);
773     break;
774   }
775   case Instruction::Or: {
776     // We can simplify (X|Y) -> X or Y in the user's context if we know that
777     // only bits from X or Y are demanded.
778 
779     // If either the LHS or the RHS are One, the result is One.
780     computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
781                      CxtI);
782     computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
783                      CxtI);
784 
785     // Output known-0 bits are only known if clear in both the LHS & RHS.
786     APInt IKnownZero = RHSKnownZero & LHSKnownZero;
787     // Output known-1 are known to be set if set in either the LHS | RHS.
788     APInt IKnownOne = RHSKnownOne | LHSKnownOne;
789 
790     // If the client is only demanding bits that we know, return the known
791     // constant.
792     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
793       return Constant::getIntegerValue(ITy, IKnownOne);
794 
795     // If all of the demanded bits are known zero on one side, return the
796     // other.  These bits cannot contribute to the result of the 'or' in this
797     // context.
798     if (DemandedMask.isSubsetOf(LHSKnownOne | RHSKnownZero))
799       return I->getOperand(0);
800     if (DemandedMask.isSubsetOf(RHSKnownOne | LHSKnownZero))
801       return I->getOperand(1);
802 
803     KnownZero = std::move(IKnownZero);
804     KnownOne  = std::move(IKnownOne);
805     break;
806   }
807   case Instruction::Xor: {
808     // We can simplify (X^Y) -> X or Y in the user's context if we know that
809     // only bits from X or Y are demanded.
810 
811     computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
812                      CxtI);
813     computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
814                      CxtI);
815 
816     // Output known-0 bits are known if clear or set in both the LHS & RHS.
817     APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
818                        (RHSKnownOne & LHSKnownOne);
819     // Output known-1 are known to be set if set in only one of the LHS, RHS.
820     APInt IKnownOne =  (RHSKnownZero & LHSKnownOne) |
821                        (RHSKnownOne & LHSKnownZero);
822 
823     // If the client is only demanding bits that we know, return the known
824     // constant.
825     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
826       return Constant::getIntegerValue(ITy, IKnownOne);
827 
828     // If all of the demanded bits are known zero on one side, return the
829     // other.
830     if (DemandedMask.isSubsetOf(RHSKnownZero))
831       return I->getOperand(0);
832     if (DemandedMask.isSubsetOf(LHSKnownZero))
833       return I->getOperand(1);
834 
835     // Output known-0 bits are known if clear or set in both the LHS & RHS.
836     KnownZero = std::move(IKnownZero);
837     // Output known-1 are known to be set if set in only one of the LHS, RHS.
838     KnownOne  = std::move(IKnownOne);
839     break;
840   }
841   default:
842     // Compute the KnownZero/KnownOne bits to simplify things downstream.
843     computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);
844 
845     // If this user is only demanding bits that we know, return the known
846     // constant.
847     if (DemandedMask.isSubsetOf(KnownZero|KnownOne))
848       return Constant::getIntegerValue(ITy, KnownOne);
849 
850     break;
851   }
852 
853   return nullptr;
854 }
855 
856 
/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
/// "E1 = (X lsr C1) << C2", where C1 and C2 are constants, into
/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
/// of "C2 - C1".
///
/// Suppose E1 and E2 generally differ in bits S = {bm, bm+1, ..., bn},
/// without considering the specific value X holds. This transformation is
/// legal iff one of the following conditions holds:
///  1) All the bits in S are 0; in this case E1 == E2.
///  2) We don't care about those bits in S, per the input DemandedMask.
///  3) A combination of 1) and 2): some bits in S are 0, and we don't care
///     about the rest of the bits.
///
/// Currently we only test condition 2).
///
/// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
/// not successful.
Value *
InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
                                         Instruction *Shl, const APInt &ShlOp1,
                                         const APInt &DemandedMask,
                                         APInt &KnownZero, APInt &KnownOne) {
  // A zero shift amount on either side makes this a no-op pair; leave it.
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.

  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  // Out-of-range shift amounts produce undef; don't reason about them.
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undef.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  // The final shl zeroes the ShlAmt low bits of E1; record ShlAmt-1 of them
  // (NOTE(review): this looks conservative by one bit — confirm intent),
  // restricted to the bits the caller actually demands.
  KnownOne.clearAllBits();
  KnownZero.setLowBits(ShlAmt - 1);
  KnownZero &= DemandedMask;

  // BitMask1/BitMask2 simulate E1 and E2 applied to an all-ones X; comparing
  // them on the demanded bits tests condition 2) above.
  APInt BitMask1(APInt::getAllOnesValue(BitWidth));
  APInt BitMask2(APInt::getAllOnesValue(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // Check if condition 2) (see the comment on this function) is satisfied:
  // E1 and E2 agree on every demanded bit.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    // Equal shift amounts cancel out entirely.
    if (ShrAmt == ShlAmt)
      return VarX;

    // Creating a replacement shift only pays off if the shr dies with it.
    if (!Shr->hasOneUse())
      return nullptr;

    BinaryOperator *New;
    if (ShrAmt < ShlAmt) {
      // Net left shift; carry over the nsw/nuw flags of the original shl.
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      // Net right shift; preserve exactness of the original shr.
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}
937 
938 /// The specified value produces a vector with any number of elements.
939 /// DemandedElts contains the set of elements that are actually used by the
940 /// caller. This method analyzes which elements of the operand are undef and
941 /// returns that information in UndefElts.
942 ///
943 /// If the information about demanded elements can be used to simplify the
944 /// operation, the operation is simplified, then the resultant value is
945 /// returned.  This returns null if no change was made.
946 Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
947                                                 APInt &UndefElts,
948                                                 unsigned Depth) {
949   unsigned VWidth = V->getType()->getVectorNumElements();
950   APInt EltMask(APInt::getAllOnesValue(VWidth));
951   assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
952 
953   if (isa<UndefValue>(V)) {
954     // If the entire vector is undefined, just return this info.
955     UndefElts = EltMask;
956     return nullptr;
957   }
958 
959   if (DemandedElts == 0) { // If nothing is demanded, provide undef.
960     UndefElts = EltMask;
961     return UndefValue::get(V->getType());
962   }
963 
964   UndefElts = 0;
965 
966   // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
967   if (Constant *C = dyn_cast<Constant>(V)) {
968     // Check if this is identity. If so, return 0 since we are not simplifying
969     // anything.
970     if (DemandedElts.isAllOnesValue())
971       return nullptr;
972 
973     Type *EltTy = cast<VectorType>(V->getType())->getElementType();
974     Constant *Undef = UndefValue::get(EltTy);
975 
976     SmallVector<Constant*, 16> Elts;
977     for (unsigned i = 0; i != VWidth; ++i) {
978       if (!DemandedElts[i]) {   // If not demanded, set to undef.
979         Elts.push_back(Undef);
980         UndefElts.setBit(i);
981         continue;
982       }
983 
984       Constant *Elt = C->getAggregateElement(i);
985       if (!Elt) return nullptr;
986 
987       if (isa<UndefValue>(Elt)) {   // Already undef.
988         Elts.push_back(Undef);
989         UndefElts.setBit(i);
990       } else {                               // Otherwise, defined.
991         Elts.push_back(Elt);
992       }
993     }
994 
995     // If we changed the constant, return it.
996     Constant *NewCV = ConstantVector::get(Elts);
997     return NewCV != C ? NewCV : nullptr;
998   }
999 
1000   // Limit search depth.
1001   if (Depth == 10)
1002     return nullptr;
1003 
1004   // If multiple users are using the root value, proceed with
1005   // simplification conservatively assuming that all elements
1006   // are needed.
1007   if (!V->hasOneUse()) {
1008     // Quit if we find multiple users of a non-root value though.
1009     // They'll be handled when it's their turn to be visited by
1010     // the main instcombine process.
1011     if (Depth != 0)
1012       // TODO: Just compute the UndefElts information recursively.
1013       return nullptr;
1014 
1015     // Conservatively assume that all elements are needed.
1016     DemandedElts = EltMask;
1017   }
1018 
1019   Instruction *I = dyn_cast<Instruction>(V);
1020   if (!I) return nullptr;        // Only analyze instructions.
1021 
1022   bool MadeChange = false;
1023   APInt UndefElts2(VWidth, 0);
1024   APInt UndefElts3(VWidth, 0);
1025   Value *TmpV;
1026   switch (I->getOpcode()) {
1027   default: break;
1028 
1029   case Instruction::InsertElement: {
1030     // If this is a variable index, we don't know which element it overwrites.
1031     // demand exactly the same input as we produce.
1032     ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1033     if (!Idx) {
1034       // Note that we can't propagate undef elt info, because we don't know
1035       // which elt is getting updated.
1036       TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
1037                                         UndefElts2, Depth + 1);
1038       if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1039       break;
1040     }
1041 
1042     // If this is inserting an element that isn't demanded, remove this
1043     // insertelement.
1044     unsigned IdxNo = Idx->getZExtValue();
1045     if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1046       Worklist.Add(I);
1047       return I->getOperand(0);
1048     }
1049 
1050     // Otherwise, the element inserted overwrites whatever was there, so the
1051     // input demanded set is simpler than the output set.
1052     APInt DemandedElts2 = DemandedElts;
1053     DemandedElts2.clearBit(IdxNo);
1054     TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
1055                                       UndefElts, Depth + 1);
1056     if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1057 
1058     // The inserted element is defined.
1059     UndefElts.clearBit(IdxNo);
1060     break;
1061   }
1062   case Instruction::ShuffleVector: {
1063     ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
1064     unsigned LHSVWidth =
1065       Shuffle->getOperand(0)->getType()->getVectorNumElements();
1066     APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
1067     for (unsigned i = 0; i < VWidth; i++) {
1068       if (DemandedElts[i]) {
1069         unsigned MaskVal = Shuffle->getMaskValue(i);
1070         if (MaskVal != -1u) {
1071           assert(MaskVal < LHSVWidth * 2 &&
1072                  "shufflevector mask index out of range!");
1073           if (MaskVal < LHSVWidth)
1074             LeftDemanded.setBit(MaskVal);
1075           else
1076             RightDemanded.setBit(MaskVal - LHSVWidth);
1077         }
1078       }
1079     }
1080 
1081     APInt LHSUndefElts(LHSVWidth, 0);
1082     TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
1083                                       LHSUndefElts, Depth + 1);
1084     if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1085 
1086     APInt RHSUndefElts(LHSVWidth, 0);
1087     TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
1088                                       RHSUndefElts, Depth + 1);
1089     if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1090 
1091     bool NewUndefElts = false;
1092     unsigned LHSIdx = -1u, LHSValIdx = -1u;
1093     unsigned RHSIdx = -1u, RHSValIdx = -1u;
1094     bool LHSUniform = true;
1095     bool RHSUniform = true;
1096     for (unsigned i = 0; i < VWidth; i++) {
1097       unsigned MaskVal = Shuffle->getMaskValue(i);
1098       if (MaskVal == -1u) {
1099         UndefElts.setBit(i);
1100       } else if (!DemandedElts[i]) {
1101         NewUndefElts = true;
1102         UndefElts.setBit(i);
1103       } else if (MaskVal < LHSVWidth) {
1104         if (LHSUndefElts[MaskVal]) {
1105           NewUndefElts = true;
1106           UndefElts.setBit(i);
1107         } else {
1108           LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
1109           LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
1110           LHSUniform = LHSUniform && (MaskVal == i);
1111         }
1112       } else {
1113         if (RHSUndefElts[MaskVal - LHSVWidth]) {
1114           NewUndefElts = true;
1115           UndefElts.setBit(i);
1116         } else {
1117           RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
1118           RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
1119           RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
1120         }
1121       }
1122     }
1123 
1124     // Try to transform shuffle with constant vector and single element from
1125     // this constant vector to single insertelement instruction.
1126     // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
1127     // insertelement V, C[ci], ci-n
1128     if (LHSVWidth == Shuffle->getType()->getNumElements()) {
1129       Value *Op = nullptr;
1130       Constant *Value = nullptr;
1131       unsigned Idx = -1u;
1132 
1133       // Find constant vector with the single element in shuffle (LHS or RHS).
1134       if (LHSIdx < LHSVWidth && RHSUniform) {
1135         if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
1136           Op = Shuffle->getOperand(1);
1137           Value = CV->getOperand(LHSValIdx);
1138           Idx = LHSIdx;
1139         }
1140       }
1141       if (RHSIdx < LHSVWidth && LHSUniform) {
1142         if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
1143           Op = Shuffle->getOperand(0);
1144           Value = CV->getOperand(RHSValIdx);
1145           Idx = RHSIdx;
1146         }
1147       }
1148       // Found constant vector with single element - convert to insertelement.
1149       if (Op && Value) {
1150         Instruction *New = InsertElementInst::Create(
1151             Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
1152             Shuffle->getName());
1153         InsertNewInstWith(New, *Shuffle);
1154         return New;
1155       }
1156     }
1157     if (NewUndefElts) {
1158       // Add additional discovered undefs.
1159       SmallVector<Constant*, 16> Elts;
1160       for (unsigned i = 0; i < VWidth; ++i) {
1161         if (UndefElts[i])
1162           Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
1163         else
1164           Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
1165                                           Shuffle->getMaskValue(i)));
1166       }
1167       I->setOperand(2, ConstantVector::get(Elts));
1168       MadeChange = true;
1169     }
1170     break;
1171   }
1172   case Instruction::Select: {
1173     APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
1174     if (ConstantVector* CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
1175       for (unsigned i = 0; i < VWidth; i++) {
1176         Constant *CElt = CV->getAggregateElement(i);
1177         // Method isNullValue always returns false when called on a
1178         // ConstantExpr. If CElt is a ConstantExpr then skip it in order to
1179         // to avoid propagating incorrect information.
1180         if (isa<ConstantExpr>(CElt))
1181           continue;
1182         if (CElt->isNullValue())
1183           LeftDemanded.clearBit(i);
1184         else
1185           RightDemanded.clearBit(i);
1186       }
1187     }
1188 
1189     TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded, UndefElts,
1190                                       Depth + 1);
1191     if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1192 
1193     TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
1194                                       UndefElts2, Depth + 1);
1195     if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }
1196 
1197     // Output elements are undefined if both are undefined.
1198     UndefElts &= UndefElts2;
1199     break;
1200   }
1201   case Instruction::BitCast: {
1202     // Vector->vector casts only.
1203     VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1204     if (!VTy) break;
1205     unsigned InVWidth = VTy->getNumElements();
1206     APInt InputDemandedElts(InVWidth, 0);
1207     UndefElts2 = APInt(InVWidth, 0);
1208     unsigned Ratio;
1209 
1210     if (VWidth == InVWidth) {
1211       // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1212       // elements as are demanded of us.
1213       Ratio = 1;
1214       InputDemandedElts = DemandedElts;
1215     } else if ((VWidth % InVWidth) == 0) {
1216       // If the number of elements in the output is a multiple of the number of
1217       // elements in the input then an input element is live if any of the
1218       // corresponding output elements are live.
1219       Ratio = VWidth / InVWidth;
1220       for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1221         if (DemandedElts[OutIdx])
1222           InputDemandedElts.setBit(OutIdx / Ratio);
1223     } else if ((InVWidth % VWidth) == 0) {
1224       // If the number of elements in the input is a multiple of the number of
1225       // elements in the output then an input element is live if the
1226       // corresponding output element is live.
1227       Ratio = InVWidth / VWidth;
1228       for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1229         if (DemandedElts[InIdx / Ratio])
1230           InputDemandedElts.setBit(InIdx);
1231     } else {
1232       // Unsupported so far.
1233       break;
1234     }
1235 
    // Simplify the cast's input vector, demanding only the elements
    // computed above.
1237     TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
1238                                       UndefElts2, Depth + 1);
1239     if (TmpV) {
1240       I->setOperand(0, TmpV);
1241       MadeChange = true;
1242     }
1243 
1244     if (VWidth == InVWidth) {
1245       UndefElts = UndefElts2;
1246     } else if ((VWidth % InVWidth) == 0) {
1247       // If the number of elements in the output is a multiple of the number of
1248       // elements in the input then an output element is undef if the
1249       // corresponding input element is undef.
1250       for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1251         if (UndefElts2[OutIdx / Ratio])
1252           UndefElts.setBit(OutIdx);
1253     } else if ((InVWidth % VWidth) == 0) {
1254       // If the number of elements in the input is a multiple of the number of
1255       // elements in the output then an output element is undef if all of the
1256       // corresponding input elements are undef.
1257       for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1258         APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
1259         if (SubUndef.countPopulation() == Ratio)
1260           UndefElts.setBit(OutIdx);
1261       }
1262     } else {
1263       llvm_unreachable("Unimp");
1264     }
1265     break;
1266   }
1267   case Instruction::And:
1268   case Instruction::Or:
1269   case Instruction::Xor:
1270   case Instruction::Add:
1271   case Instruction::Sub:
1272   case Instruction::Mul:
    // Note that div/rem are deliberately not in this case list: they demand
    // all of their input elements, because dropping one could introduce a
    // divide by zero.
1274     TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
1275                                       Depth + 1);
1276     if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1277     TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
1278                                       UndefElts2, Depth + 1);
1279     if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }
1280 
1281     // Output elements are undefined if both are undefined.  Consider things
1282     // like undef&0.  The result is known zero, not undef.
1283     UndefElts &= UndefElts2;
1284     break;
1285   case Instruction::FPTrunc:
1286   case Instruction::FPExt:
1287     TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
1288                                       Depth + 1);
1289     if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
1290     break;
1291 
1292   case Instruction::Call: {
1293     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1294     if (!II) break;
1295     switch (II->getIntrinsicID()) {
1296     default: break;
1297 
1298     case Intrinsic::x86_xop_vfrcz_ss:
1299     case Intrinsic::x86_xop_vfrcz_sd:
1300       // The instructions for these intrinsics are speced to zero upper bits not
1301       // pass them through like other scalar intrinsics. So we shouldn't just
1302       // use Arg0 if DemandedElts[0] is clear like we do for other intrinsics.
1303       // Instead we should return a zero vector.
1304       if (!DemandedElts[0]) {
1305         Worklist.Add(II);
1306         return ConstantAggregateZero::get(II->getType());
1307       }
1308 
1309       // Only the lower element is used.
1310       DemandedElts = 1;
1311       TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1312                                         UndefElts, Depth + 1);
1313       if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1314 
1315       // Only the lower element is undefined. The high elements are zero.
1316       UndefElts = UndefElts[0];
1317       break;
1318 
1319     // Unary scalar-as-vector operations that work column-wise.
1320     case Intrinsic::x86_sse_rcp_ss:
1321     case Intrinsic::x86_sse_rsqrt_ss:
1322     case Intrinsic::x86_sse_sqrt_ss:
1323     case Intrinsic::x86_sse2_sqrt_sd:
1324       TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1325                                         UndefElts, Depth + 1);
1326       if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1327 
1328       // If lowest element of a scalar op isn't used then use Arg0.
1329       if (!DemandedElts[0]) {
1330         Worklist.Add(II);
1331         return II->getArgOperand(0);
1332       }
1333       // TODO: If only low elt lower SQRT to FSQRT (with rounding/exceptions
1334       // checks).
1335       break;
1336 
1337     // Binary scalar-as-vector operations that work column-wise. The high
1338     // elements come from operand 0. The low element is a function of both
1339     // operands.
1340     case Intrinsic::x86_sse_min_ss:
1341     case Intrinsic::x86_sse_max_ss:
1342     case Intrinsic::x86_sse_cmp_ss:
1343     case Intrinsic::x86_sse2_min_sd:
1344     case Intrinsic::x86_sse2_max_sd:
1345     case Intrinsic::x86_sse2_cmp_sd: {
1346       TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
1347                                         UndefElts, Depth + 1);
1348       if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1349 
1350       // If lowest element of a scalar op isn't used then use Arg0.
1351       if (!DemandedElts[0]) {
1352         Worklist.Add(II);
1353         return II->getArgOperand(0);
1354       }
1355 
1356       // Only lower element is used for operand 1.
1357       DemandedElts = 1;
1358       TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
1359                                         UndefElts2, Depth + 1);
1360       if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1361 
1362       // Lower element is undefined if both lower elements are undefined.
1363       // Consider things like undef&0.  The result is known zero, not undef.
1364       if (!UndefElts2[0])
1365         UndefElts.clearBit(0);
1366 
1367       break;
1368     }
1369 
1370     // Binary scalar-as-vector operations that work column-wise. The high
1371     // elements come from operand 0 and the low element comes from operand 1.
1372     case Intrinsic::x86_sse41_round_ss:
1373     case Intrinsic::x86_sse41_round_sd: {
1374       // Don't use the low element of operand 0.
1375       APInt DemandedElts2 = DemandedElts;
1376       DemandedElts2.clearBit(0);
1377       TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts2,
1378                                         UndefElts, Depth + 1);
1379       if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
1380 
1381       // If lowest element of a scalar op isn't used then use Arg0.
1382       if (!DemandedElts[0]) {
1383         Worklist.Add(II);
1384         return II->getArgOperand(0);
1385       }
1386 
1387       // Only lower element is used for operand 1.
1388       DemandedElts = 1;
1389       TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
1390                                         UndefElts2, Depth + 1);
1391       if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
1392 
1393       // Take the high undef elements from operand 0 and take the lower element
1394       // from operand 1.
1395       UndefElts.clearBit(0);
1396       UndefElts |= UndefElts2[0];
1397       break;
1398     }
1399 
    // Three input scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element is a function of all
    // three inputs.
    case Intrinsic::x86_avx512_mask_add_ss_round:
    case Intrinsic::x86_avx512_mask_div_ss_round:
    case Intrinsic::x86_avx512_mask_mul_ss_round:
    case Intrinsic::x86_avx512_mask_sub_ss_round:
    case Intrinsic::x86_avx512_mask_max_ss_round:
    case Intrinsic::x86_avx512_mask_min_ss_round:
    case Intrinsic::x86_avx512_mask_add_sd_round:
    case Intrinsic::x86_avx512_mask_div_sd_round:
    case Intrinsic::x86_avx512_mask_mul_sd_round:
    case Intrinsic::x86_avx512_mask_sub_sd_round:
    case Intrinsic::x86_avx512_mask_max_sd_round:
    case Intrinsic::x86_avx512_mask_min_sd_round:
    case Intrinsic::x86_fma_vfmadd_ss:
    case Intrinsic::x86_fma_vfmsub_ss:
    case Intrinsic::x86_fma_vfnmadd_ss:
    case Intrinsic::x86_fma_vfnmsub_ss:
    case Intrinsic::x86_fma_vfmadd_sd:
    case Intrinsic::x86_fma_vfmsub_sd:
    case Intrinsic::x86_fma_vfnmadd_sd:
    case Intrinsic::x86_fma_vfnmsub_sd:
    case Intrinsic::x86_avx512_mask_vfmadd_ss:
    case Intrinsic::x86_avx512_mask_vfmadd_sd:
    case Intrinsic::x86_avx512_maskz_vfmadd_ss:
    case Intrinsic::x86_avx512_maskz_vfmadd_sd:
      // Operand 0 supplies the upper (pass-through) elements, so it sees the
      // full demanded-element set, and its undef lanes seed the result's
      // UndefElts directly.
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0: every
      // demanded lane comes straight from the pass-through operand, so the
      // whole intrinsic call can be replaced by it.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1 and 2.
      // (APInt assignment from 1 leaves just bit 0 set.)
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0.  The result is known zero, not undef.
      // UndefElts[0] already reflects operand 0 from the recursion above, so
      // clearing the bit when either of the other two lanes is defined leaves
      // the bit set only when all three are undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;
1452 
    case Intrinsic::x86_avx512_mask3_vfmadd_ss:
    case Intrinsic::x86_avx512_mask3_vfmadd_sd:
    case Intrinsic::x86_avx512_mask3_vfmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfmsub_sd:
    case Intrinsic::x86_avx512_mask3_vfnmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfnmsub_sd:
      // These intrinsics get the passthru bits from operand 2, so it (not
      // operand 0) sees the full demanded-element set and seeds UndefElts.
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg2: every
      // demanded lane is just the pass-through value.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(2);
      }

      // Only lower element is used for operand 0 and 1.
      // (APInt assignment from 1 leaves just bit 0 set.)
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0.  The result is known zero, not undef.
      // UndefElts[0] already reflects operand 2 from the recursion above.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;
1485 
    case Intrinsic::x86_sse2_pmulu_dq:
    case Intrinsic::x86_sse41_pmuldq:
    case Intrinsic::x86_avx2_pmul_dq:
    case Intrinsic::x86_avx2_pmulu_dq:
    case Intrinsic::x86_avx512_pmul_dq_512:
    case Intrinsic::x86_avx512_pmulu_dq_512: {
      // PMULDQ/PMULUDQ: the inputs are twice as wide as the result, and each
      // result element is produced from the even-indexed (2*i) input elements,
      // so only those input lanes are demanded.
      Value *Op0 = II->getArgOperand(0);
      Value *Op1 = II->getArgOperand(1);
      unsigned InnerVWidth = Op0->getType()->getVectorNumElements();
      assert((VWidth * 2) == InnerVWidth && "Unexpected input size");

      // Map demanded result element i to demanded input element 2*i; the
      // odd-indexed input elements are never read.
      APInt InnerDemandedElts(InnerVWidth, 0);
      for (unsigned i = 0; i != VWidth; ++i)
        if (DemandedElts[i])
          InnerDemandedElts.setBit(i * 2);

      // Re-size the scratch undef masks to the wider input element count
      // before recursing into each operand.
      UndefElts2 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op0, InnerDemandedElts, UndefElts2,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      UndefElts3 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op1, InnerDemandedElts, UndefElts3,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // NOTE(review): the operand undef masks are computed but not folded
      // back into UndefElts here — the result is conservatively treated as
      // fully defined.
      break;
    }
1514 
    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
    case Intrinsic::x86_avx512_packuswb_512: {
      // PACK* intrinsics: the result has twice as many (half-width) elements
      // as each input, interleaved per 128-bit lane.
      auto *Ty0 = II->getArgOperand(0)->getType();
      unsigned InnerVWidth = Ty0->getVectorNumElements();
      assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

      unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
      unsigned VWidthPerLane = VWidth / NumLanes;
      unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

      // Per lane, pack the elements of the first input and then the second.
      // e.g.
      // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
      // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
      for (int OpNum = 0; OpNum != 2; ++OpNum) {
        // Build the demanded-element mask for this operand: result element
        // (LaneIdx + Elt + InnerVWidthPerLane*OpNum) comes from this
        // operand's element (Lane*InnerVWidthPerLane + Elt).
        APInt OpDemandedElts(InnerVWidth, 0);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          unsigned LaneIdx = Lane * VWidthPerLane;
          for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
            unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
            if (DemandedElts[Idx])
              OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
          }
        }

        // Demand elements from the operand.
        auto *Op = II->getArgOperand(OpNum);
        APInt OpUndefElts(InnerVWidth, 0);
        TmpV = SimplifyDemandedVectorElts(Op, OpDemandedElts, OpUndefElts,
                                          Depth + 1);
        if (TmpV) {
          II->setArgOperand(OpNum, TmpV);
          MadeChange = true;
        }

        // Pack the operand's UNDEF elements, one lane at a time.
        // Extract each lane's undef bits and shift them into the position
        // this operand's elements occupy in the packed result lane.
        OpUndefElts = OpUndefElts.zext(VWidth);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
          LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
          LaneElts = LaneElts.shl(InnerVWidthPerLane * (2 * Lane + OpNum));
          UndefElts |= LaneElts;
        }
      }
      break;
    }
1571 
    // PSHUFB
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
    // PERMILVAR
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512:
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    // PERMV
    case Intrinsic::x86_avx2_permd:
    case Intrinsic::x86_avx2_permps: {
      // Variable shuffles: each result lane is selected by the same-indexed
      // lane of the control vector (operand 1), so the result's demanded
      // elements map one-to-one onto the control's. The data operand (0) is
      // left alone — which of its lanes are read depends on runtime control
      // values.
      Value *Op1 = II->getArgOperand(1);
      TmpV = SimplifyDemandedVectorElts(Op1, DemandedElts, UndefElts,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      break;
    }
1592 
    // SSE4A instructions leave the upper 64-bits of the 128-bit result
    // in an undefined state.
    case Intrinsic::x86_sse4a_extrq:
    case Intrinsic::x86_sse4a_extrqi:
    case Intrinsic::x86_sse4a_insertq:
    case Intrinsic::x86_sse4a_insertqi:
      // Mark the top half of the result vector as undef so callers can
      // discard users of those lanes.
      UndefElts.setHighBits(VWidth / 2);
      break;
    case Intrinsic::amdgcn_buffer_load:
    case Intrinsic::amdgcn_buffer_load_format:
    case Intrinsic::amdgcn_image_sample:
    case Intrinsic::amdgcn_image_sample_cl:
    case Intrinsic::amdgcn_image_sample_d:
    case Intrinsic::amdgcn_image_sample_d_cl:
    case Intrinsic::amdgcn_image_sample_l:
    case Intrinsic::amdgcn_image_sample_b:
    case Intrinsic::amdgcn_image_sample_b_cl:
    case Intrinsic::amdgcn_image_sample_lz:
    case Intrinsic::amdgcn_image_sample_cd:
    case Intrinsic::amdgcn_image_sample_cd_cl:

    case Intrinsic::amdgcn_image_sample_c:
    case Intrinsic::amdgcn_image_sample_c_cl:
    case Intrinsic::amdgcn_image_sample_c_d:
    case Intrinsic::amdgcn_image_sample_c_d_cl:
    case Intrinsic::amdgcn_image_sample_c_l:
    case Intrinsic::amdgcn_image_sample_c_b:
    case Intrinsic::amdgcn_image_sample_c_b_cl:
    case Intrinsic::amdgcn_image_sample_c_lz:
    case Intrinsic::amdgcn_image_sample_c_cd:
    case Intrinsic::amdgcn_image_sample_c_cd_cl:

    case Intrinsic::amdgcn_image_sample_o:
    case Intrinsic::amdgcn_image_sample_cl_o:
    case Intrinsic::amdgcn_image_sample_d_o:
    case Intrinsic::amdgcn_image_sample_d_cl_o:
    case Intrinsic::amdgcn_image_sample_l_o:
    case Intrinsic::amdgcn_image_sample_b_o:
    case Intrinsic::amdgcn_image_sample_b_cl_o:
    case Intrinsic::amdgcn_image_sample_lz_o:
    case Intrinsic::amdgcn_image_sample_cd_o:
    case Intrinsic::amdgcn_image_sample_cd_cl_o:

    case Intrinsic::amdgcn_image_sample_c_o:
    case Intrinsic::amdgcn_image_sample_c_cl_o:
    case Intrinsic::amdgcn_image_sample_c_d_o:
    case Intrinsic::amdgcn_image_sample_c_d_cl_o:
    case Intrinsic::amdgcn_image_sample_c_l_o:
    case Intrinsic::amdgcn_image_sample_c_b_o:
    case Intrinsic::amdgcn_image_sample_c_b_cl_o:
    case Intrinsic::amdgcn_image_sample_c_lz_o:
    case Intrinsic::amdgcn_image_sample_c_cd_o:
    case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

    case Intrinsic::amdgcn_image_getlod: {
      // Try to shrink the vector width of AMDGPU loads/samples when only a
      // leading prefix of the result elements is demanded. Only handle the
      // case where the demanded elements form a low mask (0..k-1); anything
      // else (scalar result, or holes in the demanded set) is left alone.
      if (VWidth == 1 || !DemandedElts.isMask())
        return nullptr;

      // TODO: Handle 3 vectors when supported in code gen.
      // Round the demanded prefix length up to a power of two; bail if that
      // is no smaller than the current width.
      unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countTrailingOnes());
      if (NewNumElts == VWidth)
        return nullptr;

      // BasicBlock -> Function -> Module.
      Module *M = II->getParent()->getParent()->getParent();
      Type *EltTy = V->getType()->getVectorElementType();

      // A single demanded element degenerates to a scalar intrinsic result.
      Type *NewTy = (NewNumElts == 1) ? EltTy :
        VectorType::get(EltTy, NewNumElts);

      auto IID = II->getIntrinsicID();

      bool IsBuffer = IID == Intrinsic::amdgcn_buffer_load ||
                      IID == Intrinsic::amdgcn_buffer_load_format;

      // Buffer loads are overloaded only on the return type.
      Function *NewIntrin = IsBuffer ?
        Intrinsic::getDeclaration(M, IID, NewTy) :
        // Samplers have 3 mangled types.
        Intrinsic::getDeclaration(M, IID,
                                  { NewTy, II->getArgOperand(0)->getType(),
                                      II->getArgOperand(1)->getType()});

      // Rebuild the call with the same arguments but the narrower return
      // type, inserted right before the original.
      SmallVector<Value *, 5> Args;
      for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
        Args.push_back(II->getArgOperand(I));

      IRBuilderBase::InsertPointGuard Guard(*Builder);
      Builder->SetInsertPoint(II);

      CallInst *NewCall = Builder->CreateCall(NewIntrin, Args);
      NewCall->takeName(II);
      NewCall->copyMetadata(*II);

      if (!IsBuffer) {
        // For image intrinsics, shrink the dmask (channel-enable mask, arg 3)
        // to keep only the first NewNumElts enabled channels, matching the
        // narrowed result vector. Non-constant dmasks are left untouched.
        ConstantInt *DMask = dyn_cast<ConstantInt>(NewCall->getArgOperand(3));
        if (DMask) {
          unsigned DMaskVal = DMask->getZExtValue() & 0xf;

          unsigned PopCnt = 0;
          unsigned NewDMask = 0;
          for (unsigned I = 0; I < 4; ++I) {
            const unsigned Bit = 1 << I;
            if (!!(DMaskVal & Bit)) {
              // Stop copying set bits once we have NewNumElts of them.
              if (++PopCnt > NewNumElts)
                break;

              NewDMask |= Bit;
            }
          }

          NewCall->setArgOperand(3, ConstantInt::get(DMask->getType(), NewDMask));
        }
      }


      // Widen the narrow result back to the original type so users need not
      // change: scalar case uses an insertelement into undef...
      if (NewNumElts == 1) {
        return Builder->CreateInsertElement(UndefValue::get(V->getType()),
                                            NewCall, static_cast<uint64_t>(0));
      }

      // ...vector case uses a shufflevector with an identity mask; indices
      // past NewNumElts read undef lanes from the second (undef) operand.
      SmallVector<uint32_t, 8> EltMask;
      for (unsigned I = 0; I < VWidth; ++I)
        EltMask.push_back(I);

      Value *Shuffle = Builder->CreateShuffleVector(
        NewCall, UndefValue::get(NewTy), EltMask);

      // NOTE(review): this assignment is dead — we return unconditionally on
      // the next line and the non-null return already signals a change.
      MadeChange = true;
      return Shuffle;
    }
1722     }
1723     break;
1724   }
1725   }
1726   return MadeChange ? I : nullptr;
1727 }
1728