1 //===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains logic for simplifying instructions based on information
10 // about how they are used.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombineInternal.h"
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/IR/IntrinsicInst.h"
17 #include "llvm/IR/IntrinsicsAMDGPU.h"
18 #include "llvm/IR/IntrinsicsX86.h"
19 #include "llvm/IR/PatternMatch.h"
20 #include "llvm/Support/KnownBits.h"
21 
22 using namespace llvm;
23 using namespace llvm::PatternMatch;
24 
25 #define DEBUG_TYPE "instcombine"
26 
27 namespace {
28 
29 struct AMDGPUImageDMaskIntrinsic {
30   unsigned Intr;
31 };
32 
33 #define GET_AMDGPUImageDMaskIntrinsicTable_IMPL
34 #include "InstCombineTables.inc"
35 
36 } // end anonymous namespace
37 
38 /// Check to see if the specified operand of the specified instruction is a
39 /// constant integer. If so, check to see if there are any bits set in the
40 /// constant that are not demanded. If so, shrink the constant and return true.
41 static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
42                                    const APInt &Demanded) {
43   assert(I && "No instruction?");
44   assert(OpNo < I->getNumOperands() && "Operand index too large");
45 
46   // The operand must be a constant integer or splat integer.
47   Value *Op = I->getOperand(OpNo);
48   const APInt *C;
49   if (!match(Op, m_APInt(C)))
50     return false;
51 
52   // If there are no bits set that aren't demanded, nothing to do.
53   if (C->isSubsetOf(Demanded))
54     return false;
55 
56   // This instruction is producing bits that are not demanded. Shrink the RHS.
57   I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));
58 
59   return true;
60 }
61 
62 
63 
64 /// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
65 /// the instruction has any properties that allow us to simplify its operands.
66 bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
67   unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
68   KnownBits Known(BitWidth);
69   APInt DemandedMask(APInt::getAllOnesValue(BitWidth));
70 
71   Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
72                                      0, &Inst);
73   if (!V) return false;
74   if (V == &Inst) return true;
75   replaceInstUsesWith(Inst, V);
76   return true;
77 }
78 
79 /// This form of SimplifyDemandedBits simplifies the specified instruction
80 /// operand if possible, updating it in place. It returns true if it made any
81 /// change and false otherwise.
82 bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
83                                         const APInt &DemandedMask,
84                                         KnownBits &Known,
85                                         unsigned Depth) {
86   Use &U = I->getOperandUse(OpNo);
87   Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
88                                           Depth, I);
89   if (!NewVal) return false;
90   U = NewVal;
91   return true;
92 }
93 
94 
95 /// This function attempts to replace V with a simpler value based on the
96 /// demanded bits. When this function is called, it is known that only the bits
97 /// set in DemandedMask of the result of V are ever used downstream.
98 /// Consequently, depending on the mask and V, it may be possible to replace V
99 /// with a constant or one of its operands. In such cases, this function does
100 /// the replacement and returns true. In all other cases, it returns false after
101 /// analyzing the expression and setting KnownOne and known to be one in the
102 /// expression. Known.Zero contains all the bits that are known to be zero in
103 /// the expression. These are provided to potentially allow the caller (which
104 /// might recursively be SimplifyDemandedBits itself) to simplify the
105 /// expression.
106 /// Known.One and Known.Zero always follow the invariant that:
107 ///   Known.One & Known.Zero == 0.
108 /// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
109 /// Known.Zero may only be accurate for those bits set in DemandedMask. Note
110 /// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must all
111 /// be the same.
112 ///
113 /// This returns null if it did not change anything and it permits no
114 /// simplification.  This returns V itself if it did some simplification of V's
115 /// operands based on the information about what bits are demanded. This returns
116 /// some other non-null value if it found out that V is equal to another value
117 /// in the context where the specified bits are demanded, but not for all users.
118 Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
119                                              KnownBits &Known, unsigned Depth,
120                                              Instruction *CxtI) {
121   assert(V != nullptr && "Null pointer of Value???");
122   assert(Depth <= 6 && "Limit Search Depth");
123   uint32_t BitWidth = DemandedMask.getBitWidth();
124   Type *VTy = V->getType();
125   assert(
126       (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
127       Known.getBitWidth() == BitWidth &&
128       "Value *V, DemandedMask and Known must have same BitWidth");
129 
130   if (isa<Constant>(V)) {
131     computeKnownBits(V, Known, Depth, CxtI);
132     return nullptr;
133   }
134 
135   Known.resetAll();
136   if (DemandedMask.isNullValue())     // Not demanding any bits from V.
137     return UndefValue::get(VTy);
138 
139   if (Depth == 6)        // Limit search depth.
140     return nullptr;
141 
142   Instruction *I = dyn_cast<Instruction>(V);
143   if (!I) {
144     computeKnownBits(V, Known, Depth, CxtI);
145     return nullptr;        // Only analyze instructions.
146   }
147 
148   // If there are multiple uses of this value and we aren't at the root, then
149   // we can't do any simplifications of the operands, because DemandedMask
150   // only reflects the bits demanded by *one* of the users.
151   if (Depth != 0 && !I->hasOneUse())
152     return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);
153 
154   KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);
155 
156   // If this is the root being simplified, allow it to have multiple uses,
157   // just set the DemandedMask to all bits so that we can try to simplify the
158   // operands.  This allows visitTruncInst (for example) to simplify the
159   // operand of a trunc without duplicating all the logic below.
160   if (Depth == 0 && !V->hasOneUse())
161     DemandedMask.setAllBits();
162 
163   switch (I->getOpcode()) {
164   default:
165     computeKnownBits(I, Known, Depth, CxtI);
166     break;
167   case Instruction::And: {
168     // If either the LHS or the RHS are Zero, the result is zero.
169     if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
170         SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
171                              Depth + 1))
172       return I;
173     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
174     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
175 
176     // Output known-0 are known to be clear if zero in either the LHS | RHS.
177     APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
178     // Output known-1 bits are only known if set in both the LHS & RHS.
179     APInt IKnownOne = RHSKnown.One & LHSKnown.One;
180 
181     // If the client is only demanding bits that we know, return the known
182     // constant.
183     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
184       return Constant::getIntegerValue(VTy, IKnownOne);
185 
186     // If all of the demanded bits are known 1 on one side, return the other.
187     // These bits cannot contribute to the result of the 'and'.
188     if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
189       return I->getOperand(0);
190     if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
191       return I->getOperand(1);
192 
193     // If the RHS is a constant, see if we can simplify it.
194     if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
195       return I;
196 
197     Known.Zero = std::move(IKnownZero);
198     Known.One  = std::move(IKnownOne);
199     break;
200   }
201   case Instruction::Or: {
202     // If either the LHS or the RHS are One, the result is One.
203     if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
204         SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
205                              Depth + 1))
206       return I;
207     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
208     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
209 
210     // Output known-0 bits are only known if clear in both the LHS & RHS.
211     APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
212     // Output known-1 are known. to be set if s.et in either the LHS | RHS.
213     APInt IKnownOne = RHSKnown.One | LHSKnown.One;
214 
215     // If the client is only demanding bits that we know, return the known
216     // constant.
217     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
218       return Constant::getIntegerValue(VTy, IKnownOne);
219 
220     // If all of the demanded bits are known zero on one side, return the other.
221     // These bits cannot contribute to the result of the 'or'.
222     if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
223       return I->getOperand(0);
224     if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
225       return I->getOperand(1);
226 
227     // If the RHS is a constant, see if we can simplify it.
228     if (ShrinkDemandedConstant(I, 1, DemandedMask))
229       return I;
230 
231     Known.Zero = std::move(IKnownZero);
232     Known.One  = std::move(IKnownOne);
233     break;
234   }
235   case Instruction::Xor: {
236     if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
237         SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
238       return I;
239     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
240     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
241 
242     // Output known-0 bits are known if clear or set in both the LHS & RHS.
243     APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
244                        (RHSKnown.One & LHSKnown.One);
245     // Output known-1 are known to be set if set in only one of the LHS, RHS.
246     APInt IKnownOne =  (RHSKnown.Zero & LHSKnown.One) |
247                        (RHSKnown.One & LHSKnown.Zero);
248 
249     // If the client is only demanding bits that we know, return the known
250     // constant.
251     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
252       return Constant::getIntegerValue(VTy, IKnownOne);
253 
254     // If all of the demanded bits are known zero on one side, return the other.
255     // These bits cannot contribute to the result of the 'xor'.
256     if (DemandedMask.isSubsetOf(RHSKnown.Zero))
257       return I->getOperand(0);
258     if (DemandedMask.isSubsetOf(LHSKnown.Zero))
259       return I->getOperand(1);
260 
261     // If all of the demanded bits are known to be zero on one side or the
262     // other, turn this into an *inclusive* or.
263     //    e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
264     if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
265       Instruction *Or =
266         BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
267                                  I->getName());
268       return InsertNewInstWith(Or, *I);
269     }
270 
271     // If all of the demanded bits on one side are known, and all of the set
272     // bits on that side are also known to be set on the other side, turn this
273     // into an AND, as we know the bits will be cleared.
274     //    e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
275     if (DemandedMask.isSubsetOf(RHSKnown.Zero|RHSKnown.One) &&
276         RHSKnown.One.isSubsetOf(LHSKnown.One)) {
277       Constant *AndC = Constant::getIntegerValue(VTy,
278                                                  ~RHSKnown.One & DemandedMask);
279       Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
280       return InsertNewInstWith(And, *I);
281     }
282 
283     // If the RHS is a constant, see if we can simplify it.
284     // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
285     if (ShrinkDemandedConstant(I, 1, DemandedMask))
286       return I;
287 
288     // If our LHS is an 'and' and if it has one use, and if any of the bits we
289     // are flipping are known to be set, then the xor is just resetting those
290     // bits to zero.  We can just knock out bits from the 'and' and the 'xor',
291     // simplifying both of them.
292     if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
293       if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
294           isa<ConstantInt>(I->getOperand(1)) &&
295           isa<ConstantInt>(LHSInst->getOperand(1)) &&
296           (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
297         ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
298         ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
299         APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);
300 
301         Constant *AndC =
302           ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
303         Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
304         InsertNewInstWith(NewAnd, *I);
305 
306         Constant *XorC =
307           ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
308         Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
309         return InsertNewInstWith(NewXor, *I);
310       }
311 
312     // Output known-0 bits are known if clear or set in both the LHS & RHS.
313     Known.Zero = std::move(IKnownZero);
314     // Output known-1 are known to be set if set in only one of the LHS, RHS.
315     Known.One  = std::move(IKnownOne);
316     break;
317   }
318   case Instruction::Select: {
319     Value *LHS, *RHS;
320     SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
321     if (SPF == SPF_UMAX) {
322       // UMax(A, C) == A if ...
323       // The lowest non-zero bit of DemandMask is higher than the highest
324       // non-zero bit of C.
325       const APInt *C;
326       unsigned CTZ = DemandedMask.countTrailingZeros();
327       if (match(RHS, m_APInt(C)) && CTZ >= C->getActiveBits())
328         return LHS;
329     } else if (SPF == SPF_UMIN) {
330       // UMin(A, C) == A if ...
331       // The lowest non-zero bit of DemandMask is higher than the highest
332       // non-one bit of C.
333       // This comes from using DeMorgans on the above umax example.
334       const APInt *C;
335       unsigned CTZ = DemandedMask.countTrailingZeros();
336       if (match(RHS, m_APInt(C)) &&
337           CTZ >= C->getBitWidth() - C->countLeadingOnes())
338         return LHS;
339     }
340 
341     // If this is a select as part of any other min/max pattern, don't simplify
342     // any further in case we break the structure.
343     if (SPF != SPF_UNKNOWN)
344       return nullptr;
345 
346     if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
347         SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
348       return I;
349     assert(!RHSKnown.hasConflict() && "Bits known to be one AND zero?");
350     assert(!LHSKnown.hasConflict() && "Bits known to be one AND zero?");
351 
352     // If the operands are constants, see if we can simplify them.
353     // This is similar to ShrinkDemandedConstant, but for a select we want to
354     // try to keep the selected constants the same as icmp value constants, if
355     // we can. This helps not break apart (or helps put back together)
356     // canonical patterns like min and max.
357     auto CanonicalizeSelectConstant = [](Instruction *I, unsigned OpNo,
358                                          APInt DemandedMask) {
359       const APInt *SelC;
360       if (!match(I->getOperand(OpNo), m_APInt(SelC)))
361         return false;
362 
363       // Get the constant out of the ICmp, if there is one.
364       const APInt *CmpC;
365       ICmpInst::Predicate Pred;
366       if (!match(I->getOperand(0), m_c_ICmp(Pred, m_APInt(CmpC), m_Value())) ||
367           CmpC->getBitWidth() != SelC->getBitWidth())
368         return ShrinkDemandedConstant(I, OpNo, DemandedMask);
369 
370       // If the constant is already the same as the ICmp, leave it as-is.
371       if (*CmpC == *SelC)
372         return false;
373       // If the constants are not already the same, but can be with the demand
374       // mask, use the constant value from the ICmp.
375       if ((*CmpC & DemandedMask) == (*SelC & DemandedMask)) {
376         I->setOperand(OpNo, ConstantInt::get(I->getType(), *CmpC));
377         return true;
378       }
379       return ShrinkDemandedConstant(I, OpNo, DemandedMask);
380     };
381     if (CanonicalizeSelectConstant(I, 1, DemandedMask) ||
382         CanonicalizeSelectConstant(I, 2, DemandedMask))
383       return I;
384 
385     // Only known if known in both the LHS and RHS.
386     Known.One = RHSKnown.One & LHSKnown.One;
387     Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
388     break;
389   }
390   case Instruction::ZExt:
391   case Instruction::Trunc: {
392     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
393 
394     APInt InputDemandedMask = DemandedMask.zextOrTrunc(SrcBitWidth);
395     KnownBits InputKnown(SrcBitWidth);
396     if (SimplifyDemandedBits(I, 0, InputDemandedMask, InputKnown, Depth + 1))
397       return I;
398     assert(InputKnown.getBitWidth() == SrcBitWidth && "Src width changed?");
399     Known = InputKnown.zextOrTrunc(BitWidth);
400     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
401     break;
402   }
403   case Instruction::BitCast:
404     if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
405       return nullptr;  // vector->int or fp->int?
406 
407     if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
408       if (VectorType *SrcVTy =
409             dyn_cast<VectorType>(I->getOperand(0)->getType())) {
410         if (DstVTy->getNumElements() != SrcVTy->getNumElements())
411           // Don't touch a bitcast between vectors of different element counts.
412           return nullptr;
413       } else
414         // Don't touch a scalar-to-vector bitcast.
415         return nullptr;
416     } else if (I->getOperand(0)->getType()->isVectorTy())
417       // Don't touch a vector-to-scalar bitcast.
418       return nullptr;
419 
420     if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
421       return I;
422     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
423     break;
424   case Instruction::SExt: {
425     // Compute the bits in the result that are not present in the input.
426     unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
427 
428     APInt InputDemandedBits = DemandedMask.trunc(SrcBitWidth);
429 
430     // If any of the sign extended bits are demanded, we know that the sign
431     // bit is demanded.
432     if (DemandedMask.getActiveBits() > SrcBitWidth)
433       InputDemandedBits.setBit(SrcBitWidth-1);
434 
435     KnownBits InputKnown(SrcBitWidth);
436     if (SimplifyDemandedBits(I, 0, InputDemandedBits, InputKnown, Depth + 1))
437       return I;
438 
439     // If the input sign bit is known zero, or if the NewBits are not demanded
440     // convert this into a zero extension.
441     if (InputKnown.isNonNegative() ||
442         DemandedMask.getActiveBits() <= SrcBitWidth) {
443       // Convert to ZExt cast.
444       CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
445       return InsertNewInstWith(NewCast, *I);
446      }
447 
448     // If the sign bit of the input is known set or clear, then we know the
449     // top bits of the result.
450     Known = InputKnown.sext(BitWidth);
451     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
452     break;
453   }
454   case Instruction::Add:
455   case Instruction::Sub: {
456     /// If the high-bits of an ADD/SUB are not demanded, then we do not care
457     /// about the high bits of the operands.
458     unsigned NLZ = DemandedMask.countLeadingZeros();
459     // Right fill the mask of bits for this ADD/SUB to demand the most
460     // significant bit and all those below it.
461     APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
462     if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
463         SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
464         ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
465         SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
466       if (NLZ > 0) {
467         // Disable the nsw and nuw flags here: We can no longer guarantee that
468         // we won't wrap after simplification. Removing the nsw/nuw flags is
469         // legal here because the top bit is not demanded.
470         BinaryOperator &BinOP = *cast<BinaryOperator>(I);
471         BinOP.setHasNoSignedWrap(false);
472         BinOP.setHasNoUnsignedWrap(false);
473       }
474       return I;
475     }
476 
477     // If we are known to be adding/subtracting zeros to every bit below
478     // the highest demanded bit, we just return the other side.
479     if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
480       return I->getOperand(0);
481     // We can't do this with the LHS for subtraction, unless we are only
482     // demanding the LSB.
483     if ((I->getOpcode() == Instruction::Add ||
484          DemandedFromOps.isOneValue()) &&
485         DemandedFromOps.isSubsetOf(LHSKnown.Zero))
486       return I->getOperand(1);
487 
488     // Otherwise just compute the known bits of the result.
489     bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
490     Known = KnownBits::computeForAddSub(I->getOpcode() == Instruction::Add,
491                                         NSW, LHSKnown, RHSKnown);
492     break;
493   }
494   case Instruction::Shl: {
495     const APInt *SA;
496     if (match(I->getOperand(1), m_APInt(SA))) {
497       const APInt *ShrAmt;
498       if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt))))
499         if (Instruction *Shr = dyn_cast<Instruction>(I->getOperand(0)))
500           if (Value *R = simplifyShrShlDemandedBits(Shr, *ShrAmt, I, *SA,
501                                                     DemandedMask, Known))
502             return R;
503 
504       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
505       APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));
506 
507       // If the shift is NUW/NSW, then it does demand the high bits.
508       ShlOperator *IOp = cast<ShlOperator>(I);
509       if (IOp->hasNoSignedWrap())
510         DemandedMaskIn.setHighBits(ShiftAmt+1);
511       else if (IOp->hasNoUnsignedWrap())
512         DemandedMaskIn.setHighBits(ShiftAmt);
513 
514       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
515         return I;
516       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
517       Known.Zero <<= ShiftAmt;
518       Known.One  <<= ShiftAmt;
519       // low bits known zero.
520       if (ShiftAmt)
521         Known.Zero.setLowBits(ShiftAmt);
522     }
523     break;
524   }
525   case Instruction::LShr: {
526     const APInt *SA;
527     if (match(I->getOperand(1), m_APInt(SA))) {
528       uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
529 
530       // Unsigned shift right.
531       APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
532 
533       // If the shift is exact, then it does demand the low bits (and knows that
534       // they are zero).
535       if (cast<LShrOperator>(I)->isExact())
536         DemandedMaskIn.setLowBits(ShiftAmt);
537 
538       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
539         return I;
540       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
541       Known.Zero.lshrInPlace(ShiftAmt);
542       Known.One.lshrInPlace(ShiftAmt);
543       if (ShiftAmt)
544         Known.Zero.setHighBits(ShiftAmt);  // high bits known zero.
545     }
546     break;
547   }
548   case Instruction::AShr: {
549     // If this is an arithmetic shift right and only the low-bit is set, we can
550     // always convert this into a logical shr, even if the shift amount is
551     // variable.  The low bit of the shift cannot be an input sign bit unless
552     // the shift amount is >= the size of the datatype, which is undefined.
553     if (DemandedMask.isOneValue()) {
554       // Perform the logical shift right.
555       Instruction *NewVal = BinaryOperator::CreateLShr(
556                         I->getOperand(0), I->getOperand(1), I->getName());
557       return InsertNewInstWith(NewVal, *I);
558     }
559 
560     // If the sign bit is the only bit demanded by this ashr, then there is no
561     // need to do it, the shift doesn't change the high bit.
562     if (DemandedMask.isSignMask())
563       return I->getOperand(0);
564 
565     const APInt *SA;
566     if (match(I->getOperand(1), m_APInt(SA))) {
567       uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
568 
569       // Signed shift right.
570       APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
571       // If any of the high bits are demanded, we should set the sign bit as
572       // demanded.
573       if (DemandedMask.countLeadingZeros() <= ShiftAmt)
574         DemandedMaskIn.setSignBit();
575 
576       // If the shift is exact, then it does demand the low bits (and knows that
577       // they are zero).
578       if (cast<AShrOperator>(I)->isExact())
579         DemandedMaskIn.setLowBits(ShiftAmt);
580 
581       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
582         return I;
583 
584       unsigned SignBits = ComputeNumSignBits(I->getOperand(0), Depth + 1, CxtI);
585 
586       assert(!Known.hasConflict() && "Bits known to be one AND zero?");
587       // Compute the new bits that are at the top now plus sign bits.
588       APInt HighBits(APInt::getHighBitsSet(
589           BitWidth, std::min(SignBits + ShiftAmt - 1, BitWidth)));
590       Known.Zero.lshrInPlace(ShiftAmt);
591       Known.One.lshrInPlace(ShiftAmt);
592 
593       // If the input sign bit is known to be zero, or if none of the top bits
594       // are demanded, turn this into an unsigned shift right.
595       assert(BitWidth > ShiftAmt && "Shift amount not saturated?");
596       if (Known.Zero[BitWidth-ShiftAmt-1] ||
597           !DemandedMask.intersects(HighBits)) {
598         BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
599                                                           I->getOperand(1));
600         LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
601         return InsertNewInstWith(LShr, *I);
602       } else if (Known.One[BitWidth-ShiftAmt-1]) { // New bits are known one.
603         Known.One |= HighBits;
604       }
605     }
606     break;
607   }
608   case Instruction::UDiv: {
609     // UDiv doesn't demand low bits that are zero in the divisor.
610     const APInt *SA;
611     if (match(I->getOperand(1), m_APInt(SA))) {
612       // If the shift is exact, then it does demand the low bits.
613       if (cast<UDivOperator>(I)->isExact())
614         break;
615 
616       // FIXME: Take the demanded mask of the result into account.
617       unsigned RHSTrailingZeros = SA->countTrailingZeros();
618       APInt DemandedMaskIn =
619           APInt::getHighBitsSet(BitWidth, BitWidth - RHSTrailingZeros);
620       if (SimplifyDemandedBits(I, 0, DemandedMaskIn, LHSKnown, Depth + 1))
621         return I;
622 
623       // Propagate zero bits from the input.
624       Known.Zero.setHighBits(std::min(
625           BitWidth, LHSKnown.Zero.countLeadingOnes() + RHSTrailingZeros));
626     }
627     break;
628   }
629   case Instruction::SRem:
630     if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
631       // X % -1 demands all the bits because we don't want to introduce
632       // INT_MIN % -1 (== undef) by accident.
633       if (Rem->isMinusOne())
634         break;
635       APInt RA = Rem->getValue().abs();
636       if (RA.isPowerOf2()) {
637         if (DemandedMask.ult(RA))    // srem won't affect demanded bits
638           return I->getOperand(0);
639 
640         APInt LowBits = RA - 1;
641         APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
642         if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
643           return I;
644 
645         // The low bits of LHS are unchanged by the srem.
646         Known.Zero = LHSKnown.Zero & LowBits;
647         Known.One = LHSKnown.One & LowBits;
648 
649         // If LHS is non-negative or has all low bits zero, then the upper bits
650         // are all zero.
651         if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
652           Known.Zero |= ~LowBits;
653 
654         // If LHS is negative and not all low bits are zero, then the upper bits
655         // are all one.
656         if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
657           Known.One |= ~LowBits;
658 
659         assert(!Known.hasConflict() && "Bits known to be one AND zero?");
660         break;
661       }
662     }
663 
664     // The sign bit is the LHS's sign bit, except when the result of the
665     // remainder is zero.
666     if (DemandedMask.isSignBitSet()) {
667       computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
668       // If it's known zero, our sign bit is also zero.
669       if (LHSKnown.isNonNegative())
670         Known.makeNonNegative();
671     }
672     break;
673   case Instruction::URem: {
674     KnownBits Known2(BitWidth);
675     APInt AllOnes = APInt::getAllOnesValue(BitWidth);
676     if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
677         SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
678       return I;
679 
680     unsigned Leaders = Known2.countMinLeadingZeros();
681     Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
682     break;
683   }
684   case Instruction::Call:
685     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
686       switch (II->getIntrinsicID()) {
687       default: break;
688       case Intrinsic::bswap: {
689         // If the only bits demanded come from one byte of the bswap result,
690         // just shift the input byte into position to eliminate the bswap.
691         unsigned NLZ = DemandedMask.countLeadingZeros();
692         unsigned NTZ = DemandedMask.countTrailingZeros();
693 
694         // Round NTZ down to the next byte.  If we have 11 trailing zeros, then
695         // we need all the bits down to bit 8.  Likewise, round NLZ.  If we
696         // have 14 leading zeros, round to 8.
697         NLZ &= ~7;
698         NTZ &= ~7;
699         // If we need exactly one byte, we can do this transformation.
700         if (BitWidth-NLZ-NTZ == 8) {
701           unsigned ResultBit = NTZ;
702           unsigned InputBit = BitWidth-NTZ-8;
703 
704           // Replace this with either a left or right shift to get the byte into
705           // the right place.
706           Instruction *NewVal;
707           if (InputBit > ResultBit)
708             NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
709                     ConstantInt::get(I->getType(), InputBit-ResultBit));
710           else
711             NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
712                     ConstantInt::get(I->getType(), ResultBit-InputBit));
713           NewVal->takeName(I);
714           return InsertNewInstWith(NewVal, *I);
715         }
716 
717         // TODO: Could compute known zero/one bits based on the input.
718         break;
719       }
720       case Intrinsic::fshr:
721       case Intrinsic::fshl: {
722         const APInt *SA;
723         if (!match(I->getOperand(2), m_APInt(SA)))
724           break;
725 
726         // Normalize to funnel shift left. APInt shifts of BitWidth are well-
727         // defined, so no need to special-case zero shifts here.
728         uint64_t ShiftAmt = SA->urem(BitWidth);
729         if (II->getIntrinsicID() == Intrinsic::fshr)
730           ShiftAmt = BitWidth - ShiftAmt;
731 
732         APInt DemandedMaskLHS(DemandedMask.lshr(ShiftAmt));
733         APInt DemandedMaskRHS(DemandedMask.shl(BitWidth - ShiftAmt));
734         if (SimplifyDemandedBits(I, 0, DemandedMaskLHS, LHSKnown, Depth + 1) ||
735             SimplifyDemandedBits(I, 1, DemandedMaskRHS, RHSKnown, Depth + 1))
736           return I;
737 
738         Known.Zero = LHSKnown.Zero.shl(ShiftAmt) |
739                      RHSKnown.Zero.lshr(BitWidth - ShiftAmt);
740         Known.One = LHSKnown.One.shl(ShiftAmt) |
741                     RHSKnown.One.lshr(BitWidth - ShiftAmt);
742         break;
743       }
744       case Intrinsic::x86_mmx_pmovmskb:
745       case Intrinsic::x86_sse_movmsk_ps:
746       case Intrinsic::x86_sse2_movmsk_pd:
747       case Intrinsic::x86_sse2_pmovmskb_128:
748       case Intrinsic::x86_avx_movmsk_ps_256:
749       case Intrinsic::x86_avx_movmsk_pd_256:
750       case Intrinsic::x86_avx2_pmovmskb: {
751         // MOVMSK copies the vector elements' sign bits to the low bits
752         // and zeros the high bits.
753         unsigned ArgWidth;
754         if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
755           ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
756         } else {
757           auto Arg = II->getArgOperand(0);
758           auto ArgType = cast<VectorType>(Arg->getType());
759           ArgWidth = ArgType->getNumElements();
760         }
761 
762         // If we don't need any of low bits then return zero,
763         // we know that DemandedMask is non-zero already.
764         APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
765         if (DemandedElts.isNullValue())
766           return ConstantInt::getNullValue(VTy);
767 
768         // We know that the upper bits are set to zero.
769         Known.Zero.setBitsFrom(ArgWidth);
770         return nullptr;
771       }
772       case Intrinsic::x86_sse42_crc32_64_64:
773         Known.Zero.setBitsFrom(32);
774         return nullptr;
775       }
776     }
777     computeKnownBits(V, Known, Depth, CxtI);
778     break;
779   }
780 
781   // If the client is only demanding bits that we know, return the known
782   // constant.
783   if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
784     return Constant::getIntegerValue(VTy, Known.One);
785   return nullptr;
786 }
787 
788 /// Helper routine of SimplifyDemandedUseBits. It computes Known
789 /// bits. It also tries to handle simplifications that can be done based on
790 /// DemandedMask, but without modifying the Instruction.
791 Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
792                                                      const APInt &DemandedMask,
793                                                      KnownBits &Known,
794                                                      unsigned Depth,
795                                                      Instruction *CxtI) {
796   unsigned BitWidth = DemandedMask.getBitWidth();
797   Type *ITy = I->getType();
798 
799   KnownBits LHSKnown(BitWidth);
800   KnownBits RHSKnown(BitWidth);
801 
802   // Despite the fact that we can't simplify this instruction in all User's
803   // context, we can at least compute the known bits, and we can
804   // do simplifications that apply to *just* the one user if we know that
805   // this instruction has a simpler value in that context.
806   switch (I->getOpcode()) {
807   case Instruction::And: {
808     // If either the LHS or the RHS are Zero, the result is zero.
809     computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
810     computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
811                      CxtI);
812 
813     // Output known-0 are known to be clear if zero in either the LHS | RHS.
814     APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
815     // Output known-1 bits are only known if set in both the LHS & RHS.
816     APInt IKnownOne = RHSKnown.One & LHSKnown.One;
817 
818     // If the client is only demanding bits that we know, return the known
819     // constant.
820     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
821       return Constant::getIntegerValue(ITy, IKnownOne);
822 
823     // If all of the demanded bits are known 1 on one side, return the other.
824     // These bits cannot contribute to the result of the 'and' in this
825     // context.
826     if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
827       return I->getOperand(0);
828     if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
829       return I->getOperand(1);
830 
831     Known.Zero = std::move(IKnownZero);
832     Known.One  = std::move(IKnownOne);
833     break;
834   }
835   case Instruction::Or: {
836     // We can simplify (X|Y) -> X or Y in the user's context if we know that
837     // only bits from X or Y are demanded.
838 
839     // If either the LHS or the RHS are One, the result is One.
840     computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
841     computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
842                      CxtI);
843 
844     // Output known-0 bits are only known if clear in both the LHS & RHS.
845     APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
846     // Output known-1 are known to be set if set in either the LHS | RHS.
847     APInt IKnownOne = RHSKnown.One | LHSKnown.One;
848 
849     // If the client is only demanding bits that we know, return the known
850     // constant.
851     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
852       return Constant::getIntegerValue(ITy, IKnownOne);
853 
854     // If all of the demanded bits are known zero on one side, return the
855     // other.  These bits cannot contribute to the result of the 'or' in this
856     // context.
857     if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
858       return I->getOperand(0);
859     if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
860       return I->getOperand(1);
861 
862     Known.Zero = std::move(IKnownZero);
863     Known.One  = std::move(IKnownOne);
864     break;
865   }
866   case Instruction::Xor: {
867     // We can simplify (X^Y) -> X or Y in the user's context if we know that
868     // only bits from X or Y are demanded.
869 
870     computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
871     computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1,
872                      CxtI);
873 
874     // Output known-0 bits are known if clear or set in both the LHS & RHS.
875     APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
876                        (RHSKnown.One & LHSKnown.One);
877     // Output known-1 are known to be set if set in only one of the LHS, RHS.
878     APInt IKnownOne =  (RHSKnown.Zero & LHSKnown.One) |
879                        (RHSKnown.One & LHSKnown.Zero);
880 
881     // If the client is only demanding bits that we know, return the known
882     // constant.
883     if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
884       return Constant::getIntegerValue(ITy, IKnownOne);
885 
886     // If all of the demanded bits are known zero on one side, return the
887     // other.
888     if (DemandedMask.isSubsetOf(RHSKnown.Zero))
889       return I->getOperand(0);
890     if (DemandedMask.isSubsetOf(LHSKnown.Zero))
891       return I->getOperand(1);
892 
893     // Output known-0 bits are known if clear or set in both the LHS & RHS.
894     Known.Zero = std::move(IKnownZero);
895     // Output known-1 are known to be set if set in only one of the LHS, RHS.
896     Known.One  = std::move(IKnownOne);
897     break;
898   }
899   default:
900     // Compute the Known bits to simplify things downstream.
901     computeKnownBits(I, Known, Depth, CxtI);
902 
903     // If this user is only demanding bits that we know, return the known
904     // constant.
905     if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
906       return Constant::getIntegerValue(ITy, Known.One);
907 
908     break;
909   }
910 
911   return nullptr;
912 }
913 
914 
915 /// Helper routine of SimplifyDemandedUseBits. It tries to simplify
916 /// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into
917 /// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
918 /// of "C2-C1".
919 ///
920 /// Suppose E1 and E2 are generally different in bits S={bm, bm+1,
921 /// ..., bn}, without considering the specific value X is holding.
922 /// This transformation is legal iff one of following conditions is hold:
923 ///  1) All the bit in S are 0, in this case E1 == E2.
924 ///  2) We don't care those bits in S, per the input DemandedMask.
925 ///  3) Combination of 1) and 2). Some bits in S are 0, and we don't care the
926 ///     rest bits.
927 ///
928 /// Currently we only test condition 2).
929 ///
930 /// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
931 /// not successful.
932 Value *
933 InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
934                                          Instruction *Shl, const APInt &ShlOp1,
935                                          const APInt &DemandedMask,
936                                          KnownBits &Known) {
937   if (!ShlOp1 || !ShrOp1)
938     return nullptr; // No-op.
939 
940   Value *VarX = Shr->getOperand(0);
941   Type *Ty = VarX->getType();
942   unsigned BitWidth = Ty->getScalarSizeInBits();
943   if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
944     return nullptr; // Undef.
945 
946   unsigned ShlAmt = ShlOp1.getZExtValue();
947   unsigned ShrAmt = ShrOp1.getZExtValue();
948 
949   Known.One.clearAllBits();
950   Known.Zero.setLowBits(ShlAmt - 1);
951   Known.Zero &= DemandedMask;
952 
953   APInt BitMask1(APInt::getAllOnesValue(BitWidth));
954   APInt BitMask2(APInt::getAllOnesValue(BitWidth));
955 
956   bool isLshr = (Shr->getOpcode() == Instruction::LShr);
957   BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
958                       (BitMask1.ashr(ShrAmt) << ShlAmt);
959 
960   if (ShrAmt <= ShlAmt) {
961     BitMask2 <<= (ShlAmt - ShrAmt);
962   } else {
963     BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt):
964                         BitMask2.ashr(ShrAmt - ShlAmt);
965   }
966 
967   // Check if condition-2 (see the comment to this function) is satified.
968   if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
969     if (ShrAmt == ShlAmt)
970       return VarX;
971 
972     if (!Shr->hasOneUse())
973       return nullptr;
974 
975     BinaryOperator *New;
976     if (ShrAmt < ShlAmt) {
977       Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
978       New = BinaryOperator::CreateShl(VarX, Amt);
979       BinaryOperator *Orig = cast<BinaryOperator>(Shl);
980       New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
981       New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
982     } else {
983       Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
984       New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
985                      BinaryOperator::CreateAShr(VarX, Amt);
986       if (cast<BinaryOperator>(Shr)->isExact())
987         New->setIsExact(true);
988     }
989 
990     return InsertNewInstWith(New, *Shl);
991   }
992 
993   return nullptr;
994 }
995 
996 /// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
997 ///
998 /// Note: This only supports non-TFE/LWE image intrinsic calls; those have
999 ///       struct returns.
1000 Value *InstCombiner::simplifyAMDGCNMemoryIntrinsicDemanded(IntrinsicInst *II,
1001                                                            APInt DemandedElts,
1002                                                            int DMaskIdx) {
1003 
1004   // FIXME: Allow v3i16/v3f16 in buffer intrinsics when the types are fully supported.
1005   if (DMaskIdx < 0 &&
1006       II->getType()->getScalarSizeInBits() != 32 &&
1007       DemandedElts.getActiveBits() == 3)
1008     return nullptr;
1009 
1010   unsigned VWidth = II->getType()->getVectorNumElements();
1011   if (VWidth == 1)
1012     return nullptr;
1013 
1014   IRBuilderBase::InsertPointGuard Guard(Builder);
1015   Builder.SetInsertPoint(II);
1016 
1017   // Assume the arguments are unchanged and later override them, if needed.
1018   SmallVector<Value *, 16> Args(II->arg_begin(), II->arg_end());
1019 
1020   if (DMaskIdx < 0) {
1021     // Buffer case.
1022 
1023     const unsigned ActiveBits = DemandedElts.getActiveBits();
1024     const unsigned UnusedComponentsAtFront = DemandedElts.countTrailingZeros();
1025 
1026     // Start assuming the prefix of elements is demanded, but possibly clear
1027     // some other bits if there are trailing zeros (unused components at front)
1028     // and update offset.
1029     DemandedElts = (1 << ActiveBits) - 1;
1030 
1031     if (UnusedComponentsAtFront > 0) {
1032       static const unsigned InvalidOffsetIdx = 0xf;
1033 
1034       unsigned OffsetIdx;
1035       switch (II->getIntrinsicID()) {
1036       case Intrinsic::amdgcn_raw_buffer_load:
1037         OffsetIdx = 1;
1038         break;
1039       case Intrinsic::amdgcn_s_buffer_load:
1040         // If resulting type is vec3, there is no point in trimming the
1041         // load with updated offset, as the vec3 would most likely be widened to
1042         // vec4 anyway during lowering.
1043         if (ActiveBits == 4 && UnusedComponentsAtFront == 1)
1044           OffsetIdx = InvalidOffsetIdx;
1045         else
1046           OffsetIdx = 1;
1047         break;
1048       case Intrinsic::amdgcn_struct_buffer_load:
1049         OffsetIdx = 2;
1050         break;
1051       default:
1052         // TODO: handle tbuffer* intrinsics.
1053         OffsetIdx = InvalidOffsetIdx;
1054         break;
1055       }
1056 
1057       if (OffsetIdx != InvalidOffsetIdx) {
1058         // Clear demanded bits and update the offset.
1059         DemandedElts &= ~((1 << UnusedComponentsAtFront) - 1);
1060         auto *Offset = II->getArgOperand(OffsetIdx);
1061         unsigned SingleComponentSizeInBits =
1062             getDataLayout().getTypeSizeInBits(II->getType()->getScalarType());
1063         unsigned OffsetAdd =
1064             UnusedComponentsAtFront * SingleComponentSizeInBits / 8;
1065         auto *OffsetAddVal = ConstantInt::get(Offset->getType(), OffsetAdd);
1066         Args[OffsetIdx] = Builder.CreateAdd(Offset, OffsetAddVal);
1067       }
1068     }
1069   } else {
1070     // Image case.
1071 
1072     ConstantInt *DMask = cast<ConstantInt>(II->getArgOperand(DMaskIdx));
1073     unsigned DMaskVal = DMask->getZExtValue() & 0xf;
1074 
1075     // Mask off values that are undefined because the dmask doesn't cover them
1076     DemandedElts &= (1 << countPopulation(DMaskVal)) - 1;
1077 
1078     unsigned NewDMaskVal = 0;
1079     unsigned OrigLoadIdx = 0;
1080     for (unsigned SrcIdx = 0; SrcIdx < 4; ++SrcIdx) {
1081       const unsigned Bit = 1 << SrcIdx;
1082       if (!!(DMaskVal & Bit)) {
1083         if (!!DemandedElts[OrigLoadIdx])
1084           NewDMaskVal |= Bit;
1085         OrigLoadIdx++;
1086       }
1087     }
1088 
1089     if (DMaskVal != NewDMaskVal)
1090       Args[DMaskIdx] = ConstantInt::get(DMask->getType(), NewDMaskVal);
1091   }
1092 
1093   unsigned NewNumElts = DemandedElts.countPopulation();
1094   if (!NewNumElts)
1095     return UndefValue::get(II->getType());
1096 
1097   if (NewNumElts >= VWidth && DemandedElts.isMask()) {
1098     if (DMaskIdx >= 0)
1099       II->setArgOperand(DMaskIdx, Args[DMaskIdx]);
1100     return nullptr;
1101   }
1102 
1103   // Determine the overload types of the original intrinsic.
1104   auto IID = II->getIntrinsicID();
1105   SmallVector<Intrinsic::IITDescriptor, 16> Table;
1106   getIntrinsicInfoTableEntries(IID, Table);
1107   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
1108 
1109   // Validate function argument and return types, extracting overloaded types
1110   // along the way.
1111   FunctionType *FTy = II->getCalledFunction()->getFunctionType();
1112   SmallVector<Type *, 6> OverloadTys;
1113   Intrinsic::matchIntrinsicSignature(FTy, TableRef, OverloadTys);
1114 
1115   Module *M = II->getParent()->getParent()->getParent();
1116   Type *EltTy = II->getType()->getVectorElementType();
1117   Type *NewTy = (NewNumElts == 1) ? EltTy : VectorType::get(EltTy, NewNumElts);
1118 
1119   OverloadTys[0] = NewTy;
1120   Function *NewIntrin = Intrinsic::getDeclaration(M, IID, OverloadTys);
1121 
1122   CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
1123   NewCall->takeName(II);
1124   NewCall->copyMetadata(*II);
1125 
1126   if (NewNumElts == 1) {
1127     return Builder.CreateInsertElement(UndefValue::get(II->getType()), NewCall,
1128                                        DemandedElts.countTrailingZeros());
1129   }
1130 
1131   SmallVector<uint32_t, 8> EltMask;
1132   unsigned NewLoadIdx = 0;
1133   for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
1134     if (!!DemandedElts[OrigLoadIdx])
1135       EltMask.push_back(NewLoadIdx++);
1136     else
1137       EltMask.push_back(NewNumElts);
1138   }
1139 
1140   Value *Shuffle =
1141       Builder.CreateShuffleVector(NewCall, UndefValue::get(NewTy), EltMask);
1142 
1143   return Shuffle;
1144 }
1145 
1146 /// The specified value produces a vector with any number of elements.
1147 /// This method analyzes which elements of the operand are undef and returns
1148 /// that information in UndefElts.
1149 ///
1150 /// DemandedElts contains the set of elements that are actually used by the
1151 /// caller, and by default (AllowMultipleUsers equals false) the value is
1152 /// simplified only if it has a single caller. If AllowMultipleUsers is set
1153 /// to true, DemandedElts refers to the union of sets of elements that are
1154 /// used by all callers.
1155 ///
1156 /// If the information about demanded elements can be used to simplify the
1157 /// operation, the operation is simplified, then the resultant value is
1158 /// returned.  This returns null if no change was made.
1159 Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
1160                                                 APInt &UndefElts,
1161                                                 unsigned Depth,
1162                                                 bool AllowMultipleUsers) {
1163   unsigned VWidth = V->getType()->getVectorNumElements();
1164   APInt EltMask(APInt::getAllOnesValue(VWidth));
1165   assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");
1166 
1167   if (isa<UndefValue>(V)) {
1168     // If the entire vector is undefined, just return this info.
1169     UndefElts = EltMask;
1170     return nullptr;
1171   }
1172 
1173   if (DemandedElts.isNullValue()) { // If nothing is demanded, provide undef.
1174     UndefElts = EltMask;
1175     return UndefValue::get(V->getType());
1176   }
1177 
1178   UndefElts = 0;
1179 
1180   if (auto *C = dyn_cast<Constant>(V)) {
1181     // Check if this is identity. If so, return 0 since we are not simplifying
1182     // anything.
1183     if (DemandedElts.isAllOnesValue())
1184       return nullptr;
1185 
1186     Type *EltTy = cast<VectorType>(V->getType())->getElementType();
1187     Constant *Undef = UndefValue::get(EltTy);
1188     SmallVector<Constant*, 16> Elts;
1189     for (unsigned i = 0; i != VWidth; ++i) {
1190       if (!DemandedElts[i]) {   // If not demanded, set to undef.
1191         Elts.push_back(Undef);
1192         UndefElts.setBit(i);
1193         continue;
1194       }
1195 
1196       Constant *Elt = C->getAggregateElement(i);
1197       if (!Elt) return nullptr;
1198 
1199       if (isa<UndefValue>(Elt)) {   // Already undef.
1200         Elts.push_back(Undef);
1201         UndefElts.setBit(i);
1202       } else {                               // Otherwise, defined.
1203         Elts.push_back(Elt);
1204       }
1205     }
1206 
1207     // If we changed the constant, return it.
1208     Constant *NewCV = ConstantVector::get(Elts);
1209     return NewCV != C ? NewCV : nullptr;
1210   }
1211 
1212   // Limit search depth.
1213   if (Depth == 10)
1214     return nullptr;
1215 
1216   if (!AllowMultipleUsers) {
1217     // If multiple users are using the root value, proceed with
1218     // simplification conservatively assuming that all elements
1219     // are needed.
1220     if (!V->hasOneUse()) {
1221       // Quit if we find multiple users of a non-root value though.
1222       // They'll be handled when it's their turn to be visited by
1223       // the main instcombine process.
1224       if (Depth != 0)
1225         // TODO: Just compute the UndefElts information recursively.
1226         return nullptr;
1227 
1228       // Conservatively assume that all elements are needed.
1229       DemandedElts = EltMask;
1230     }
1231   }
1232 
1233   Instruction *I = dyn_cast<Instruction>(V);
1234   if (!I) return nullptr;        // Only analyze instructions.
1235 
1236   bool MadeChange = false;
1237   auto simplifyAndSetOp = [&](Instruction *Inst, unsigned OpNum,
1238                               APInt Demanded, APInt &Undef) {
1239     auto *II = dyn_cast<IntrinsicInst>(Inst);
1240     Value *Op = II ? II->getArgOperand(OpNum) : Inst->getOperand(OpNum);
1241     if (Value *V = SimplifyDemandedVectorElts(Op, Demanded, Undef, Depth + 1)) {
1242       if (II)
1243         II->setArgOperand(OpNum, V);
1244       else
1245         Inst->setOperand(OpNum, V);
1246       MadeChange = true;
1247     }
1248   };
1249 
1250   APInt UndefElts2(VWidth, 0);
1251   APInt UndefElts3(VWidth, 0);
1252   switch (I->getOpcode()) {
1253   default: break;
1254 
1255   case Instruction::GetElementPtr: {
1256     // The LangRef requires that struct geps have all constant indices.  As
1257     // such, we can't convert any operand to partial undef.
1258     auto mayIndexStructType = [](GetElementPtrInst &GEP) {
1259       for (auto I = gep_type_begin(GEP), E = gep_type_end(GEP);
1260            I != E; I++)
1261         if (I.isStruct())
1262           return true;;
1263       return false;
1264     };
1265     if (mayIndexStructType(cast<GetElementPtrInst>(*I)))
1266       break;
1267 
1268     // Conservatively track the demanded elements back through any vector
1269     // operands we may have.  We know there must be at least one, or we
1270     // wouldn't have a vector result to get here. Note that we intentionally
1271     // merge the undef bits here since gepping with either an undef base or
1272     // index results in undef.
1273     for (unsigned i = 0; i < I->getNumOperands(); i++) {
1274       if (isa<UndefValue>(I->getOperand(i))) {
1275         // If the entire vector is undefined, just return this info.
1276         UndefElts = EltMask;
1277         return nullptr;
1278       }
1279       if (I->getOperand(i)->getType()->isVectorTy()) {
1280         APInt UndefEltsOp(VWidth, 0);
1281         simplifyAndSetOp(I, i, DemandedElts, UndefEltsOp);
1282         UndefElts |= UndefEltsOp;
1283       }
1284     }
1285 
1286     break;
1287   }
1288   case Instruction::InsertElement: {
1289     // If this is a variable index, we don't know which element it overwrites.
1290     // demand exactly the same input as we produce.
1291     ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
1292     if (!Idx) {
1293       // Note that we can't propagate undef elt info, because we don't know
1294       // which elt is getting updated.
1295       simplifyAndSetOp(I, 0, DemandedElts, UndefElts2);
1296       break;
1297     }
1298 
1299     // The element inserted overwrites whatever was there, so the input demanded
1300     // set is simpler than the output set.
1301     unsigned IdxNo = Idx->getZExtValue();
1302     APInt PreInsertDemandedElts = DemandedElts;
1303     if (IdxNo < VWidth)
1304       PreInsertDemandedElts.clearBit(IdxNo);
1305 
1306     simplifyAndSetOp(I, 0, PreInsertDemandedElts, UndefElts);
1307 
1308     // If this is inserting an element that isn't demanded, remove this
1309     // insertelement.
1310     if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
1311       Worklist.push(I);
1312       return I->getOperand(0);
1313     }
1314 
1315     // The inserted element is defined.
1316     UndefElts.clearBit(IdxNo);
1317     break;
1318   }
1319   case Instruction::ShuffleVector: {
1320     auto *Shuffle = cast<ShuffleVectorInst>(I);
1321     assert(Shuffle->getOperand(0)->getType() ==
1322            Shuffle->getOperand(1)->getType() &&
1323            "Expected shuffle operands to have same type");
1324     unsigned OpWidth =
1325         Shuffle->getOperand(0)->getType()->getVectorNumElements();
1326     APInt LeftDemanded(OpWidth, 0), RightDemanded(OpWidth, 0);
1327     for (unsigned i = 0; i < VWidth; i++) {
1328       if (DemandedElts[i]) {
1329         unsigned MaskVal = Shuffle->getMaskValue(i);
1330         if (MaskVal != -1u) {
1331           assert(MaskVal < OpWidth * 2 &&
1332                  "shufflevector mask index out of range!");
1333           if (MaskVal < OpWidth)
1334             LeftDemanded.setBit(MaskVal);
1335           else
1336             RightDemanded.setBit(MaskVal - OpWidth);
1337         }
1338       }
1339     }
1340 
1341     APInt LHSUndefElts(OpWidth, 0);
1342     simplifyAndSetOp(I, 0, LeftDemanded, LHSUndefElts);
1343 
1344     APInt RHSUndefElts(OpWidth, 0);
1345     simplifyAndSetOp(I, 1, RightDemanded, RHSUndefElts);
1346 
1347     // If this shuffle does not change the vector length and the elements
1348     // demanded by this shuffle are an identity mask, then this shuffle is
1349     // unnecessary.
1350     //
1351     // We are assuming canonical form for the mask, so the source vector is
1352     // operand 0 and operand 1 is not used.
1353     //
1354     // Note that if an element is demanded and this shuffle mask is undefined
1355     // for that element, then the shuffle is not considered an identity
1356     // operation. The shuffle prevents poison from the operand vector from
1357     // leaking to the result by replacing poison with an undefined value.
1358     if (VWidth == OpWidth) {
1359       bool IsIdentityShuffle = true;
1360       for (unsigned i = 0; i < VWidth; i++) {
1361         unsigned MaskVal = Shuffle->getMaskValue(i);
1362         if (DemandedElts[i] && i != MaskVal) {
1363           IsIdentityShuffle = false;
1364           break;
1365         }
1366       }
1367       if (IsIdentityShuffle)
1368         return Shuffle->getOperand(0);
1369     }
1370 
1371     bool NewUndefElts = false;
1372     unsigned LHSIdx = -1u, LHSValIdx = -1u;
1373     unsigned RHSIdx = -1u, RHSValIdx = -1u;
1374     bool LHSUniform = true;
1375     bool RHSUniform = true;
1376     for (unsigned i = 0; i < VWidth; i++) {
1377       unsigned MaskVal = Shuffle->getMaskValue(i);
1378       if (MaskVal == -1u) {
1379         UndefElts.setBit(i);
1380       } else if (!DemandedElts[i]) {
1381         NewUndefElts = true;
1382         UndefElts.setBit(i);
1383       } else if (MaskVal < OpWidth) {
1384         if (LHSUndefElts[MaskVal]) {
1385           NewUndefElts = true;
1386           UndefElts.setBit(i);
1387         } else {
1388           LHSIdx = LHSIdx == -1u ? i : OpWidth;
1389           LHSValIdx = LHSValIdx == -1u ? MaskVal : OpWidth;
1390           LHSUniform = LHSUniform && (MaskVal == i);
1391         }
1392       } else {
1393         if (RHSUndefElts[MaskVal - OpWidth]) {
1394           NewUndefElts = true;
1395           UndefElts.setBit(i);
1396         } else {
1397           RHSIdx = RHSIdx == -1u ? i : OpWidth;
1398           RHSValIdx = RHSValIdx == -1u ? MaskVal - OpWidth : OpWidth;
1399           RHSUniform = RHSUniform && (MaskVal - OpWidth == i);
1400         }
1401       }
1402     }
1403 
1404     // Try to transform shuffle with constant vector and single element from
1405     // this constant vector to single insertelement instruction.
1406     // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
1407     // insertelement V, C[ci], ci-n
1408     if (OpWidth == Shuffle->getType()->getNumElements()) {
1409       Value *Op = nullptr;
1410       Constant *Value = nullptr;
1411       unsigned Idx = -1u;
1412 
1413       // Find constant vector with the single element in shuffle (LHS or RHS).
1414       if (LHSIdx < OpWidth && RHSUniform) {
1415         if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
1416           Op = Shuffle->getOperand(1);
1417           Value = CV->getOperand(LHSValIdx);
1418           Idx = LHSIdx;
1419         }
1420       }
1421       if (RHSIdx < OpWidth && LHSUniform) {
1422         if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
1423           Op = Shuffle->getOperand(0);
1424           Value = CV->getOperand(RHSValIdx);
1425           Idx = RHSIdx;
1426         }
1427       }
1428       // Found constant vector with single element - convert to insertelement.
1429       if (Op && Value) {
1430         Instruction *New = InsertElementInst::Create(
1431             Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
1432             Shuffle->getName());
1433         InsertNewInstWith(New, *Shuffle);
1434         return New;
1435       }
1436     }
1437     if (NewUndefElts) {
1438       // Add additional discovered undefs.
1439       SmallVector<Constant*, 16> Elts;
1440       for (unsigned i = 0; i < VWidth; ++i) {
1441         if (UndefElts[i])
1442           Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
1443         else
1444           Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
1445                                           Shuffle->getMaskValue(i)));
1446       }
1447       I->setOperand(2, ConstantVector::get(Elts));
1448       MadeChange = true;
1449     }
1450     break;
1451   }
1452   case Instruction::Select: {
1453     // If this is a vector select, try to transform the select condition based
1454     // on the current demanded elements.
1455     SelectInst *Sel = cast<SelectInst>(I);
1456     if (Sel->getCondition()->getType()->isVectorTy()) {
1457       // TODO: We are not doing anything with UndefElts based on this call.
1458       // It is overwritten below based on the other select operands. If an
1459       // element of the select condition is known undef, then we are free to
1460       // choose the output value from either arm of the select. If we know that
1461       // one of those values is undef, then the output can be undef.
1462       simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
1463     }
1464 
1465     // Next, see if we can transform the arms of the select.
1466     APInt DemandedLHS(DemandedElts), DemandedRHS(DemandedElts);
1467     if (auto *CV = dyn_cast<ConstantVector>(Sel->getCondition())) {
1468       for (unsigned i = 0; i < VWidth; i++) {
1469         // isNullValue() always returns false when called on a ConstantExpr.
1470         // Skip constant expressions to avoid propagating incorrect information.
1471         Constant *CElt = CV->getAggregateElement(i);
1472         if (isa<ConstantExpr>(CElt))
1473           continue;
1474         // TODO: If a select condition element is undef, we can demand from
1475         // either side. If one side is known undef, choosing that side would
1476         // propagate undef.
1477         if (CElt->isNullValue())
1478           DemandedLHS.clearBit(i);
1479         else
1480           DemandedRHS.clearBit(i);
1481       }
1482     }
1483 
1484     simplifyAndSetOp(I, 1, DemandedLHS, UndefElts2);
1485     simplifyAndSetOp(I, 2, DemandedRHS, UndefElts3);
1486 
1487     // Output elements are undefined if the element from each arm is undefined.
1488     // TODO: This can be improved. See comment in select condition handling.
1489     UndefElts = UndefElts2 & UndefElts3;
1490     break;
1491   }
1492   case Instruction::BitCast: {
1493     // Vector->vector casts only.
1494     VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
1495     if (!VTy) break;
1496     unsigned InVWidth = VTy->getNumElements();
1497     APInt InputDemandedElts(InVWidth, 0);
1498     UndefElts2 = APInt(InVWidth, 0);
1499     unsigned Ratio;
1500 
1501     if (VWidth == InVWidth) {
1502       // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
1503       // elements as are demanded of us.
1504       Ratio = 1;
1505       InputDemandedElts = DemandedElts;
1506     } else if ((VWidth % InVWidth) == 0) {
1507       // If the number of elements in the output is a multiple of the number of
1508       // elements in the input then an input element is live if any of the
1509       // corresponding output elements are live.
1510       Ratio = VWidth / InVWidth;
1511       for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1512         if (DemandedElts[OutIdx])
1513           InputDemandedElts.setBit(OutIdx / Ratio);
1514     } else if ((InVWidth % VWidth) == 0) {
1515       // If the number of elements in the input is a multiple of the number of
1516       // elements in the output then an input element is live if the
1517       // corresponding output element is live.
1518       Ratio = InVWidth / VWidth;
1519       for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
1520         if (DemandedElts[InIdx / Ratio])
1521           InputDemandedElts.setBit(InIdx);
1522     } else {
1523       // Unsupported so far.
1524       break;
1525     }
1526 
1527     simplifyAndSetOp(I, 0, InputDemandedElts, UndefElts2);
1528 
1529     if (VWidth == InVWidth) {
1530       UndefElts = UndefElts2;
1531     } else if ((VWidth % InVWidth) == 0) {
1532       // If the number of elements in the output is a multiple of the number of
1533       // elements in the input then an output element is undef if the
1534       // corresponding input element is undef.
1535       for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
1536         if (UndefElts2[OutIdx / Ratio])
1537           UndefElts.setBit(OutIdx);
1538     } else if ((InVWidth % VWidth) == 0) {
1539       // If the number of elements in the input is a multiple of the number of
1540       // elements in the output then an output element is undef if all of the
1541       // corresponding input elements are undef.
1542       for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
1543         APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
1544         if (SubUndef.countPopulation() == Ratio)
1545           UndefElts.setBit(OutIdx);
1546       }
1547     } else {
1548       llvm_unreachable("Unimp");
1549     }
1550     break;
1551   }
1552   case Instruction::FPTrunc:
1553   case Instruction::FPExt:
1554     simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
1555     break;
1556 
1557   case Instruction::Call: {
1558     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1559     if (!II) break;
1560     switch (II->getIntrinsicID()) {
1561     case Intrinsic::masked_gather: // fallthrough
1562     case Intrinsic::masked_load: {
1563       // Subtlety: If we load from a pointer, the pointer must be valid
1564       // regardless of whether the element is demanded.  Doing otherwise risks
1565       // segfaults which didn't exist in the original program.
1566       APInt DemandedPtrs(APInt::getAllOnesValue(VWidth)),
1567         DemandedPassThrough(DemandedElts);
1568       if (auto *CV = dyn_cast<ConstantVector>(II->getOperand(2)))
1569         for (unsigned i = 0; i < VWidth; i++) {
1570           Constant *CElt = CV->getAggregateElement(i);
1571           if (CElt->isNullValue())
1572             DemandedPtrs.clearBit(i);
1573           else if (CElt->isAllOnesValue())
1574             DemandedPassThrough.clearBit(i);
1575         }
1576       if (II->getIntrinsicID() == Intrinsic::masked_gather)
1577         simplifyAndSetOp(II, 0, DemandedPtrs, UndefElts2);
1578       simplifyAndSetOp(II, 3, DemandedPassThrough, UndefElts3);
1579 
1580       // Output elements are undefined if the element from both sources are.
1581       // TODO: can strengthen via mask as well.
1582       UndefElts = UndefElts2 & UndefElts3;
1583       break;
1584     }
1585     case Intrinsic::x86_xop_vfrcz_ss:
1586     case Intrinsic::x86_xop_vfrcz_sd:
1587       // The instructions for these intrinsics are speced to zero upper bits not
1588       // pass them through like other scalar intrinsics. So we shouldn't just
1589       // use Arg0 if DemandedElts[0] is clear like we do for other intrinsics.
1590       // Instead we should return a zero vector.
1591       if (!DemandedElts[0]) {
1592         Worklist.push(II);
1593         return ConstantAggregateZero::get(II->getType());
1594       }
1595 
1596       // Only the lower element is used.
1597       DemandedElts = 1;
1598       simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1599 
1600       // Only the lower element is undefined. The high elements are zero.
1601       UndefElts = UndefElts[0];
1602       break;
1603 
1604     // Unary scalar-as-vector operations that work column-wise.
1605     case Intrinsic::x86_sse_rcp_ss:
1606     case Intrinsic::x86_sse_rsqrt_ss:
1607       simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1608 
1609       // If lowest element of a scalar op isn't used then use Arg0.
1610       if (!DemandedElts[0]) {
1611         Worklist.push(II);
1612         return II->getArgOperand(0);
1613       }
1614       // TODO: If only low elt lower SQRT to FSQRT (with rounding/exceptions
1615       // checks).
1616       break;
1617 
1618     // Binary scalar-as-vector operations that work column-wise. The high
1619     // elements come from operand 0. The low element is a function of both
1620     // operands.
1621     case Intrinsic::x86_sse_min_ss:
1622     case Intrinsic::x86_sse_max_ss:
1623     case Intrinsic::x86_sse_cmp_ss:
1624     case Intrinsic::x86_sse2_min_sd:
1625     case Intrinsic::x86_sse2_max_sd:
1626     case Intrinsic::x86_sse2_cmp_sd: {
1627       simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1628 
1629       // If lowest element of a scalar op isn't used then use Arg0.
1630       if (!DemandedElts[0]) {
1631         Worklist.push(II);
1632         return II->getArgOperand(0);
1633       }
1634 
1635       // Only lower element is used for operand 1.
1636       DemandedElts = 1;
1637       simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
1638 
1639       // Lower element is undefined if both lower elements are undefined.
1640       // Consider things like undef&0.  The result is known zero, not undef.
1641       if (!UndefElts2[0])
1642         UndefElts.clearBit(0);
1643 
1644       break;
1645     }
1646 
1647     // Binary scalar-as-vector operations that work column-wise. The high
1648     // elements come from operand 0 and the low element comes from operand 1.
1649     case Intrinsic::x86_sse41_round_ss:
1650     case Intrinsic::x86_sse41_round_sd: {
1651       // Don't use the low element of operand 0.
1652       APInt DemandedElts2 = DemandedElts;
1653       DemandedElts2.clearBit(0);
1654       simplifyAndSetOp(II, 0, DemandedElts2, UndefElts);
1655 
1656       // If lowest element of a scalar op isn't used then use Arg0.
1657       if (!DemandedElts[0]) {
1658         Worklist.push(II);
1659         return II->getArgOperand(0);
1660       }
1661 
1662       // Only lower element is used for operand 1.
1663       DemandedElts = 1;
1664       simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
1665 
1666       // Take the high undef elements from operand 0 and take the lower element
1667       // from operand 1.
1668       UndefElts.clearBit(0);
1669       UndefElts |= UndefElts2[0];
1670       break;
1671     }
1672 
1673     // Three input scalar-as-vector operations that work column-wise. The high
1674     // elements come from operand 0 and the low element is a function of all
1675     // three inputs.
1676     case Intrinsic::x86_avx512_mask_add_ss_round:
1677     case Intrinsic::x86_avx512_mask_div_ss_round:
1678     case Intrinsic::x86_avx512_mask_mul_ss_round:
1679     case Intrinsic::x86_avx512_mask_sub_ss_round:
1680     case Intrinsic::x86_avx512_mask_max_ss_round:
1681     case Intrinsic::x86_avx512_mask_min_ss_round:
1682     case Intrinsic::x86_avx512_mask_add_sd_round:
1683     case Intrinsic::x86_avx512_mask_div_sd_round:
1684     case Intrinsic::x86_avx512_mask_mul_sd_round:
1685     case Intrinsic::x86_avx512_mask_sub_sd_round:
1686     case Intrinsic::x86_avx512_mask_max_sd_round:
1687     case Intrinsic::x86_avx512_mask_min_sd_round:
1688       simplifyAndSetOp(II, 0, DemandedElts, UndefElts);
1689 
1690       // If lowest element of a scalar op isn't used then use Arg0.
1691       if (!DemandedElts[0]) {
1692         Worklist.push(II);
1693         return II->getArgOperand(0);
1694       }
1695 
1696       // Only lower element is used for operand 1 and 2.
1697       DemandedElts = 1;
1698       simplifyAndSetOp(II, 1, DemandedElts, UndefElts2);
1699       simplifyAndSetOp(II, 2, DemandedElts, UndefElts3);
1700 
1701       // Lower element is undefined if all three lower elements are undefined.
1702       // Consider things like undef&0.  The result is known zero, not undef.
1703       if (!UndefElts2[0] || !UndefElts3[0])
1704         UndefElts.clearBit(0);
1705 
1706       break;
1707 
1708     case Intrinsic::x86_sse2_packssdw_128:
1709     case Intrinsic::x86_sse2_packsswb_128:
1710     case Intrinsic::x86_sse2_packuswb_128:
1711     case Intrinsic::x86_sse41_packusdw:
1712     case Intrinsic::x86_avx2_packssdw:
1713     case Intrinsic::x86_avx2_packsswb:
1714     case Intrinsic::x86_avx2_packusdw:
1715     case Intrinsic::x86_avx2_packuswb:
1716     case Intrinsic::x86_avx512_packssdw_512:
1717     case Intrinsic::x86_avx512_packsswb_512:
1718     case Intrinsic::x86_avx512_packusdw_512:
1719     case Intrinsic::x86_avx512_packuswb_512: {
1720       auto *Ty0 = II->getArgOperand(0)->getType();
1721       unsigned InnerVWidth = Ty0->getVectorNumElements();
1722       assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");
1723 
1724       unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
1725       unsigned VWidthPerLane = VWidth / NumLanes;
1726       unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;
1727 
1728       // Per lane, pack the elements of the first input and then the second.
1729       // e.g.
1730       // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
1731       // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
1732       for (int OpNum = 0; OpNum != 2; ++OpNum) {
1733         APInt OpDemandedElts(InnerVWidth, 0);
1734         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1735           unsigned LaneIdx = Lane * VWidthPerLane;
1736           for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
1737             unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
1738             if (DemandedElts[Idx])
1739               OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
1740           }
1741         }
1742 
1743         // Demand elements from the operand.
1744         APInt OpUndefElts(InnerVWidth, 0);
1745         simplifyAndSetOp(II, OpNum, OpDemandedElts, OpUndefElts);
1746 
1747         // Pack the operand's UNDEF elements, one lane at a time.
1748         OpUndefElts = OpUndefElts.zext(VWidth);
1749         for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
1750           APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
1751           LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
1752           LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
1753           UndefElts |= LaneElts;
1754         }
1755       }
1756       break;
1757     }
1758 
1759     // PSHUFB
1760     case Intrinsic::x86_ssse3_pshuf_b_128:
1761     case Intrinsic::x86_avx2_pshuf_b:
1762     case Intrinsic::x86_avx512_pshuf_b_512:
1763     // PERMILVAR
1764     case Intrinsic::x86_avx_vpermilvar_ps:
1765     case Intrinsic::x86_avx_vpermilvar_ps_256:
1766     case Intrinsic::x86_avx512_vpermilvar_ps_512:
1767     case Intrinsic::x86_avx_vpermilvar_pd:
1768     case Intrinsic::x86_avx_vpermilvar_pd_256:
1769     case Intrinsic::x86_avx512_vpermilvar_pd_512:
1770     // PERMV
1771     case Intrinsic::x86_avx2_permd:
1772     case Intrinsic::x86_avx2_permps: {
1773       simplifyAndSetOp(II, 1, DemandedElts, UndefElts);
1774       break;
1775     }
1776 
1777     // SSE4A instructions leave the upper 64-bits of the 128-bit result
1778     // in an undefined state.
1779     case Intrinsic::x86_sse4a_extrq:
1780     case Intrinsic::x86_sse4a_extrqi:
1781     case Intrinsic::x86_sse4a_insertq:
1782     case Intrinsic::x86_sse4a_insertqi:
1783       UndefElts.setHighBits(VWidth / 2);
1784       break;
1785     case Intrinsic::amdgcn_buffer_load:
1786     case Intrinsic::amdgcn_buffer_load_format:
1787     case Intrinsic::amdgcn_raw_buffer_load:
1788     case Intrinsic::amdgcn_raw_buffer_load_format:
1789     case Intrinsic::amdgcn_raw_tbuffer_load:
1790     case Intrinsic::amdgcn_s_buffer_load:
1791     case Intrinsic::amdgcn_struct_buffer_load:
1792     case Intrinsic::amdgcn_struct_buffer_load_format:
1793     case Intrinsic::amdgcn_struct_tbuffer_load:
1794     case Intrinsic::amdgcn_tbuffer_load:
1795       return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts);
1796     default: {
1797       if (getAMDGPUImageDMaskIntrinsic(II->getIntrinsicID()))
1798         return simplifyAMDGCNMemoryIntrinsicDemanded(II, DemandedElts, 0);
1799 
1800       break;
1801     }
1802     } // switch on IntrinsicID
1803     break;
1804   } // case Call
1805   } // switch on Opcode
1806 
1807   // TODO: We bail completely on integer div/rem and shifts because they have
1808   // UB/poison potential, but that should be refined.
1809   BinaryOperator *BO;
1810   if (match(I, m_BinOp(BO)) && !BO->isIntDivRem() && !BO->isShift()) {
1811     simplifyAndSetOp(I, 0, DemandedElts, UndefElts);
1812     simplifyAndSetOp(I, 1, DemandedElts, UndefElts2);
1813 
1814     // Any change to an instruction with potential poison must clear those flags
1815     // because we can not guarantee those constraints now. Other analysis may
1816     // determine that it is safe to re-apply the flags.
1817     if (MadeChange)
1818       BO->dropPoisonGeneratingFlags();
1819 
1820     // Output elements are undefined if both are undefined. Consider things
1821     // like undef & 0. The result is known zero, not undef.
1822     UndefElts &= UndefElts2;
1823   }
1824 
1825   // If we've proven all of the lanes undef, return an undef value.
1826   // TODO: Intersect w/demanded lanes
1827   if (UndefElts.isAllOnesValue())
1828     return UndefValue::get(I->getType());;
1829 
1830   return MadeChange ? I : nullptr;
1831 }
1832