1 //===- InstCombineCompares.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitICmp and visitFCmp functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APSInt.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/ConstantFolding.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/TargetLibraryInfo.h"
20 #include "llvm/IR/ConstantRange.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/GetElementPtrTypeIterator.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/PatternMatch.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/KnownBits.h"
27 #include "llvm/Transforms/InstCombine/InstCombiner.h"
28 
29 using namespace llvm;
30 using namespace PatternMatch;
31 
32 #define DEBUG_TYPE "instcombine"
33 
34 // How many times is a select replaced by one of its operands?
35 STATISTIC(NumSel, "Number of select opts");
36 
37 
38 /// Compute Result = In1+In2, returning true if the result overflowed for this
39 /// type.
40 static bool addWithOverflow(APInt &Result, const APInt &In1,
41                             const APInt &In2, bool IsSigned = false) {
42   bool Overflow;
43   if (IsSigned)
44     Result = In1.sadd_ov(In2, Overflow);
45   else
46     Result = In1.uadd_ov(In2, Overflow);
47 
48   return Overflow;
49 }
50 
51 /// Compute Result = In1-In2, returning true if the result overflowed for this
52 /// type.
53 static bool subWithOverflow(APInt &Result, const APInt &In1,
54                             const APInt &In2, bool IsSigned = false) {
55   bool Overflow;
56   if (IsSigned)
57     Result = In1.ssub_ov(In2, Overflow);
58   else
59     Result = In1.usub_ov(In2, Overflow);
60 
61   return Overflow;
62 }
63 
64 /// Given an icmp instruction, return true if any use of this comparison is a
65 /// branch instruction.
66 static bool hasBranchUse(ICmpInst &I) {
67   for (auto *U : I.users())
68     if (isa<BranchInst>(U))
69       return true;
70   return false;
71 }
72 
73 /// Returns true if the exploded icmp can be expressed as a signed comparison
74 /// to zero and updates the predicate accordingly.
75 /// The signedness of the comparison is preserved.
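/// For example, "icmp slt X, 1" can be treated as "icmp sle X, 0" and
/// "icmp sgt X, -1" as "icmp sge X, 0"; the caller then compares against zero.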
76 /// TODO: Refactor with decomposeBitTestICmp()?
77 static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
78   if (!ICmpInst::isSigned(Pred))
79     return false;
80 
81   if (C.isNullValue())
82     return ICmpInst::isRelational(Pred);
83 
84   if (C.isOneValue()) {
85     if (Pred == ICmpInst::ICMP_SLT) {
86       Pred = ICmpInst::ICMP_SLE;
87       return true;
88     }
89   } else if (C.isAllOnesValue()) {
90     if (Pred == ICmpInst::ICMP_SGT) {
91       Pred = ICmpInst::ICMP_SGE;
92       return true;
93     }
94   }
95 
96   return false;
97 }
98 
99 /// Given a signed integer type and a set of known zero and one bits, compute
100 /// the maximum and minimum values that could have the specified known zero and
101 /// known one bits, returning them in Min/Max.
102 /// TODO: Move to method on KnownBits struct?
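/// For example, for a 4-bit value with Known.One = 0b0010 and
/// Known.Zero = 0b0100 (bits 0 and 3 unknown), the sign bit is unknown, so
/// Min = 0b1010 (-6) and Max = 0b0011 (3).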
103 static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
104                                                    APInt &Min, APInt &Max) {
105   assert(Known.getBitWidth() == Min.getBitWidth() &&
106          Known.getBitWidth() == Max.getBitWidth() &&
107          "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
108   APInt UnknownBits = ~(Known.Zero|Known.One);
109 
110   // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
111   // bit if it is unknown.
112   Min = Known.One;
113   Max = Known.One|UnknownBits;
114 
115   if (UnknownBits.isNegative()) { // Sign bit is unknown
116     Min.setSignBit();
117     Max.clearSignBit();
118   }
119 }
120 
121 /// Given an unsigned integer type and a set of known zero and one bits, compute
122 /// the maximum and minimum values that could have the specified known zero and
123 /// known one bits, returning them in Min/Max.
124 /// TODO: Move to method on KnownBits struct?
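/// For example, for the same 4-bit value as above (Known.One = 0b0010,
/// Known.Zero = 0b0100), Min = 0b0010 (2) and Max = 0b1011 (11).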
125 static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
126                                                      APInt &Min, APInt &Max) {
127   assert(Known.getBitWidth() == Min.getBitWidth() &&
128          Known.getBitWidth() == Max.getBitWidth() &&
129          "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
130   APInt UnknownBits = ~(Known.Zero|Known.One);
131 
132   // The minimum value is when the unknown bits are all zeros.
133   Min = Known.One;
134   // The maximum value is when the unknown bits are all ones.
135   Max = Known.One|UnknownBits;
136 }
137 
138 /// This is called when we see this pattern:
139 ///   cmp pred (load (gep GV, ...)), cmpcst
140 /// where GV is a global variable with a constant initializer. Try to simplify
141 /// this into some simple computation that does not need the load. For example
142 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
143 ///
144 /// If AndCst is non-null, then the loaded value is masked with that constant
145 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
146 Instruction *
147 InstCombinerImpl::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
148                                                GlobalVariable *GV, CmpInst &ICI,
149                                                ConstantInt *AndCst) {
150   Constant *Init = GV->getInitializer();
151   if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
152     return nullptr;
153 
154   uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
155   // Don't blow up on huge arrays.
156   if (ArrayElementCount > MaxArraySizeForCombine)
157     return nullptr;
158 
159   // There are many forms of this optimization we can handle; for now, just do
160   // the simple index into a single-dimensional array.
161   //
162   // Require: GEP GV, 0, i {{, constant indices}}
163   if (GEP->getNumOperands() < 3 ||
164       !isa<ConstantInt>(GEP->getOperand(1)) ||
165       !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
166       isa<Constant>(GEP->getOperand(2)))
167     return nullptr;
168 
169   // Check that indices after the variable are constants and in-range for the
170   // type they index.  Collect the indices.  This is typically for arrays of
171   // structs.
172   SmallVector<unsigned, 4> LaterIndices;
173 
174   Type *EltTy = Init->getType()->getArrayElementType();
175   for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
176     ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
177     if (!Idx) return nullptr;  // Variable index.
178 
179     uint64_t IdxVal = Idx->getZExtValue();
180     if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
181 
182     if (StructType *STy = dyn_cast<StructType>(EltTy))
183       EltTy = STy->getElementType(IdxVal);
184     else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
185       if (IdxVal >= ATy->getNumElements()) return nullptr;
186       EltTy = ATy->getElementType();
187     } else {
188       return nullptr; // Unknown type.
189     }
190 
191     LaterIndices.push_back(IdxVal);
192   }
193 
194   enum { Overdefined = -3, Undefined = -2 };
195 
196   // Variables for our state machines.
197 
198   // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
199   // "i == 47 | i == 87", where 47 is the first index the condition is true for,
200   // and 87 is the second (and last) index.  FirstTrueElement is -2 when
201   // undefined, otherwise set to the first true element.  SecondTrueElement is
202   // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
203   int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
204 
205   // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
206   // form "i != 47 & i != 87".  Same state transitions as for true elements.
207   int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
208 
209   /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
210   /// define a state machine that triggers for ranges of values that the index
211   /// is true or false for.  This triggers on things like "abbbbc"[i] == 'b'.
212   /// This is -2 when undefined, -3 when overdefined, and otherwise the last
213   /// index in the range (inclusive).  We use -2 for undefined here because we
214   /// use relative comparisons and don't want 0-1 to match -1.
215   int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
216 
217   // MagicBitvector - This is a magic bitvector where we set a bit if the
218   // comparison is true for element 'i'.  If there are 64 elements or less in
219   // the array, this will fully represent all the comparison results.
220   uint64_t MagicBitvector = 0;
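
  // For example, scanning a constant array holding "hello" for "== 'l'" ends
  // with FirstTrueElement = 2, SecondTrueElement = 3, TrueRangeEnd = 3,
  // SecondFalseElement and FalseRangeEnd overdefined, and
  // MagicBitvector = 0b01100, so the double-compare form "i == 2 | i == 3"
  // is emitted below.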
221 
222   // Scan the array and see if one of our patterns matches.
223   Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
224   for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
225     Constant *Elt = Init->getAggregateElement(i);
226     if (!Elt) return nullptr;
227 
228     // If this is indexing an array of structures, get the structure element.
229     if (!LaterIndices.empty())
230       Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);
231 
232     // If the element is masked, handle it.
233     if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
234 
235     // Find out if the comparison would be true or false for the i'th element.
236     Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
237                                                   CompareRHS, DL, &TLI);
238     // If the result is undef for this element, ignore it.
239     if (isa<UndefValue>(C)) {
240       // Extend range state machines to cover this element in case there is an
241       // undef in the middle of the range.
242       if (TrueRangeEnd == (int)i-1)
243         TrueRangeEnd = i;
244       if (FalseRangeEnd == (int)i-1)
245         FalseRangeEnd = i;
246       continue;
247     }
248 
249     // If we can't compute the result for this element, we have to give up
250     // evaluating the entire conditional.
251     if (!isa<ConstantInt>(C)) return nullptr;
252 
253     // Otherwise, we know if the comparison is true or false for this element,
254     // update our state machines.
255     bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
256 
257     // State machine for single/double/range index comparison.
258     if (IsTrueForElt) {
259       // Update the TrueElement state machine.
260       if (FirstTrueElement == Undefined)
261         FirstTrueElement = TrueRangeEnd = i;  // First true element.
262       else {
263         // Update double-compare state machine.
264         if (SecondTrueElement == Undefined)
265           SecondTrueElement = i;
266         else
267           SecondTrueElement = Overdefined;
268 
269         // Update range state machine.
270         if (TrueRangeEnd == (int)i-1)
271           TrueRangeEnd = i;
272         else
273           TrueRangeEnd = Overdefined;
274       }
275     } else {
276       // Update the FalseElement state machine.
277       if (FirstFalseElement == Undefined)
278         FirstFalseElement = FalseRangeEnd = i; // First false element.
279       else {
280         // Update double-compare state machine.
281         if (SecondFalseElement == Undefined)
282           SecondFalseElement = i;
283         else
284           SecondFalseElement = Overdefined;
285 
286         // Update range state machine.
287         if (FalseRangeEnd == (int)i-1)
288           FalseRangeEnd = i;
289         else
290           FalseRangeEnd = Overdefined;
291       }
292     }
293 
294     // If this element is in range, update our magic bitvector.
295     if (i < 64 && IsTrueForElt)
296       MagicBitvector |= 1ULL << i;
297 
298     // If all of our states become overdefined, bail out early.  Since the
299     // predicate is expensive, only check it every 8 elements.  This is only
300     // really useful for very large arrays.
301     if ((i & 7) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
302         SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
303         FalseRangeEnd == Overdefined)
304       return nullptr;
305   }
306 
307   // Now that we've scanned the entire array, emit our new comparison(s).  We
308   // order the state machines in complexity of the generated code.
309   Value *Idx = GEP->getOperand(2);
310 
311   // If the index is larger than the pointer size of the target, truncate the
312   // index down like the GEP would do implicitly.  We don't have to do this for
313   // an inbounds GEP because the index can't be out of range.
314   if (!GEP->isInBounds()) {
315     Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
316     unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
317     if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize)
318       Idx = Builder.CreateTrunc(Idx, IntPtrTy);
319   }
320 
321   // If the comparison is only true for one or two elements, emit direct
322   // comparisons.
323   if (SecondTrueElement != Overdefined) {
324     // None true -> false.
325     if (FirstTrueElement == Undefined)
326       return replaceInstUsesWith(ICI, Builder.getFalse());
327 
328     Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
329 
330     // True for one element -> 'i == 47'.
331     if (SecondTrueElement == Undefined)
332       return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
333 
334     // True for two elements -> 'i == 47 | i == 72'.
335     Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
336     Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
337     Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
338     return BinaryOperator::CreateOr(C1, C2);
339   }
340 
341   // If the comparison is only false for one or two elements, emit direct
342   // comparisons.
343   if (SecondFalseElement != Overdefined) {
344     // None false -> true.
345     if (FirstFalseElement == Undefined)
346       return replaceInstUsesWith(ICI, Builder.getTrue());
347 
348     Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
349 
350     // False for one element -> 'i != 47'.
351     if (SecondFalseElement == Undefined)
352       return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
353 
354     // False for two elements -> 'i != 47 & i != 72'.
355     Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
356     Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
357     Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
358     return BinaryOperator::CreateAnd(C1, C2);
359   }
360 
361   // If the comparison can be replaced with a range comparison for the elements
362   // where it is true, emit the range check.
363   if (TrueRangeEnd != Overdefined) {
364     assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
365 
366     // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
367     if (FirstTrueElement) {
368       Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
369       Idx = Builder.CreateAdd(Idx, Offs);
370     }
371 
372     Value *End = ConstantInt::get(Idx->getType(),
373                                   TrueRangeEnd-FirstTrueElement+1);
374     return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
375   }
376 
377   // False range check.
378   if (FalseRangeEnd != Overdefined) {
379     assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
380     // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
381     if (FirstFalseElement) {
382       Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
383       Idx = Builder.CreateAdd(Idx, Offs);
384     }
385 
386     Value *End = ConstantInt::get(Idx->getType(),
387                                   FalseRangeEnd-FirstFalseElement);
388     return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
389   }
390 
391   // If a magic bitvector captures the entire comparison state
392   // of this load, replace it with computation that does:
393   //   ((magic_cst >> i) & 1) != 0
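  // For example, for an initializer holding "ababab" and the comparison
  // "== 'a'", the state machines above are all overdefined and
  // MagicBitvector = 0b010101, so we emit "icmp ne (and (lshr 21, i), 1), 0".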
394   {
395     Type *Ty = nullptr;
396 
397     // Look for an appropriate type:
398     // - The type of Idx if the magic fits
399     // - The smallest fitting legal type
400     if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
401       Ty = Idx->getType();
402     else
403       Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
404 
405     if (Ty) {
406       Value *V = Builder.CreateIntCast(Idx, Ty, false);
407       V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
408       V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
409       return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
410     }
411   }
412 
413   return nullptr;
414 }
415 
416 /// Return a value that can be used to compare the *offset* implied by a GEP to
417 /// zero. For example, if we have &A[i], we want to return 'i' for
418 /// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
419 /// are involved. The above expression would also be legal to codegen as
420 /// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
421 /// This latter form is less amenable to optimization though, and we are allowed
422 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
423 ///
424 /// If we can't emit an optimized form for this expression, this returns null.
425 ///
426 static Value *evaluateGEPOffsetExpression(User *GEP, InstCombinerImpl &IC,
427                                           const DataLayout &DL) {
428   gep_type_iterator GTI = gep_type_begin(GEP);
429 
430   // Check to see if this gep only has a single variable index.  If so, and if
431   // any constant indices are a multiple of its scale, then we can compute this
432   // in terms of the scale of the variable index.  For example, if the GEP
433   // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
434   // because the expression will cross zero at the same point.
435   unsigned i, e = GEP->getNumOperands();
436   int64_t Offset = 0;
437   for (i = 1; i != e; ++i, ++GTI) {
438     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
439       // Compute the aggregate offset of constant indices.
440       if (CI->isZero()) continue;
441 
442       // Handle a struct index, which adds its field offset to the pointer.
443       if (StructType *STy = GTI.getStructTypeOrNull()) {
444         Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
445       } else {
446         uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
447         Offset += Size*CI->getSExtValue();
448       }
449     } else {
450       // Found our variable index.
451       break;
452     }
453   }
454 
455   // If there are no variable indices, we must have a constant offset, just
456   // evaluate it the general way.
457   if (i == e) return nullptr;
458 
459   Value *VariableIdx = GEP->getOperand(i);
460   // Determine the scale factor of the variable element.  For example, this is
461   // 4 if the variable index is into an array of i32.
462   uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
463 
464   // Verify that there are no other variable indices.  If so, emit the hard way.
465   for (++i, ++GTI; i != e; ++i, ++GTI) {
466     ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
467     if (!CI) return nullptr;
468 
469     // Compute the aggregate offset of constant indices.
470     if (CI->isZero()) continue;
471 
472     // Handle a struct index, which adds its field offset to the pointer.
473     if (StructType *STy = GTI.getStructTypeOrNull()) {
474       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
475     } else {
476       uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
477       Offset += Size*CI->getSExtValue();
478     }
479   }
480 
481   // Okay, we know we have a single variable index, which must be a
482   // pointer/array/vector index.  If there is no offset, life is simple, return
483   // the index.
484   Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
485   unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
486   if (Offset == 0) {
487     // Cast to IntPtrTy in case a truncation occurs.  If an extension is needed,
488     // we don't need to bother extending: the extension won't affect where the
489     // computation crosses zero.
490     if (VariableIdx->getType()->getPrimitiveSizeInBits().getFixedSize() >
491         IntPtrWidth) {
492       VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
493     }
494     return VariableIdx;
495   }
496 
497   // Otherwise, there is a constant offset.  The computation we perform will
498   // be modulo the pointer size.
499   Offset = SignExtend64(Offset, IntPtrWidth);
500   VariableScale = SignExtend64(VariableScale, IntPtrWidth);
501 
502   // To do this transformation, any constant index must be a multiple of the
503   // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
504   // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
505   // multiple of the variable scale.
506   int64_t NewOffs = Offset / (int64_t)VariableScale;
507   if (Offset != NewOffs*(int64_t)VariableScale)
508     return nullptr;
509 
510   // Okay, we can do this evaluation.  Start by converting the index to intptr.
511   if (VariableIdx->getType() != IntPtrTy)
512     VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
513                                             true /*Signed*/);
514   Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
515   return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
516 }
517 
518 /// Returns true if we can rewrite Start as a GEP with pointer Base
519 /// and some integer offset. The nodes that need to be re-written
520 /// for this transformation will be added to Explored.
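/// For example (a sketch): if Start is a PHI of "%base" and
/// "%p1 = getelementptr inbounds i8, i8* %base, i64 1", then the PHI, %p1 and
/// %base are all added to Explored; an unsupported instruction or a non-noop
/// cast on the path back to Base makes this return false instead.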
521 static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
522                                   const DataLayout &DL,
523                                   SetVector<Value *> &Explored) {
524   SmallVector<Value *, 16> WorkList(1, Start);
525   Explored.insert(Base);
526 
527   // The following traversal gives us an order which can be used
528   // when doing the final transformation. Since in the final
529   // transformation we create the PHI replacement instructions first,
530   // we don't have to get them in any particular order.
531   //
532   // However, for other instructions we will have to traverse the
533   // operands of an instruction first, which means that we have to
534   // do a post-order traversal.
535   while (!WorkList.empty()) {
536     SetVector<PHINode *> PHIs;
537 
538     while (!WorkList.empty()) {
539       if (Explored.size() >= 100)
540         return false;
541 
542       Value *V = WorkList.back();
543 
544       if (Explored.count(V) != 0) {
545         WorkList.pop_back();
546         continue;
547       }
548 
549       if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
550           !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
551         // We've found some value that we can't explore which is different from
552         // the base. Therefore we can't do this transformation.
553         return false;
554 
555       if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
556         auto *CI = cast<CastInst>(V);
557         if (!CI->isNoopCast(DL))
558           return false;
559 
560         if (Explored.count(CI->getOperand(0)) == 0)
561           WorkList.push_back(CI->getOperand(0));
562       }
563 
564       if (auto *GEP = dyn_cast<GEPOperator>(V)) {
565         // We're limiting the GEP to having one index. This will preserve
566         // the original pointer type. We could handle more cases in the
567         // future.
568         if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
569             GEP->getType() != Start->getType())
570           return false;
571 
572         if (Explored.count(GEP->getOperand(0)) == 0)
573           WorkList.push_back(GEP->getOperand(0));
574       }
575 
576       if (WorkList.back() == V) {
577         WorkList.pop_back();
578         // We've finished visiting this node, mark it as such.
579         Explored.insert(V);
580       }
581 
582       if (auto *PN = dyn_cast<PHINode>(V)) {
583         // We cannot transform PHIs on unsplittable basic blocks.
584         if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
585           return false;
586         Explored.insert(PN);
587         PHIs.insert(PN);
588       }
589     }
590 
591     // Explore the PHI nodes further.
592     for (auto *PN : PHIs)
593       for (Value *Op : PN->incoming_values())
594         if (Explored.count(Op) == 0)
595           WorkList.push_back(Op);
596   }
597 
598   // Make sure that we can do this. Since we can't insert GEPs in a basic
599   // block before a PHI node, we can't easily do this transformation if
600   // we have PHI node users of transformed instructions.
601   for (Value *Val : Explored) {
602     for (Value *Use : Val->uses()) {
603 
604       auto *PHI = dyn_cast<PHINode>(Use);
605       auto *Inst = dyn_cast<Instruction>(Val);
606 
607       if (Inst == Base || Inst == PHI || !Inst || !PHI ||
608           Explored.count(PHI) == 0)
609         continue;
610 
611       if (PHI->getParent() == Inst->getParent())
612         return false;
613     }
614   }
615   return true;
616 }
617 
618 // Sets the appropriate insert point on Builder where we can add
619 // a replacement Instruction for V (if that is possible).
620 static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
621                               bool Before = true) {
622   if (auto *PHI = dyn_cast<PHINode>(V)) {
623     Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
624     return;
625   }
626   if (auto *I = dyn_cast<Instruction>(V)) {
627     if (!Before)
628       I = &*std::next(I->getIterator());
629     Builder.SetInsertPoint(I);
630     return;
631   }
632   if (auto *A = dyn_cast<Argument>(V)) {
633     // Set the insertion point in the entry block.
634     BasicBlock &Entry = A->getParent()->getEntryBlock();
635     Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
636     return;
637   }
638   // Otherwise, this is a constant and we don't need to set a new
639   // insertion point.
640   assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
641 }
642 
643 /// Returns a re-written value of Start as an indexed GEP using Base as a
644 /// pointer.
645 static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
646                                  const DataLayout &DL,
647                                  SetVector<Value *> &Explored) {
648   // Perform all the substitutions. This is a bit tricky because we can
649   // have cycles in our use-def chains.
650   // 1. Create the PHI nodes without any incoming values.
651   // 2. Create all the other values.
652   // 3. Add the edges for the PHI nodes.
653   // 4. Emit GEPs to get the original pointers.
654   // 5. Remove the original instructions.
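  //
  // For example (a sketch): for the PHI/GEP chain described above
  // canRewriteGEPAsOffset, each pointer value gets a parallel integer index
  // value (the PHI becomes an integer PHI of 0 and 1), external users are
  // rewritten to "getelementptr inbounds i8, i8* %base, <index>", and the
  // index value computed for Start is returned.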
655   Type *IndexType = IntegerType::get(
656       Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));
657 
658   DenseMap<Value *, Value *> NewInsts;
659   NewInsts[Base] = ConstantInt::getNullValue(IndexType);
660 
661   // Create the new PHI nodes, without adding any incoming values.
662   for (Value *Val : Explored) {
663     if (Val == Base)
664       continue;
665     // Create empty phi nodes. This avoids cyclic dependencies when creating
666     // the remaining instructions.
667     if (auto *PHI = dyn_cast<PHINode>(Val))
668       NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
669                                       PHI->getName() + ".idx", PHI);
670   }
671   IRBuilder<> Builder(Base->getContext());
672 
673   // Create all the other instructions.
674   for (Value *Val : Explored) {
675 
676     if (NewInsts.find(Val) != NewInsts.end())
677       continue;
678 
679     if (auto *CI = dyn_cast<CastInst>(Val)) {
680       // Don't get rid of the intermediate variable here; the store can grow
681       // the map which will invalidate the reference to the input value.
682       Value *V = NewInsts[CI->getOperand(0)];
683       NewInsts[CI] = V;
684       continue;
685     }
686     if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
687       Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
688                                                   : GEP->getOperand(1);
689       setInsertionPoint(Builder, GEP);
690       // Indices might need to be sign extended. GEPs will magically do
691       // this, but we need to do it ourselves here.
692       if (Index->getType()->getScalarSizeInBits() !=
693           NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
694         Index = Builder.CreateSExtOrTrunc(
695             Index, NewInsts[GEP->getOperand(0)]->getType(),
696             GEP->getOperand(0)->getName() + ".sext");
697       }
698 
699       auto *Op = NewInsts[GEP->getOperand(0)];
700       if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
701         NewInsts[GEP] = Index;
702       else
703         NewInsts[GEP] = Builder.CreateNSWAdd(
704             Op, Index, GEP->getOperand(0)->getName() + ".add");
705       continue;
706     }
707     if (isa<PHINode>(Val))
708       continue;
709 
710     llvm_unreachable("Unexpected instruction type");
711   }
712 
713   // Add the incoming values to the PHI nodes.
714   for (Value *Val : Explored) {
715     if (Val == Base)
716       continue;
717     // All the instructions have been created, we can now add edges to the
718     // phi nodes.
719     if (auto *PHI = dyn_cast<PHINode>(Val)) {
720       PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
721       for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
722         Value *NewIncoming = PHI->getIncomingValue(I);
723 
724         if (NewInsts.find(NewIncoming) != NewInsts.end())
725           NewIncoming = NewInsts[NewIncoming];
726 
727         NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
728       }
729     }
730   }
731 
732   for (Value *Val : Explored) {
733     if (Val == Base)
734       continue;
735 
736     // Depending on the type, for external users we have to emit
737     // a GEP or a GEP + ptrtoint.
738     setInsertionPoint(Builder, Val, false);
739 
740     // If required, create an inttoptr instruction for Base.
741     Value *NewBase = Base;
742     if (!Base->getType()->isPointerTy())
743       NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
744                                                Start->getName() + "to.ptr");
745 
746     Value *GEP = Builder.CreateInBoundsGEP(
747         Start->getType()->getPointerElementType(), NewBase,
748         makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");
749 
750     if (!Val->getType()->isPointerTy()) {
751       Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
752                                               Val->getName() + ".conv");
753       GEP = Cast;
754     }
755     Val->replaceAllUsesWith(GEP);
756   }
757 
758   return NewInsts[Start];
759 }
760 
761 /// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
762 /// the input Value as a constant indexed GEP. Returns a pair containing
763 /// the GEP's pointer and index.
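/// For example, for "%p = getelementptr inbounds i32, i32* %q, i64 2" where
/// "%q = getelementptr inbounds i32, i32* %base, i64 1", this returns
/// {%base, 3}.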
764 static std::pair<Value *, Value *>
765 getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
766   Type *IndexType = IntegerType::get(V->getContext(),
767                                      DL.getIndexTypeSizeInBits(V->getType()));
768 
769   Constant *Index = ConstantInt::getNullValue(IndexType);
770   while (true) {
771     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
772       // We accept only inbounds GEPs here to exclude the possibility of
773       // overflow.
774       if (!GEP->isInBounds())
775         break;
776       if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
777           GEP->getType() == V->getType()) {
778         V = GEP->getOperand(0);
779         Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
780         Index = ConstantExpr::getAdd(
781             Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
782         continue;
783       }
784       break;
785     }
786     if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
787       if (!CI->isNoopCast(DL))
788         break;
789       V = CI->getOperand(0);
790       continue;
791     }
792     if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
793       if (!CI->isNoopCast(DL))
794         break;
795       V = CI->getOperand(0);
796       continue;
797     }
798     break;
799   }
800   return {V, Index};
801 }
802 
803 /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
804 /// We can look through PHIs, GEPs and casts in order to determine a common base
805 /// between GEPLHS and RHS.
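/// For example (assuming %q is reachable from the same base, typically through
/// a PHI of inbounds GEPs), "icmp eq (gep inbounds i8, i8* %p, i64 4), %q" can
/// become "icmp eq i64 4, %n", where %n is the rewritten offset of %q from %p.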
806 static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
807                                               ICmpInst::Predicate Cond,
808                                               const DataLayout &DL) {
809   // FIXME: Support vector of pointers.
810   if (GEPLHS->getType()->isVectorTy())
811     return nullptr;
812 
813   if (!GEPLHS->hasAllConstantIndices())
814     return nullptr;
815 
816   // Make sure the pointers have the same type.
817   if (GEPLHS->getType() != RHS->getType())
818     return nullptr;
819 
820   Value *PtrBase, *Index;
821   std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);
822 
823   // The set of nodes that will take part in this transformation.
824   SetVector<Value *> Nodes;
825 
826   if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
827     return nullptr;
828 
829   // We know we can re-write this as
830   //   (gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
831   // Since we've only looked through inbounds GEPs we know that we
832   // can't have overflow on either side. We can therefore re-write
833   // this as:
834   //   OFFSET1 cmp OFFSET2
835   Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);
836 
837   // rewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
838   // GEP having PtrBase as the pointer base, and has returned in NewRHS the
839   // offset. Since Index is the offset of LHS to the base pointer, we will now
840   // compare the offsets instead of comparing the pointers.
841   return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
842 }
843 
844 /// Fold comparisons between a GEP instruction and something else. At this point
845 /// we know that the GEP is on the LHS of the comparison.
846 Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
847                                            ICmpInst::Predicate Cond,
848                                            Instruction &I) {
849   // Don't transform signed compares of GEPs into index compares. Even if the
850   // GEP is inbounds, the final add of the base pointer can have signed overflow
851   // and would change the result of the icmp.
852   // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
853   // the maximum signed value for the pointer type.
854   if (ICmpInst::isSigned(Cond))
855     return nullptr;
856 
857   // Look through bitcasts and addrspacecasts. We do not however want to remove
858   // 0 GEPs.
859   if (!isa<GetElementPtrInst>(RHS))
860     RHS = RHS->stripPointerCasts();
861 
862   Value *PtrBase = GEPLHS->getOperand(0);
863   // FIXME: Support vector pointer GEPs.
864   if (PtrBase == RHS && GEPLHS->isInBounds() &&
865       !GEPLHS->getType()->isVectorTy()) {
866     // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
867     // This transformation (ignoring the base and scales) is valid because we
868     // know pointers can't overflow since the gep is inbounds.  See if we can
869     // output an optimized form.
870     Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);
871 
872     // If not, synthesize the offset the hard way.
873     if (!Offset)
874       Offset = EmitGEPOffset(GEPLHS);
875     return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
876                         Constant::getNullValue(Offset->getType()));
877   }
878 
879   if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
880       isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
881       !NullPointerIsDefined(I.getFunction(),
882                             RHS->getType()->getPointerAddressSpace())) {
883     // For most address spaces, an allocation can't be placed at null, but null
884     // itself is treated as a zero-size allocation in the inbounds rules.  Thus,
885     // the only valid inbounds address derived from null is null itself.
886     // Thus, we have four cases to consider:
887     // 1) Base == nullptr, Offset == 0 -> inbounds, null
888     // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
889     // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
890     // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
891     //
892     // (Note if we're indexing a type of size 0, that simply collapses into one
893     //  of the buckets above.)
894     //
895     // In general, we're allowed to make values less poison (i.e. remove
896     //   sources of full UB), so in this case, we just select between the two
897     //   non-poison cases (1 and 4 above).
898     //
899     // For vectors, we apply the same reasoning on a per-lane basis.
900     auto *Base = GEPLHS->getPointerOperand();
901     if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
902       auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
903       Base = Builder.CreateVectorSplat(EC, Base);
904     }
905     return new ICmpInst(Cond, Base,
906                         ConstantExpr::getPointerBitCastOrAddrSpaceCast(
907                             cast<Constant>(RHS), Base->getType()));
908   } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
909     // If the base pointers are different, but the indices are the same, just
910     // compare the base pointer.
911     if (PtrBase != GEPRHS->getOperand(0)) {
912       bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
913       IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
914                         GEPRHS->getOperand(0)->getType();
915       if (IndicesTheSame)
916         for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
917           if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
918             IndicesTheSame = false;
919             break;
920           }
921 
922       // If all indices are the same, just compare the base pointers.
923       Type *BaseType = GEPLHS->getOperand(0)->getType();
924       if (IndicesTheSame && CmpInst::makeCmpResultType(BaseType) == I.getType())
925         return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
926 
927       // If we're comparing GEPs with two base pointers that only differ in type
928       // and both GEPs have only constant indices or just one use, then fold
929       // the compare with the adjusted indices.
930       // FIXME: Support vector of pointers.
931       if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
932           (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
933           (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
934           PtrBase->stripPointerCasts() ==
935               GEPRHS->getOperand(0)->stripPointerCasts() &&
936           !GEPLHS->getType()->isVectorTy()) {
937         Value *LOffset = EmitGEPOffset(GEPLHS);
938         Value *ROffset = EmitGEPOffset(GEPRHS);
939 
940         // If we looked through an addrspacecast between different sized address
941         // spaces, the LHS and RHS pointers are different sized
942         // integers. Truncate to the smaller one.
943         Type *LHSIndexTy = LOffset->getType();
944         Type *RHSIndexTy = ROffset->getType();
945         if (LHSIndexTy != RHSIndexTy) {
946           if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() <
947               RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) {
948             ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
949           } else
950             LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
951         }
952 
953         Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
954                                         LOffset, ROffset);
955         return replaceInstUsesWith(I, Cmp);
956       }
957 
958       // Otherwise, the base pointers are different and the indices are
959       // different. Try to convert this to an indexed compare by looking
960       // through PHIs/casts.
961       return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
962     }
963 
964     // If one of the GEPs has all zero indices, recurse.
965     // FIXME: Handle vector of pointers.
966     if (!GEPLHS->getType()->isVectorTy() && GEPLHS->hasAllZeroIndices())
967       return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
968                          ICmpInst::getSwappedPredicate(Cond), I);
969 
970     // If the other GEP has all zero indices, recurse.
971     // FIXME: Handle vector of pointers.
972     if (!GEPRHS->getType()->isVectorTy() && GEPRHS->hasAllZeroIndices())
973       return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
974 
975     bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
976     if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
977       // If the GEPs only differ by one index, compare it.
978       unsigned NumDifferences = 0;  // Keep track of # differences.
979       unsigned DiffOperand = 0;     // The operand that differs.
980       for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
981         if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
982           Type *LHSType = GEPLHS->getOperand(i)->getType();
983           Type *RHSType = GEPRHS->getOperand(i)->getType();
984           // FIXME: Better support for vector of pointers.
985           if (LHSType->getPrimitiveSizeInBits() !=
986                    RHSType->getPrimitiveSizeInBits() ||
987               (GEPLHS->getType()->isVectorTy() &&
988                (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
989             // Irreconcilable differences.
990             NumDifferences = 2;
991             break;
992           }
993 
994           if (NumDifferences++) break;
995           DiffOperand = i;
996         }
997 
998       if (NumDifferences == 0)   // SAME GEP?
999         return replaceInstUsesWith(I, // No comparison is needed here.
1000           ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
1001 
1002       else if (NumDifferences == 1 && GEPsInBounds) {
1003         Value *LHSV = GEPLHS->getOperand(DiffOperand);
1004         Value *RHSV = GEPRHS->getOperand(DiffOperand);
1005         // Make sure we do a signed comparison here.
1006         return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
1007       }
1008     }
1009 
1010     // Only lower this if the icmp is the only user of the GEP or if we expect
1011     // the result to fold to a constant!
1012     if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
1013         (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
1014       // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
1015       Value *L = EmitGEPOffset(GEPLHS);
1016       Value *R = EmitGEPOffset(GEPRHS);
1017       return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
1018     }
1019   }
1020 
1021   // Try to convert this to an indexed compare by looking through PHIs/casts
1022   // as a last resort.
1023   return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
1024 }
1025 
1026 Instruction *InstCombinerImpl::foldAllocaCmp(ICmpInst &ICI,
1027                                              const AllocaInst *Alloca,
1028                                              const Value *Other) {
1029   assert(ICI.isEquality() && "Cannot fold non-equality comparison.");
1030 
1031   // It would be tempting to fold away comparisons between allocas and any
1032   // pointer not based on that alloca (e.g. an argument). However, even
1033   // though such pointers cannot alias, they can still compare equal.
1034   //
1035   // But LLVM doesn't specify where allocas get their memory, so if the alloca
1036   // doesn't escape we can argue that it's impossible to guess its value, and we
1037   // can therefore act as if any such guesses are wrong.
1038   //
1039   // The code below checks that the alloca doesn't escape, and that it's only
1040   // used in a comparison once (the current instruction). The
1041   // single-comparison-use condition ensures that we're trivially folding all
1042   // comparisons against the alloca consistently, and avoids the risk of
1043   // erroneously folding a comparison of the pointer with itself.
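  //
  // For example, if %a is a non-escaping alloca that is compared only here,
  // "icmp eq i8* %a, %other" folds to false and "icmp ne i8* %a, %other"
  // folds to true.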
1044 
1045   unsigned MaxIter = 32; // Break cycles and bound to constant-time.
1046 
1047   SmallVector<const Use *, 32> Worklist;
1048   for (const Use &U : Alloca->uses()) {
1049     if (Worklist.size() >= MaxIter)
1050       return nullptr;
1051     Worklist.push_back(&U);
1052   }
1053 
1054   unsigned NumCmps = 0;
1055   while (!Worklist.empty()) {
1056     assert(Worklist.size() <= MaxIter);
1057     const Use *U = Worklist.pop_back_val();
1058     const Value *V = U->getUser();
1059     --MaxIter;
1060 
1061     if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
1062         isa<SelectInst>(V)) {
1063       // Track the uses.
1064     } else if (isa<LoadInst>(V)) {
1065       // Loading from the pointer doesn't escape it.
1066       continue;
1067     } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
1068       // Storing *to* the pointer is fine, but storing the pointer escapes it.
1069       if (SI->getValueOperand() == U->get())
1070         return nullptr;
1071       continue;
1072     } else if (isa<ICmpInst>(V)) {
1073       if (NumCmps++)
1074         return nullptr; // Found more than one cmp.
1075       continue;
1076     } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
1077       switch (Intrin->getIntrinsicID()) {
1078         // These intrinsics don't escape or compare the pointer. Memset is safe
1079         // because we don't allow ptrtoint. Memcpy and memmove are safe because
1080         // we don't allow stores, so src cannot point to V.
1081         case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
1082         case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
1083           continue;
1084         default:
1085           return nullptr;
1086       }
1087     } else {
1088       return nullptr;
1089     }
1090     for (const Use &U : V->uses()) {
1091       if (Worklist.size() >= MaxIter)
1092         return nullptr;
1093       Worklist.push_back(&U);
1094     }
1095   }
1096 
1097   Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
1098   return replaceInstUsesWith(
1099       ICI,
1100       ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
1101 }
1102 
1103 /// Fold "icmp pred (X+C), X".
1104 Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
1105                                                   ICmpInst::Predicate Pred) {
1106   // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
1107   // so the values can never be equal.  Similarly for all other "or equals"
1108   // operators.
1109   assert(!!C && "C should not be zero!");
1110 
1111   // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
1112   // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
1113   // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
1114   if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
1115     Constant *R = ConstantInt::get(X->getType(),
1116                                    APInt::getMaxValue(C.getBitWidth()) - C);
1117     return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
1118   }
1119 
1120   // (X+1) >u X        --> X <u (0-1)        --> X != 255
1121   // (X+2) >u X        --> X <u (0-2)        --> X <u 254
1122   // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
1123   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
1124     return new ICmpInst(ICmpInst::ICMP_ULT, X,
1125                         ConstantInt::get(X->getType(), -C));
1126 
1127   APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());
1128 
1129   // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
1130   // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
1131   // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
1132   // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
1133   // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
1134   // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
1135   if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1136     return new ICmpInst(ICmpInst::ICMP_SGT, X,
1137                         ConstantInt::get(X->getType(), SMax - C));
1138 
1139   // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
1140   // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
1141   // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
1142   // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
1143   // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
1144   // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128
1145 
1146   assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
1147   return new ICmpInst(ICmpInst::ICMP_SLT, X,
1148                       ConstantInt::get(X->getType(), SMax - (C - 1)));
1149 }
1150 
1151 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
1152 /// (icmp eq/ne A, Log2(AP2/AP1)) ->
1153 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
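/// For example, "icmp eq (lshr i32 64, %A), 4" is folded to
/// "icmp eq i32 %A, 4" because 64 >> 4 == 4.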
1154 Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
1155                                                      const APInt &AP1,
1156                                                      const APInt &AP2) {
1157   assert(I.isEquality() && "Cannot fold icmp gt/lt");
1158 
1159   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1160     if (I.getPredicate() == I.ICMP_NE)
1161       Pred = CmpInst::getInversePredicate(Pred);
1162     return new ICmpInst(Pred, LHS, RHS);
1163   };
1164 
1165   // Don't bother doing any work for cases which InstSimplify handles.
1166   if (AP2.isNullValue())
1167     return nullptr;
1168 
1169   bool IsAShr = isa<AShrOperator>(I.getOperand(0));
1170   if (IsAShr) {
1171     if (AP2.isAllOnesValue())
1172       return nullptr;
1173     if (AP2.isNegative() != AP1.isNegative())
1174       return nullptr;
1175     if (AP2.sgt(AP1))
1176       return nullptr;
1177   }
1178 
1179   if (!AP1)
1180     // 'A' must be large enough to shift out the highest set bit.
1181     return getICmp(I.ICMP_UGT, A,
1182                    ConstantInt::get(A->getType(), AP2.logBase2()));
1183 
1184   if (AP1 == AP2)
1185     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1186 
1187   int Shift;
1188   if (IsAShr && AP1.isNegative())
1189     Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
1190   else
1191     Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();
1192 
1193   if (Shift > 0) {
1194     if (IsAShr && AP1 == AP2.ashr(Shift)) {
1195       // There are multiple solutions if we are comparing against -1 and the LHS
1196       // of the ashr is not a power of two.
1197       if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
1198         return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
1199       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1200     } else if (AP1 == AP2.lshr(Shift)) {
1201       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1202     }
1203   }
1204 
1205   // Shifting const2 will never be equal to const1.
1206   // FIXME: This should always be handled by InstSimplify?
1207   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1208   return replaceInstUsesWith(I, TorF);
1209 }
1210 
1211 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
1212 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
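/// For example, "icmp eq (shl i32 2, %A), 16" is folded to
/// "icmp eq i32 %A, 3" because 2 << 3 == 16.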
1213 Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
1214                                                      const APInt &AP1,
1215                                                      const APInt &AP2) {
1216   assert(I.isEquality() && "Cannot fold icmp gt/lt");
1217 
1218   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1219     if (I.getPredicate() == I.ICMP_NE)
1220       Pred = CmpInst::getInversePredicate(Pred);
1221     return new ICmpInst(Pred, LHS, RHS);
1222   };
1223 
1224   // Don't bother doing any work for cases which InstSimplify handles.
1225   if (AP2.isNullValue())
1226     return nullptr;
1227 
1228   unsigned AP2TrailingZeros = AP2.countTrailingZeros();
1229 
1230   if (!AP1 && AP2TrailingZeros != 0)
1231     return getICmp(
1232         I.ICMP_UGE, A,
1233         ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1234 
1235   if (AP1 == AP2)
1236     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1237 
1238   // Get the distance between the lowest bits that are set.
1239   int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
1240 
1241   if (Shift > 0 && AP2.shl(Shift) == AP1)
1242     return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1243 
1244   // Shifting const2 will never be equal to const1.
1245   // FIXME: This should always be handled by InstSimplify?
1246   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1247   return replaceInstUsesWith(I, TorF);
1248 }
1249 
1250 /// The caller has matched a pattern of the form:
1251 ///   I = icmp ugt (add (add A, B), CI2), CI1
1252 /// If this is of the form:
1253 ///   sum = a + b
1254 ///   if (sum+128 >u 255)
1255 /// Then replace it with llvm.sadd.with.overflow.i8.
1256 ///
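/// For example (a sketch, assuming %a and %b were sign-extended from i8):
///   %add = add i32 %a, %b
///   %off = add i32 %add, 128
///   %cmp = icmp ugt i32 %off, 255
/// is rewritten so that the narrow add and the overflow bit come from
///   call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %ta, i8 %tb)
/// where %ta/%tb are truncations of %a/%b, and %cmp is replaced by the
/// extracted overflow flag.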
1257 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1258                                           ConstantInt *CI2, ConstantInt *CI1,
1259                                           InstCombinerImpl &IC) {
1260   // The transformation we're trying to do here is to transform this into an
1261   // llvm.sadd.with.overflow.  To do this, we have to replace the original add
1262   // with a narrower add, and discard the add-with-constant that is part of the
1263   // range check (if we can't eliminate it, this isn't profitable).
1264 
1265   // In order to eliminate the add-with-constant, the compare must be its
1266   // only use.
1267   Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1268   if (!AddWithCst->hasOneUse())
1269     return nullptr;
1270 
1271   // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1272   if (!CI2->getValue().isPowerOf2())
1273     return nullptr;
1274   unsigned NewWidth = CI2->getValue().countTrailingZeros();
1275   if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1276     return nullptr;
1277 
1278   // The width of the new add formed is 1 more than the bias.
1279   ++NewWidth;
1280 
1281   // Check to see that CI1 is an all-ones value with NewWidth bits.
1282   if (CI1->getBitWidth() == NewWidth ||
1283       CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1284     return nullptr;
1285 
1286   // This is only really a signed overflow check if the inputs have been
1287   // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1288   // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1289   unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
1290   if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
1291       IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
1292     return nullptr;
1293 
1294   // In order to replace the original add with a narrower
1295   // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1296   // and truncates that discard the high bits of the add.  Verify that this is
1297   // the case.
1298   Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1299   for (User *U : OrigAdd->users()) {
1300     if (U == AddWithCst)
1301       continue;
1302 
1303     // Only accept truncates for now.  We would really like a nice recursive
1304     // predicate like SimplifyDemandedBits, but one that goes down the use-def
1305     // chain to see which bits of a value are actually demanded.  If the
1306     // original add had another add which was then immediately truncated, we
1307     // could still do the transformation.
1308     TruncInst *TI = dyn_cast<TruncInst>(U);
1309     if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1310       return nullptr;
1311   }
1312 
1313   // If the pattern matches, truncate the inputs to the narrower type and
1314   // use the sadd_with_overflow intrinsic to efficiently compute both the
1315   // result and the overflow bit.
1316   Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1317   Function *F = Intrinsic::getDeclaration(
1318       I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1319 
1320   InstCombiner::BuilderTy &Builder = IC.Builder;
1321 
1322   // Put the new code above the original add, in case there are any uses of the
1323   // add between the add and the compare.
1324   Builder.SetInsertPoint(OrigAdd);
1325 
1326   Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1327   Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1328   CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1329   Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1330   Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1331 
1332   // Replace the original add with the narrow add's result zero-extended to
1333   // the wider type; the remaining users (truncates) only use the low bits.
1334   IC.replaceInstUsesWith(*OrigAdd, ZExt);
1335   IC.eraseInstFromFunction(*OrigAdd);
1336 
1337   // The original icmp gets replaced with the overflow value.
1338   return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1339 }
1340 
1341 /// If we have:
1342 ///   icmp eq/ne (urem/srem %x, %y), 0
1343 /// and %y is known to be a power-of-two, we can replace this with a bit test:
1344 ///   icmp eq/ne (and %x, (add %y, -1)), 0
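/// For example, when %y is known to be 8:
///   icmp eq (urem %x, %y), 0 --> icmp eq (and %x, 7), 0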
1345 Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1346   // This fold is only valid for equality predicates.
1347   if (!I.isEquality())
1348     return nullptr;
1349   ICmpInst::Predicate Pred;
1350   Value *X, *Y, *Zero;
1351   if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1352                         m_CombineAnd(m_Zero(), m_Value(Zero)))))
1353     return nullptr;
1354   if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I))
1355     return nullptr;
1356   // This may increase instruction count; we don't enforce that Y is a constant.
1357   Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1358   Value *Masked = Builder.CreateAnd(X, Mask);
1359   return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1360 }
1361 
1362 /// Fold equality-comparison between zero and any (maybe truncated) right-shift
1363 /// by one-less-than-bitwidth into a sign test on the original value.
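/// For example, for an i32 value:
///   icmp eq (lshr %x, 31), 0 --> icmp sge %x, 0
///   icmp ne (lshr %x, 31), 0 --> icmp slt %x, 0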
1364 Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
1365   Instruction *Val;
1366   ICmpInst::Predicate Pred;
1367   if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1368     return nullptr;
1369 
1370   Value *X;
1371   Type *XTy;
1372 
1373   Constant *C;
1374   if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1375     XTy = X->getType();
1376     unsigned XBitWidth = XTy->getScalarSizeInBits();
1377     if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1378                                      APInt(XBitWidth, XBitWidth - 1))))
1379       return nullptr;
1380   } else if (isa<BinaryOperator>(Val) &&
1381              (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1382                   cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1383                   /*AnalyzeForSignBitExtraction=*/true))) {
1384     XTy = X->getType();
1385   } else
1386     return nullptr;
1387 
1388   return ICmpInst::Create(Instruction::ICmp,
1389                           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1390                                                     : ICmpInst::ICMP_SLT,
1391                           X, ConstantInt::getNullValue(XTy));
1392 }
1393 
1394 // Handle icmp pred X, 0
1395 Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
1396   CmpInst::Predicate Pred = Cmp.getPredicate();
1397   if (!match(Cmp.getOperand(1), m_Zero()))
1398     return nullptr;
1399 
1400   // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
1401   if (Pred == ICmpInst::ICMP_SGT) {
1402     Value *A, *B;
1403     SelectPatternResult SPR = matchSelectPattern(Cmp.getOperand(0), A, B);
1404     if (SPR.Flavor == SPF_SMIN) {
1405       if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
1406         return new ICmpInst(Pred, B, Cmp.getOperand(1));
1407       if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
1408         return new ICmpInst(Pred, A, Cmp.getOperand(1));
1409     }
1410   }
1411 
1412   if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1413     return New;
1414 
1415   // Given:
1416   //   icmp eq/ne (urem %x, %y), 0
1417   // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1418   //   icmp eq/ne %x, 0
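  // This holds because a %y with two or more set bits is not a power of two,
  // so it cannot evenly divide a nonzero %x that has at most one set bit;
  // the remainder is therefore zero exactly when %x is zero.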
1419   Value *X, *Y;
1420   if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1421       ICmpInst::isEquality(Pred)) {
1422     KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
1423     KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
1424     if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1425       return new ICmpInst(Pred, X, Cmp.getOperand(1));
1426   }
1427 
1428   return nullptr;
1429 }
1430 
1431 /// Fold icmp Pred X, C.
1432 /// TODO: This code structure does not make sense. The saturating add fold
1433 /// should be moved to some other helper and extended as noted below (it is also
1434 /// possible that code has been made unnecessary - do we canonicalize IR to
1435 /// overflow/saturating intrinsics or not?).
1436 Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
1437   // Match the following pattern, which is a common idiom when writing
1438   // overflow-safe integer arithmetic functions. The source performs an addition
1439   // in a wider type and explicitly checks for overflow using comparisons against
1440   // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1441   //
1442   // TODO: This could probably be generalized to handle other overflow-safe
1443   // operations if we worked out the formulas to compute the appropriate magic
1444   // constants.
1445   //
1446   // sum = a + b
1447   // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
1448   CmpInst::Predicate Pred = Cmp.getPredicate();
1449   Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1450   Value *A, *B;
1451   ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1452   if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1453       match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1454     if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1455       return Res;
1456 
1457   // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
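  // For example: icmp ult (phi i32 [ 1, %bb1 ], [ 3, %bb2 ]), 2
  //          --> phi i1 [ true, %bb1 ], [ false, %bb2 ]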
1458   Constant *C = dyn_cast<Constant>(Op1);
1459   if (!C)
1460     return nullptr;
1461 
1462   if (auto *Phi = dyn_cast<PHINode>(Op0))
1463     if (all_of(Phi->operands(), [](Value *V) { return isa<Constant>(V); })) {
1464       Type *Ty = Cmp.getType();
1465       Builder.SetInsertPoint(Phi);
1466       PHINode *NewPhi =
1467           Builder.CreatePHI(Ty, Phi->getNumOperands());
1468       for (BasicBlock *Predecessor : predecessors(Phi->getParent())) {
1469         auto *Input =
1470             cast<Constant>(Phi->getIncomingValueForBlock(Predecessor));
1471         auto *BoolInput = ConstantExpr::getCompare(Pred, Input, C);
1472         NewPhi->addIncoming(BoolInput, Predecessor);
1473       }
1474       NewPhi->takeName(&Cmp);
1475       return replaceInstUsesWith(Cmp, NewPhi);
1476     }
1477 
1478   return nullptr;
1479 }
1480 
1481 /// Canonicalize icmp instructions based on dominating conditions.
1482 Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1483   // This is a cheap/incomplete check for dominance - just match a single
1484   // predecessor with a conditional branch.
1485   BasicBlock *CmpBB = Cmp.getParent();
1486   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1487   if (!DomBB)
1488     return nullptr;
1489 
1490   Value *DomCond;
1491   BasicBlock *TrueBB, *FalseBB;
1492   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1493     return nullptr;
1494 
1495   assert((TrueBB == CmpBB || FalseBB == CmpBB) &&
1496          "Predecessor block does not point to successor?");
1497 
1498   // The branch should get simplified. Don't bother simplifying this condition.
1499   if (TrueBB == FalseBB)
1500     return nullptr;
1501 
1502   // Try to simplify this compare to T/F based on the dominating condition.
1503   Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
1504   if (Imp)
1505     return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp));
1506 
1507   CmpInst::Predicate Pred = Cmp.getPredicate();
1508   Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1509   ICmpInst::Predicate DomPred;
1510   const APInt *C, *DomC;
1511   if (match(DomCond, m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))) &&
1512       match(Y, m_APInt(C))) {
1513     // We have 2 compares of a variable with constants. Calculate the constant
1514     // ranges of those compares to see if we can transform the 2nd compare:
1515     // DomBB:
1516     //   DomCond = icmp DomPred X, DomC
1517     //   br DomCond, CmpBB, FalseBB
1518     // CmpBB:
1519     //   Cmp = icmp Pred X, C
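    // For example, if DomPred is ult with DomC == 8 and CmpBB is the true
    // successor, then X is known to be in [0, 8): a dominated 'icmp ult X, 16'
    // is always true and a dominated 'icmp ugt X, 16' is always false.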
1520     ConstantRange CR = ConstantRange::makeAllowedICmpRegion(Pred, *C);
1521     ConstantRange DominatingCR =
1522         (CmpBB == TrueBB) ? ConstantRange::makeExactICmpRegion(DomPred, *DomC)
1523                           : ConstantRange::makeExactICmpRegion(
1524                                 CmpInst::getInversePredicate(DomPred), *DomC);
1525     ConstantRange Intersection = DominatingCR.intersectWith(CR);
1526     ConstantRange Difference = DominatingCR.difference(CR);
1527     if (Intersection.isEmptySet())
1528       return replaceInstUsesWith(Cmp, Builder.getFalse());
1529     if (Difference.isEmptySet())
1530       return replaceInstUsesWith(Cmp, Builder.getTrue());
1531 
1532     // Canonicalizing a sign bit comparison that gets used in a branch
1533     // pessimizes codegen by generating a branch-on-zero instruction instead
1534     // of a test-and-branch. So we avoid canonicalizing in such situations
1535     // because a test-and-branch instruction has better branch displacement
1536     // than a compare-and-branch instruction.
1537     bool UnusedBit;
1538     bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1539     if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1540       return nullptr;
1541 
1542     if (const APInt *EqC = Intersection.getSingleElement())
1543       return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1544     if (const APInt *NeC = Difference.getSingleElement())
1545       return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1546   }
1547 
1548   return nullptr;
1549 }
1550 
1551 /// Fold icmp (trunc X), C.
1552 Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
1553                                                      TruncInst *Trunc,
1554                                                      const APInt &C) {
1555   ICmpInst::Predicate Pred = Cmp.getPredicate();
1556   Value *X = Trunc->getOperand(0);
1557   if (C.isOneValue() && C.getBitWidth() > 1) {
1558     // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1559     Value *V = nullptr;
1560     if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1561       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1562                           ConstantInt::get(V->getType(), 1));
1563   }
1564 
1565   if (Cmp.isEquality() && Trunc->hasOneUse()) {
1566     // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1567     // of the high bits truncated out of x are known.
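    // For example, if the upper 24 bits of i32 %x are known to be zero:
    //   icmp eq (trunc i32 %x to i8), 42 --> icmp eq i32 %x, 42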
1568     unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1569              SrcBits = X->getType()->getScalarSizeInBits();
1570     KnownBits Known = computeKnownBits(X, 0, &Cmp);
1571 
1572     // If all the high bits are known, we can do this xform.
1573     if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
1574       // Pull in the high bits from known-ones set.
1575       APInt NewRHS = C.zext(SrcBits);
1576       NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1577       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
1578     }
1579   }
1580 
1581   return nullptr;
1582 }
1583 
1584 /// Fold icmp (xor X, Y), C.
1585 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1586                                                    BinaryOperator *Xor,
1587                                                    const APInt &C) {
1588   Value *X = Xor->getOperand(0);
1589   Value *Y = Xor->getOperand(1);
1590   const APInt *XorC;
1591   if (!match(Y, m_APInt(XorC)))
1592     return nullptr;
1593 
1594   // If this is a comparison that tests the sign bit (X < 0) or (X > -1),
1595   // fold the xor.
1596   ICmpInst::Predicate Pred = Cmp.getPredicate();
1597   bool TrueIfSigned = false;
1598   if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1599 
1600     // If the sign bit of the XorCst is not set, there is no change to
1601     // the operation; just stop using the Xor.
1602     if (!XorC->isNegative())
1603       return replaceOperand(Cmp, 0, X);
1604 
1605     // Emit the opposite comparison.
1606     if (TrueIfSigned)
1607       return new ICmpInst(ICmpInst::ICMP_SGT, X,
1608                           ConstantInt::getAllOnesValue(X->getType()));
1609     else
1610       return new ICmpInst(ICmpInst::ICMP_SLT, X,
1611                           ConstantInt::getNullValue(X->getType()));
1612   }
1613 
1614   if (Xor->hasOneUse()) {
1615     // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
1616     if (!Cmp.isEquality() && XorC->isSignMask()) {
1617       Pred = Cmp.getFlippedSignednessPredicate();
1618       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1619     }
1620 
1621     // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1622     if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1623       Pred = Cmp.getFlippedSignednessPredicate();
1624       Pred = Cmp.getSwappedPredicate(Pred);
1625       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1626     }
1627   }
1628 
1629   // Mask constant magic can eliminate an 'xor' with unsigned compares.
1630   if (Pred == ICmpInst::ICMP_UGT) {
1631     // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1632     if (*XorC == ~C && (C + 1).isPowerOf2())
1633       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1634     // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1635     if (*XorC == C && (C + 1).isPowerOf2())
1636       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1637   }
1638   if (Pred == ICmpInst::ICMP_ULT) {
1639     // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1640     if (*XorC == -C && C.isPowerOf2())
1641       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1642                           ConstantInt::get(X->getType(), ~C));
1643     // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1644     if (*XorC == C && (-C).isPowerOf2())
1645       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1646                           ConstantInt::get(X->getType(), ~C));
1647   }
1648   return nullptr;
1649 }
1650 
1651 /// Fold icmp (and (sh X, Y), C2), C1.
1652 Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1653                                                 BinaryOperator *And,
1654                                                 const APInt &C1,
1655                                                 const APInt &C2) {
1656   BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1657   if (!Shift || !Shift->isShift())
1658     return nullptr;
1659 
1660   // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1661   // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1662   // code produced by the clang front-end, for bitfield access.
1663   // This seemingly simple opportunity to fold away a shift turns out to be
1664   // rather complicated. See PR17827 for details.
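  // For example: ((X >> 3) & 7) == 5 --> (X & 56) == 40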
1665   unsigned ShiftOpcode = Shift->getOpcode();
1666   bool IsShl = ShiftOpcode == Instruction::Shl;
1667   const APInt *C3;
1668   if (match(Shift->getOperand(1), m_APInt(C3))) {
1669     APInt NewAndCst, NewCmpCst;
1670     bool AnyCmpCstBitsShiftedOut;
1671     if (ShiftOpcode == Instruction::Shl) {
1672       // For a left shift, we can fold if the comparison is not signed. We can
1673       // also fold a signed comparison if the mask value and comparison value
1674       // are not negative. These constraints may not be obvious, but we can
1675       // prove that they are correct using an SMT solver.
1676       if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1677         return nullptr;
1678 
1679       NewCmpCst = C1.lshr(*C3);
1680       NewAndCst = C2.lshr(*C3);
1681       AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1682     } else if (ShiftOpcode == Instruction::LShr) {
1683       // For a logical right shift, we can fold if the comparison is not signed.
1684       // We can also fold a signed comparison if the shifted mask value and the
1685       // shifted comparison value are not negative. These constraints may not be
1686       // obvious, but we can prove that they are correct using an SMT solver.
1687       NewCmpCst = C1.shl(*C3);
1688       NewAndCst = C2.shl(*C3);
1689       AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1690       if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1691         return nullptr;
1692     } else {
1693       // For an arithmetic shift, check that both constants don't use (in a
1694       // signed sense) the top bits being shifted out.
1695       assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1696       NewCmpCst = C1.shl(*C3);
1697       NewAndCst = C2.shl(*C3);
1698       AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1699       if (NewAndCst.ashr(*C3) != C2)
1700         return nullptr;
1701     }
1702 
1703     if (AnyCmpCstBitsShiftedOut) {
1704       // If we shifted bits out, the fold is not going to work out. As a
1705       // special case, check to see if this means that the result is always
1706       // true or false now.
1707       if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1708         return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1709       if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1710         return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1711     } else {
1712       Value *NewAnd = Builder.CreateAnd(
1713           Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1714       return new ICmpInst(Cmp.getPredicate(),
1715           NewAnd, ConstantInt::get(And->getType(), NewCmpCst));
1716     }
1717   }
1718 
1719   // Turn ((X >> Y) & C2) == 0  into  (X & (C2 << Y)) == 0.  The latter is
1720   // preferable because it allows the C2 << Y expression to be hoisted out of a
1721   // loop if Y is invariant and X is not.
1722   if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() &&
1723       !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1724     // Compute C2 << Y.
1725     Value *NewShift =
1726         IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1727               : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1728 
1729     // Compute X & (C2 << Y).
1730     Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1731     return replaceOperand(Cmp, 0, NewAnd);
1732   }
1733 
1734   return nullptr;
1735 }
1736 
1737 /// Fold icmp (and X, C2), C1.
1738 Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1739                                                      BinaryOperator *And,
1740                                                      const APInt &C1) {
1741   bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1742 
1743   // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1744   // TODO: We canonicalize to the longer form for scalars because we have
1745   // better analysis/folds for icmp, and codegen may be better with icmp.
1746   if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isNullValue() &&
1747       match(And->getOperand(1), m_One()))
1748     return new TruncInst(And->getOperand(0), Cmp.getType());
1749 
1750   const APInt *C2;
1751   Value *X;
1752   if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1753     return nullptr;
1754 
1755   // Don't perform the following transforms if the AND has multiple uses.
1756   if (!And->hasOneUse())
1757     return nullptr;
1758 
1759   if (Cmp.isEquality() && C1.isNullValue()) {
1760     // Restrict this fold to single-use 'and' (PR10267).
1761     // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1762     if (C2->isSignMask()) {
1763       Constant *Zero = Constant::getNullValue(X->getType());
1764       auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1765       return new ICmpInst(NewPred, X, Zero);
1766     }
1767 
1768     // Restrict this fold only for single-use 'and' (PR10267).
1769     // ((%x & C) == 0) --> %x u< (-C)  iff (-C) is power of two.
1770     if ((~(*C2) + 1).isPowerOf2()) {
1771       Constant *NegBOC =
1772           ConstantExpr::getNeg(cast<Constant>(And->getOperand(1)));
1773       auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1774       return new ICmpInst(NewPred, X, NegBOC);
1775     }
1776   }
1777 
1778   // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1779   // the input width without changing the value produced, eliminate the cast:
1780   //
1781   // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1782   //
1783   // We can do this transformation if the constants do not have their sign bits
1784   // set or if it is an equality comparison. Extending a relational comparison
1785   // when we're checking the sign bit would not work.
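  // For example:
  //   icmp ult (and (trunc i32 %w to i8), 15), 4
  //     --> icmp ult (and i32 %w, 15), 4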
1786   Value *W;
1787   if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1788       (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1789     // TODO: Is this a good transform for vectors? Wider types may reduce
1790     // throughput. Should this transform be limited (even for scalars) by using
1791     // shouldChangeType()?
1792     if (!Cmp.getType()->isVectorTy()) {
1793       Type *WideType = W->getType();
1794       unsigned WideScalarBits = WideType->getScalarSizeInBits();
1795       Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1796       Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1797       Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1798       return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1799     }
1800   }
1801 
1802   if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1803     return I;
1804 
1805   // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1806   // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1807   //
1808   // iff pred isn't signed
1809   if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() &&
1810       match(And->getOperand(1), m_One())) {
1811     Constant *One = cast<Constant>(And->getOperand(1));
1812     Value *Or = And->getOperand(0);
1813     Value *A, *B, *LShr;
1814     if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1815         match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1816       unsigned UsesRemoved = 0;
1817       if (And->hasOneUse())
1818         ++UsesRemoved;
1819       if (Or->hasOneUse())
1820         ++UsesRemoved;
1821       if (LShr->hasOneUse())
1822         ++UsesRemoved;
1823 
1824       // Compute A & ((1 << B) | 1)
1825       Value *NewOr = nullptr;
1826       if (auto *C = dyn_cast<Constant>(B)) {
1827         if (UsesRemoved >= 1)
1828           NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1829       } else {
1830         if (UsesRemoved >= 3)
1831           NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1832                                                      /*HasNUW=*/true),
1833                                    One, Or->getName());
1834       }
1835       if (NewOr) {
1836         Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1837         return replaceOperand(Cmp, 0, NewAnd);
1838       }
1839     }
1840   }
1841 
1842   return nullptr;
1843 }
1844 
1845 /// Fold icmp (and X, Y), C.
1846 Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1847                                                    BinaryOperator *And,
1848                                                    const APInt &C) {
1849   if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1850     return I;
1851 
1852   // TODO: These all require that Y is constant too, so refactor with the above.
1853 
1854   // Try to optimize things like "A[i] & 42 == 0" to index computations.
1855   Value *X = And->getOperand(0);
1856   Value *Y = And->getOperand(1);
1857   if (auto *LI = dyn_cast<LoadInst>(X))
1858     if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1859       if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1860         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1861             !LI->isVolatile() && isa<ConstantInt>(Y)) {
1862           ConstantInt *C2 = cast<ConstantInt>(Y);
1863           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1864             return Res;
1865         }
1866 
1867   if (!Cmp.isEquality())
1868     return nullptr;
1869 
1870   // X & -C == -C -> X >u ~C
1871   // X & -C != -C -> X <=u ~C
1872   //   iff C is a power of 2
1873   if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) {
1874     auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1875                                                           : CmpInst::ICMP_ULE;
1876     return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1877   }
1878 
1879   // (X & C2) == 0 -> (trunc X) >= 0
1880   // (X & C2) != 0 -> (trunc X) <  0
1881   //   iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
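  // For example, for i32 %x: (%x & 128) == 0 --> (trunc %x to i8) s>= 0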
1882   const APInt *C2;
1883   if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) {
1884     int32_t ExactLogBase2 = C2->exactLogBase2();
1885     if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1886       Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1887       if (auto *AndVTy = dyn_cast<VectorType>(And->getType()))
1888         NTy = VectorType::get(NTy, AndVTy->getElementCount());
1889       Value *Trunc = Builder.CreateTrunc(X, NTy);
1890       auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1891                                                             : CmpInst::ICMP_SLT;
1892       return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
1893     }
1894   }
1895 
1896   return nullptr;
1897 }
1898 
1899 /// Fold icmp (or X, Y), C.
1900 Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
1901                                                   BinaryOperator *Or,
1902                                                   const APInt &C) {
1903   ICmpInst::Predicate Pred = Cmp.getPredicate();
1904   if (C.isOneValue()) {
1905     // icmp slt signum(V) 1 --> icmp slt V, 1
1906     Value *V = nullptr;
1907     if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1908       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1909                           ConstantInt::get(V->getType(), 1));
1910   }
1911 
1912   Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
1913   const APInt *MaskC;
1914   if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
1915     if (*MaskC == C && (C + 1).isPowerOf2()) {
1916       // X | C == C --> X <=u C
1917       // X | C != C --> X  >u C
1918       //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
1919       Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1920       return new ICmpInst(Pred, OrOp0, OrOp1);
1921     }
1922 
1923     // More general: canonicalize 'equality with set bits mask' to
1924     // 'equality with clear bits mask'.
1925     // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
1926     // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
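    // For example: (X | 3) == 7 --> (X & -4) == 4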
1927     if (Or->hasOneUse()) {
1928       Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
1929       Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
1930       return new ICmpInst(Pred, And, NewC);
1931     }
1932   }
1933 
1934   if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse())
1935     return nullptr;
1936 
1937   Value *P, *Q;
1938   if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1939     // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1940     // -> and (icmp eq P, null), (icmp eq Q, null).
1941     Value *CmpP =
1942         Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1943     Value *CmpQ =
1944         Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1945     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1946     return BinaryOperator::Create(BOpc, CmpP, CmpQ);
1947   }
1948 
1949   // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
1950   // a shorter form that has more potential to be folded even further.
1951   Value *X1, *X2, *X3, *X4;
1952   if (match(OrOp0, m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
1953       match(OrOp1, m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
1954     // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
1955     // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
1956     Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
1957     Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
1958     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1959     return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
1960   }
1961 
1962   return nullptr;
1963 }
1964 
1965 /// Fold icmp (mul X, Y), C.
1966 Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
1967                                                    BinaryOperator *Mul,
1968                                                    const APInt &C) {
1969   const APInt *MulC;
1970   if (!match(Mul->getOperand(1), m_APInt(MulC)))
1971     return nullptr;
1972 
1973   // If this is a test of the sign bit and the multiply is sign-preserving with
1974   // a constant operand, use the multiply LHS operand instead.
1975   ICmpInst::Predicate Pred = Cmp.getPredicate();
1976   if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
1977     if (MulC->isNegative())
1978       Pred = ICmpInst::getSwappedPredicate(Pred);
1979     return new ICmpInst(Pred, Mul->getOperand(0),
1980                         Constant::getNullValue(Mul->getType()));
1981   }
1982 
1983   // If the multiply does not wrap, try to divide the compare constant by the
1984   // multiplication factor.
1985   if (Cmp.isEquality() && !MulC->isNullValue()) {
1986     // (mul nsw X, MulC) == C --> X == C /s MulC
1987     if (Mul->hasNoSignedWrap() && C.srem(*MulC).isNullValue()) {
1988       Constant *NewC = ConstantInt::get(Mul->getType(), C.sdiv(*MulC));
1989       return new ICmpInst(Pred, Mul->getOperand(0), NewC);
1990     }
1991     // (mul nuw X, MulC) == C --> X == C /u MulC
1992     if (Mul->hasNoUnsignedWrap() && C.urem(*MulC).isNullValue()) {
1993       Constant *NewC = ConstantInt::get(Mul->getType(), C.udiv(*MulC));
1994       return new ICmpInst(Pred, Mul->getOperand(0), NewC);
1995     }
1996   }
1997 
1998   return nullptr;
1999 }
2000 
2001 /// Fold icmp (shl 1, Y), C.
2002 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
2003                                    const APInt &C) {
2004   Value *Y;
2005   if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
2006     return nullptr;
2007 
2008   Type *ShiftType = Shl->getType();
2009   unsigned TypeBits = C.getBitWidth();
2010   bool CIsPowerOf2 = C.isPowerOf2();
2011   ICmpInst::Predicate Pred = Cmp.getPredicate();
2012   if (Cmp.isUnsigned()) {
2013     // (1 << Y) pred C -> Y pred Log2(C)
2014     if (!CIsPowerOf2) {
2015       // (1 << Y) <  30 -> Y <= 4
2016       // (1 << Y) <= 30 -> Y <= 4
2017       // (1 << Y) >= 30 -> Y >  4
2018       // (1 << Y) >  30 -> Y >  4
2019       if (Pred == ICmpInst::ICMP_ULT)
2020         Pred = ICmpInst::ICMP_ULE;
2021       else if (Pred == ICmpInst::ICMP_UGE)
2022         Pred = ICmpInst::ICMP_UGT;
2023     }
2024 
2025     // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
2026     // (1 << Y) <  2147483648 -> Y <  31 -> Y != 31
2027     unsigned CLog2 = C.logBase2();
2028     if (CLog2 == TypeBits - 1) {
2029       if (Pred == ICmpInst::ICMP_UGE)
2030         Pred = ICmpInst::ICMP_EQ;
2031       else if (Pred == ICmpInst::ICMP_ULT)
2032         Pred = ICmpInst::ICMP_NE;
2033     }
2034     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
2035   } else if (Cmp.isSigned()) {
2036     Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2037     if (C.isAllOnesValue()) {
2038       // (1 << Y) <= -1 -> Y == 31
2039       if (Pred == ICmpInst::ICMP_SLE)
2040         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2041 
2042       // (1 << Y) >  -1 -> Y != 31
2043       if (Pred == ICmpInst::ICMP_SGT)
2044         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2045     } else if (!C) {
2046       // (1 << Y) <  0 -> Y == 31
2047       // (1 << Y) <= 0 -> Y == 31
2048       if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
2049         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2050 
2051       // (1 << Y) >= 0 -> Y != 31
2052       // (1 << Y) >  0 -> Y != 31
2053       if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
2054         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2055     }
2056   } else if (Cmp.isEquality() && CIsPowerOf2) {
2057     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2()));
2058   }
2059 
2060   return nullptr;
2061 }
2062 
2063 /// Fold icmp (shl X, Y), C.
2064 Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2065                                                    BinaryOperator *Shl,
2066                                                    const APInt &C) {
2067   const APInt *ShiftVal;
2068   if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2069     return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2070 
2071   const APInt *ShiftAmt;
2072   if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2073     return foldICmpShlOne(Cmp, Shl, C);
2074 
2075   // Check that the shift amount is in range. If not, don't perform undefined
2076   // shifts. When the shift is visited, it will be simplified.
2077   unsigned TypeBits = C.getBitWidth();
2078   if (ShiftAmt->uge(TypeBits))
2079     return nullptr;
2080 
2081   ICmpInst::Predicate Pred = Cmp.getPredicate();
2082   Value *X = Shl->getOperand(0);
2083   Type *ShType = Shl->getType();
2084 
2085   // NSW guarantees that we are only shifting out sign bits from the high bits,
2086   // so we can ASHR the compare constant without needing a mask and eliminate
2087   // the shift.
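  // For example: icmp sgt (shl nsw X, 3), 24 --> icmp sgt X, 3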
2088   if (Shl->hasNoSignedWrap()) {
2089     if (Pred == ICmpInst::ICMP_SGT) {
2090       // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2091       APInt ShiftedC = C.ashr(*ShiftAmt);
2092       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2093     }
2094     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2095         C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2096       APInt ShiftedC = C.ashr(*ShiftAmt);
2097       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2098     }
2099     if (Pred == ICmpInst::ICMP_SLT) {
2100       // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2101       // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2102       // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2103       // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2104       assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2105       APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2106       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2107     }
2108     // If this is a signed comparison to 0 and the shift is sign preserving,
2109     // use the shift LHS operand instead; isSignTest may change 'Pred', so only
2110     // do that if we're sure to not continue on in this function.
2111     if (isSignTest(Pred, C))
2112       return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
2113   }
2114 
2115   // NUW guarantees that we are only shifting out zero bits from the high bits,
2116   // so we can LSHR the compare constant without needing a mask and eliminate
2117   // the shift.
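  // For example: icmp ugt (shl nuw X, 2), 20 --> icmp ugt X, 5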
2118   if (Shl->hasNoUnsignedWrap()) {
2119     if (Pred == ICmpInst::ICMP_UGT) {
2120       // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2121       APInt ShiftedC = C.lshr(*ShiftAmt);
2122       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2123     }
2124     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2125         C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2126       APInt ShiftedC = C.lshr(*ShiftAmt);
2127       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2128     }
2129     if (Pred == ICmpInst::ICMP_ULT) {
2130       // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2131       // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2132       // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2133       // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2134       assert(C.ugt(0) && "ult 0 should have been eliminated");
2135       APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2136       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2137     }
2138   }
2139 
2140   if (Cmp.isEquality() && Shl->hasOneUse()) {
2141     // Strength-reduce the shift into an 'and'.
2142     Constant *Mask = ConstantInt::get(
2143         ShType,
2144         APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2145     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2146     Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2147     return new ICmpInst(Pred, And, LShrC);
2148   }
2149 
2150   // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2151   bool TrueIfSigned = false;
2152   if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2153     // (X << 31) <s 0  --> (X & 1) != 0
2154     Constant *Mask = ConstantInt::get(
2155         ShType,
2156         APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2157     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2158     return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2159                         And, Constant::getNullValue(ShType));
2160   }
2161 
2162   // Simplify 'shl' inequality test into 'and' equality test.
2163   if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2164     // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2165     if ((C + 1).isPowerOf2() &&
2166         (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2167       Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2168       return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2169                                                      : ICmpInst::ICMP_NE,
2170                           And, Constant::getNullValue(ShType));
2171     }
2172     // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2173     if (C.isPowerOf2() &&
2174         (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2175       Value *And =
2176           Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2177       return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2178                                                      : ICmpInst::ICMP_NE,
2179                           And, Constant::getNullValue(ShType));
2180     }
2181   }
2182 
2183   // Transform (icmp pred iM (shl iM %v, N), C)
2184   // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N))
2185   // Transform the shl to a trunc if (trunc (C>>N)) has no loss and i(M-N) is legal.
2186   // This enables us to get rid of the shift in favor of a trunc that may be
2187   // free on the target. It has the additional benefit of comparing to a
2188   // smaller constant that may be more target-friendly.
2189   unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2190   if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
2191       DL.isLegalInteger(TypeBits - Amt)) {
2192     Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2193     if (auto *ShVTy = dyn_cast<VectorType>(ShType))
2194       TruncTy = VectorType::get(TruncTy, ShVTy->getElementCount());
2195     Constant *NewC =
2196         ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2197     return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
2198   }
2199 
2200   return nullptr;
2201 }
2202 
2203 /// Fold icmp ({al}shr X, Y), C.
2204 Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2205                                                    BinaryOperator *Shr,
2206                                                    const APInt &C) {
2207   // An exact shr only shifts out zero bits, so:
2208   // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2209   Value *X = Shr->getOperand(0);
2210   CmpInst::Predicate Pred = Cmp.getPredicate();
2211   if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
2212       C.isNullValue())
2213     return new ICmpInst(Pred, X, Cmp.getOperand(1));
2214 
2215   const APInt *ShiftVal;
2216   if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2217     return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);
2218 
2219   const APInt *ShiftAmt;
2220   if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2221     return nullptr;
2222 
2223   // Check that the shift amount is in range. If not, don't perform undefined
2224   // shifts. When the shift is visited it will be simplified.
2225   unsigned TypeBits = C.getBitWidth();
2226   unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2227   if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2228     return nullptr;
2229 
2230   bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2231   bool IsExact = Shr->isExact();
2232   Type *ShrTy = Shr->getType();
2233   // TODO: If we could guarantee that InstSimplify would handle all of the
2234   // constant-value-based preconditions in the folds below, then we could assert
2235   // those conditions rather than checking them. This is difficult because of
2236   // undef/poison (PR34838).
2237   if (IsAShr) {
2238     if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) {
2239       // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC)
2240       // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC)
2241       APInt ShiftedC = C.shl(ShAmtVal);
2242       if (ShiftedC.ashr(ShAmtVal) == C)
2243         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2244     }
2245     if (Pred == CmpInst::ICMP_SGT) {
2246       // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2247       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2248       if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2249           (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2250         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2251     }
2252   } else {
2253     if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2254       // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2255       // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2256       APInt ShiftedC = C.shl(ShAmtVal);
2257       if (ShiftedC.lshr(ShAmtVal) == C)
2258         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2259     }
2260     if (Pred == CmpInst::ICMP_UGT) {
2261       // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2262       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2263       if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2264         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2265     }
2266   }
2267 
2268   if (!Cmp.isEquality())
2269     return nullptr;
2270 
2271   // Handle equality comparisons of shift-by-constant.
2272 
2273   // If the comparison constant changes with the shift, the comparison cannot
2274   // succeed (bits of the comparison constant cannot match the shifted value).
2275   // This should be known by InstSimplify and already be folded to true/false.
2276   assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2277           (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2278          "Expected icmp+shr simplify did not occur.");
2279 
2280   // If the bits shifted out are known zero, compare the unshifted value:
2281   //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
2282   if (Shr->isExact())
2283     return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2284 
2285   if (Shr->hasOneUse()) {
2286     // Canonicalize the shift into an 'and':
2287     // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2288     APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2289     Constant *Mask = ConstantInt::get(ShrTy, Val);
2290     Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2291     return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2292   }
2293 
2294   return nullptr;
2295 }
2296 
2297 Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2298                                                     BinaryOperator *SRem,
2299                                                     const APInt &C) {
2300   // Match an 'is positive' or 'is negative' comparison of remainder by a
2301   // constant power-of-2 value:
2302   // (X % pow2C) sgt/slt 0
2303   const ICmpInst::Predicate Pred = Cmp.getPredicate();
2304   if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT)
2305     return nullptr;
2306 
2307   // TODO: The one-use check is standard because we do not typically want to
2308   //       create longer instruction sequences, but this might be a special-case
2309   //       because srem is not good for analysis or codegen.
2310   if (!SRem->hasOneUse())
2311     return nullptr;
2312 
2313   const APInt *DivisorC;
2314   if (!C.isNullValue() || !match(SRem->getOperand(1), m_Power2(DivisorC)))
2315     return nullptr;
2316 
2317   // Mask off the sign bit and the modulo bits (low-bits).
2318   Type *Ty = SRem->getType();
2319   APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2320   Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2321   Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2322 
2323   // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2324   // bit is set. Example:
2325   // (i8 X % 32) s> 0 --> (X & 159) s> 0
2326   if (Pred == ICmpInst::ICMP_SGT)
2327     return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2328 
2329   // For 'is negative?' check that the sign-bit is set and at least 1 masked
2330   // bit is set. Example:
2331   // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2332   return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2333 }
2334 
2335 /// Fold icmp (udiv X, Y), C.
2336 Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2337                                                     BinaryOperator *UDiv,
2338                                                     const APInt &C) {
2339   const APInt *C2;
2340   if (!match(UDiv->getOperand(0), m_APInt(C2)))
2341     return nullptr;
2342 
2343   assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2344 
2345   // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
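  // For example: icmp ugt (udiv 15, Y), 3 --> icmp ule Y, 3  (since 15/4 == 3)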
2346   Value *Y = UDiv->getOperand(1);
2347   if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2348     assert(!C.isMaxValue() &&
2349            "icmp ugt X, UINT_MAX should have been simplified already.");
2350     return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2351                         ConstantInt::get(Y->getType(), C2->udiv(C + 1)));
2352   }
2353 
2354   // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2355   if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2356     assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2357     return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2358                         ConstantInt::get(Y->getType(), C2->udiv(C)));
2359   }
2360 
2361   return nullptr;
2362 }
2363 
2364 /// Fold icmp ({su}div X, Y), C.
2365 Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2366                                                    BinaryOperator *Div,
2367                                                    const APInt &C) {
2368   // Fold: icmp pred ([us]div X, C2), C -> range test
2369   // Fold this div into the comparison, producing a range check.
2370   // Determine, based on the divide type, what the range is being
2371   // checked.  If there is an overflow on the low or high side, remember
2372   // it, otherwise compute the range [low, hi) bounding the new value.
2373   // See: InsertRangeTest above for the kinds of replacements possible.
2374   const APInt *C2;
2375   if (!match(Div->getOperand(1), m_APInt(C2)))
2376     return nullptr;
2377 
2378   // FIXME: If the operand types don't match the type of the divide
2379   // then don't attempt this transform. The code below doesn't have the
2380   // logic to deal with a signed divide and an unsigned compare (and
2381   // vice versa). This is because (x /s C2) <s C  produces different
2382   // results than (x /s C2) <u C or (x /u C2) <s C or even
2383   // (x /u C2) <u C.  Simply casting the operands and result won't
2384   // work. :(  The if statement below tests that condition and bails
2385   // if it finds it.
2386   bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2387   if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2388     return nullptr;
2389 
2390   // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2391   // INT_MIN will also fail if the divisor is 1. Although folds of all these
2392   // division-by-constant cases should be present, we cannot assert that they
2393   // have happened before we reach this icmp instruction.
2394   if (C2->isNullValue() || C2->isOneValue() ||
2395       (DivIsSigned && C2->isAllOnesValue()))
2396     return nullptr;
2397 
2398   // Compute Prod = C * C2. We are essentially solving an equation of
2399   // form X / C2 = C. We solve for X by multiplying C2 and C.
2400   // By solving for X, we can turn this into a range check instead of computing
2401   // a divide.
2402   APInt Prod = C * *C2;
2403 
2404   // Determine if the product overflows by seeing if the product is not equal to
2405   // the divide. Make sure we do the same kind of divide as in the LHS
2406   // instruction that we're folding.
2407   bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2408 
2409   ICmpInst::Predicate Pred = Cmp.getPredicate();
2410 
2411   // If the division is known to be exact, then there is no remainder from the
2412   // divide, so the covered range size is one; otherwise it is the divisor.
2413   APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2414 
2415   // Figure out the interval that is being checked.  For example, a comparison
2416   // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2417   // Compute this interval based on the constants involved and the signedness of
2418   // the compare/divide.  This computes a half-open interval, keeping track of
2419   // whether either value in the interval overflows.  After analysis, each
2420   // overflow variable is set to 0 if its corresponding bound variable is valid,
2421   // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2422   int LoOverflow = 0, HiOverflow = 0;
2423   APInt LoBound, HiBound;
2424 
2425   if (!DivIsSigned) {  // udiv
2426     // e.g. X/5 op 3  --> [15, 20)
2427     LoBound = Prod;
2428     HiOverflow = LoOverflow = ProdOV;
2429     if (!HiOverflow) {
2430       // If this is not an exact divide, then many values in the range collapse
2431       // to the same result value.
2432       HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2433     }
2434   } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2435     if (C.isNullValue()) {       // (X / pos) op 0
2436       // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
2437       LoBound = -(RangeSize - 1);
2438       HiBound = RangeSize;
2439     } else if (C.isStrictlyPositive()) {   // (X / pos) op pos
2440       LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
2441       HiOverflow = LoOverflow = ProdOV;
2442       if (!HiOverflow)
2443         HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2444     } else {                       // (X / pos) op neg
2445       // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
2446       HiBound = Prod + 1;
2447       LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2448       if (!LoOverflow) {
2449         APInt DivNeg = -RangeSize;
2450         LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2451       }
2452     }
2453   } else if (C2->isNegative()) { // Divisor is < 0.
2454     if (Div->isExact())
2455       RangeSize.negate();
2456     if (C.isNullValue()) { // (X / neg) op 0
2457       // e.g. X/-5 op 0  --> [-4, 5)
2458       LoBound = RangeSize + 1;
2459       HiBound = -RangeSize;
2460       if (HiBound == *C2) {        // -INTMIN = INTMIN
2461         HiOverflow = 1;            // [INTMIN+1, overflow)
2462         HiBound = APInt();         // e.g. X/INTMIN = 0 --> X > INTMIN
2463       }
2464     } else if (C.isStrictlyPositive()) {   // (X / neg) op pos
2465       // e.g. X/-5 op 3  --> [-19, -14)
2466       HiBound = Prod + 1;
2467       HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2468       if (!LoOverflow)
2469         LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
2470     } else {                       // (X / neg) op neg
2471       LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
2472       LoOverflow = HiOverflow = ProdOV;
2473       if (!HiOverflow)
2474         HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2475     }
2476 
2477     // Dividing by a negative swaps the condition.  LT <-> GT
2478     Pred = ICmpInst::getSwappedPredicate(Pred);
2479   }
2480 
2481   Value *X = Div->getOperand(0);
2482   switch (Pred) {
2483     default: llvm_unreachable("Unhandled icmp opcode!");
2484     case ICmpInst::ICMP_EQ:
2485       if (LoOverflow && HiOverflow)
2486         return replaceInstUsesWith(Cmp, Builder.getFalse());
2487       if (HiOverflow)
2488         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2489                             ICmpInst::ICMP_UGE, X,
2490                             ConstantInt::get(Div->getType(), LoBound));
2491       if (LoOverflow)
2492         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2493                             ICmpInst::ICMP_ULT, X,
2494                             ConstantInt::get(Div->getType(), HiBound));
2495       return replaceInstUsesWith(
2496           Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2497     case ICmpInst::ICMP_NE:
2498       if (LoOverflow && HiOverflow)
2499         return replaceInstUsesWith(Cmp, Builder.getTrue());
2500       if (HiOverflow)
2501         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2502                             ICmpInst::ICMP_ULT, X,
2503                             ConstantInt::get(Div->getType(), LoBound));
2504       if (LoOverflow)
2505         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2506                             ICmpInst::ICMP_UGE, X,
2507                             ConstantInt::get(Div->getType(), HiBound));
2508       return replaceInstUsesWith(Cmp,
2509                                  insertRangeTest(X, LoBound, HiBound,
2510                                                  DivIsSigned, false));
2511     case ICmpInst::ICMP_ULT:
2512     case ICmpInst::ICMP_SLT:
2513       if (LoOverflow == +1)   // Low bound is greater than input range.
2514         return replaceInstUsesWith(Cmp, Builder.getTrue());
2515       if (LoOverflow == -1)   // Low bound is less than input range.
2516         return replaceInstUsesWith(Cmp, Builder.getFalse());
2517       return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound));
2518     case ICmpInst::ICMP_UGT:
2519     case ICmpInst::ICMP_SGT:
2520       if (HiOverflow == +1)       // High bound greater than input range.
2521         return replaceInstUsesWith(Cmp, Builder.getFalse());
2522       if (HiOverflow == -1)       // High bound less than input range.
2523         return replaceInstUsesWith(Cmp, Builder.getTrue());
2524       if (Pred == ICmpInst::ICMP_UGT)
2525         return new ICmpInst(ICmpInst::ICMP_UGE, X,
2526                             ConstantInt::get(Div->getType(), HiBound));
2527       return new ICmpInst(ICmpInst::ICMP_SGE, X,
2528                           ConstantInt::get(Div->getType(), HiBound));
2529   }
2530 
2531   return nullptr;
2532 }
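// Illustrative example of the range logic above (operand names and constants
// are hypothetical; later passes may canonicalize the result differently):
//   icmp eq (udiv i32 %x, 5), 3
// is the range check "%x in [15, 20)" and so becomes roughly
//   icmp ult (add i32 %x, -15), 5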
2533 
2534 /// Fold icmp (sub X, Y), C.
2535 Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2536                                                    BinaryOperator *Sub,
2537                                                    const APInt &C) {
2538   Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2539   ICmpInst::Predicate Pred = Cmp.getPredicate();
2540   const APInt *C2;
2541   APInt SubResult;
2542 
2543   // icmp eq/ne (sub C, Y), C -> icmp eq/ne Y, 0
2544   if (match(X, m_APInt(C2)) && *C2 == C && Cmp.isEquality())
2545     return new ICmpInst(Cmp.getPredicate(), Y,
2546                         ConstantInt::get(Y->getType(), 0));
2547 
2548   // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2549   if (match(X, m_APInt(C2)) &&
2550       ((Cmp.isUnsigned() && Sub->hasNoUnsignedWrap()) ||
2551        (Cmp.isSigned() && Sub->hasNoSignedWrap())) &&
2552       !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2553     return new ICmpInst(Cmp.getSwappedPredicate(), Y,
2554                         ConstantInt::get(Y->getType(), SubResult));
2555 
2556   // The following transforms are only worth it if the only user of the subtract
2557   // is the icmp.
2558   if (!Sub->hasOneUse())
2559     return nullptr;
2560 
2561   if (Sub->hasNoSignedWrap()) {
2562     // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2563     if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue())
2564       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2565 
2566     // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2567     if (Pred == ICmpInst::ICMP_SGT && C.isNullValue())
2568       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2569 
2570     // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2571     if (Pred == ICmpInst::ICMP_SLT && C.isNullValue())
2572       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2573 
2574     // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2575     if (Pred == ICmpInst::ICMP_SLT && C.isOneValue())
2576       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2577   }
2578 
2579   if (!match(X, m_APInt(C2)))
2580     return nullptr;
2581 
2582   // C2 - Y <u C -> (Y | (C - 1)) == C2
2583   //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
2584   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
2585       (*C2 & (C - 1)) == (C - 1))
2586     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
2587 
2588   // C2 - Y >u C -> (Y | C) != C2
2589   //   iff C2 & C == C and C + 1 is a power of 2
2590   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
2591     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
2592 
2593   return nullptr;
2594 }
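// Illustrative example of the last fold above (hypothetical constants; the
// subtract must have no other users):
//   icmp ult (sub i8 7, %y), 4     ; C2 = 7, C = 4, (7 & 3) == 3
// becomes
//   icmp eq (or i8 %y, 3), 7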
2595 
2596 /// Fold icmp (add X, Y), C.
2597 Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
2598                                                    BinaryOperator *Add,
2599                                                    const APInt &C) {
2600   Value *Y = Add->getOperand(1);
2601   const APInt *C2;
2602   if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2603     return nullptr;
2604 
2605   // Fold icmp pred (add X, C2), C.
2606   Value *X = Add->getOperand(0);
2607   Type *Ty = Add->getType();
2608   CmpInst::Predicate Pred = Cmp.getPredicate();
2609 
2610   // If the add does not wrap, we can always adjust the compare by subtracting
2611   // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
2612   // are canonicalized to SGT/SLT/UGT/ULT.
2613   if ((Add->hasNoSignedWrap() &&
2614        (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
2615       (Add->hasNoUnsignedWrap() &&
2616        (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
2617     bool Overflow;
2618     APInt NewC =
2619         Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
2620     // If there is overflow, the result must be true or false.
2621     // TODO: Can we assert there is no overflow because InstSimplify always
2622     // handles those cases?
2623     if (!Overflow)
2624       // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2625       return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2626   }
2627 
2628   auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
2629   const APInt &Upper = CR.getUpper();
2630   const APInt &Lower = CR.getLower();
2631   if (Cmp.isSigned()) {
2632     if (Lower.isSignMask())
2633       return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2634     if (Upper.isSignMask())
2635       return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2636   } else {
2637     if (Lower.isMinValue())
2638       return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2639     if (Upper.isMinValue())
2640       return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2641   }
2642 
2643   if (!Add->hasOneUse())
2644     return nullptr;
2645 
2646   // X+C <u C2 -> (X & -C2) == -C
2647   //   iff C & (C2-1) == 0
2648   //       C2 is a power of 2
2649   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
2650     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
2651                         ConstantExpr::getNeg(cast<Constant>(Y)));
2652 
2653   // X+C >u C2 -> (X & ~C2) != -C
2654   //   iff C & C2 == 0
2655   //       C2+1 is a power of 2
2656   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
2657     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
2658                         ConstantExpr::getNeg(cast<Constant>(Y)));
2659 
2660   return nullptr;
2661 }
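// Illustrative example of the no-wrap adjustment above (hypothetical values):
//   icmp sgt (add nsw i32 %x, 5), 10
// becomes
//   icmp sgt i32 %x, 5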
2662 
2663 bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2664                                                Value *&RHS, ConstantInt *&Less,
2665                                                ConstantInt *&Equal,
2666                                                ConstantInt *&Greater) {
2667   // TODO: Generalize this to work with other comparison idioms or ensure
2668   // they get canonicalized into this form.
2669 
2670   // select i1 (a == b),
2671   //        i32 Equal,
2672   //        i32 (select i1 (a < b), i32 Less, i32 Greater)
2673   // where Equal, Less and Greater are placeholders for any three constants.
2674   ICmpInst::Predicate PredA;
2675   if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
2676       !ICmpInst::isEquality(PredA))
2677     return false;
2678   Value *EqualVal = SI->getTrueValue();
2679   Value *UnequalVal = SI->getFalseValue();
2680   // We can still get a non-canonical predicate here, so canonicalize.
2681   if (PredA == ICmpInst::ICMP_NE)
2682     std::swap(EqualVal, UnequalVal);
2683   if (!match(EqualVal, m_ConstantInt(Equal)))
2684     return false;
2685   ICmpInst::Predicate PredB;
2686   Value *LHS2, *RHS2;
2687   if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
2688                                   m_ConstantInt(Less), m_ConstantInt(Greater))))
2689     return false;
2690   // We can get predicate mismatch here, so canonicalize if possible:
2691   // First, ensure that 'LHS' matches.
2692   if (LHS2 != LHS) {
2693     // x sgt y <--> y slt x
2694     std::swap(LHS2, RHS2);
2695     PredB = ICmpInst::getSwappedPredicate(PredB);
2696   }
2697   if (LHS2 != LHS)
2698     return false;
2699   // We also need to canonicalize 'RHS'.
2700   if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
2701     // x sgt C-1  <-->  x sge C  <-->  not(x slt C)
2702     auto FlippedStrictness =
2703         InstCombiner::getFlippedStrictnessPredicateAndConstant(
2704             PredB, cast<Constant>(RHS2));
2705     if (!FlippedStrictness)
2706       return false;
2707     assert(FlippedStrictness->first == ICmpInst::ICMP_SGE && "Sanity check");
2708     RHS2 = FlippedStrictness->second;
2709     // And kind-of perform the result swap.
2710     std::swap(Less, Greater);
2711     PredB = ICmpInst::ICMP_SLT;
2712   }
2713   return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
2714 }
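// One concrete IR shape that the matcher above accepts (names and constants
// are illustrative only):
//   %eq   = icmp eq i32 %a, %b
//   %lt   = icmp slt i32 %a, %b
//   %sel1 = select i1 %lt, i32 -1, i32 1
//   %sel2 = select i1 %eq, i32 0, i32 %sel1
// Here LHS = %a, RHS = %b, Less = -1, Equal = 0, Greater = 1.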
2715 
2716 Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
2717                                                       SelectInst *Select,
2718                                                       ConstantInt *C) {
2719 
2720   assert(C && "Cmp RHS should be a constant int!");
2721   // If we're testing a constant value against the result of a three way
2722   // comparison, the result can be expressed directly in terms of the
2723   // original values being compared.  Note: We could possibly be more
2724   // aggressive here and remove the hasOneUse test. The original select is
2725   // really likely to simplify or sink when we remove a test of the result.
2726   Value *OrigLHS, *OrigRHS;
2727   ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
2728   if (Cmp.hasOneUse() &&
2729       matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
2730                               C3GreaterThan)) {
2731     assert(C1LessThan && C2Equal && C3GreaterThan);
2732 
2733     bool TrueWhenLessThan =
2734         ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
2735             ->isAllOnesValue();
2736     bool TrueWhenEqual =
2737         ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
2738             ->isAllOnesValue();
2739     bool TrueWhenGreaterThan =
2740         ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
2741             ->isAllOnesValue();
2742 
2743     // This generates the new instruction that will replace the original Cmp
2744     // Instruction. Instead of enumerating the various combinations when
2745     // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
2746     // false, we rely on chaining of ORs and future passes of InstCombine to
2747     // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
2748 
2749     // When none of the three constants satisfy the predicate for the RHS (C),
2750     // the entire original Cmp can be simplified to false.
2751     Value *Cond = Builder.getFalse();
2752     if (TrueWhenLessThan)
2753       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
2754                                                        OrigLHS, OrigRHS));
2755     if (TrueWhenEqual)
2756       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
2757                                                        OrigLHS, OrigRHS));
2758     if (TrueWhenGreaterThan)
2759       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
2760                                                        OrigLHS, OrigRHS));
2761 
2762     return replaceInstUsesWith(Cmp, Cond);
2763   }
2764   return nullptr;
2765 }
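// Illustrative example (hypothetical values; assume %sel2 is the three-way
// compare idiom shown above with Less = -1, Equal = 0, Greater = 1, and that
// the icmp itself has a single use):
//   icmp sgt i32 %sel2, 0
// Only the "greater" constant satisfies the predicate, so the OR chain built
// above reduces to
//   icmp sgt i32 %a, %b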
2766 
2767 static Instruction *foldICmpBitCast(ICmpInst &Cmp,
2768                                     InstCombiner::BuilderTy &Builder) {
2769   auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
2770   if (!Bitcast)
2771     return nullptr;
2772 
2773   ICmpInst::Predicate Pred = Cmp.getPredicate();
2774   Value *Op1 = Cmp.getOperand(1);
2775   Value *BCSrcOp = Bitcast->getOperand(0);
2776 
2777   // Make sure the bitcast doesn't change the number of vector elements.
2778   if (Bitcast->getSrcTy()->getScalarSizeInBits() ==
2779           Bitcast->getDestTy()->getScalarSizeInBits()) {
2780     // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
2781     Value *X;
2782     if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
2783       // icmp  eq (bitcast (sitofp X)), 0 --> icmp  eq X, 0
2784       // icmp  ne (bitcast (sitofp X)), 0 --> icmp  ne X, 0
2785       // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
2786       // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
2787       if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
2788            Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
2789           match(Op1, m_Zero()))
2790         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2791 
2792       // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
2793       if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
2794         return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
2795 
2796       // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
2797       if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
2798         return new ICmpInst(Pred, X,
2799                             ConstantInt::getAllOnesValue(X->getType()));
2800     }
2801 
2802     // Zero-equality checks are preserved through unsigned floating-point casts:
2803     // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
2804     // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
2805     if (match(BCSrcOp, m_UIToFP(m_Value(X))))
2806       if (Cmp.isEquality() && match(Op1, m_Zero()))
2807         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2808 
2809     // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
2810     // the FP extend/truncate because that cast does not change the sign-bit.
2811     // This is true for all standard IEEE-754 types and the X86 80-bit type.
2812     // The sign-bit is always the most significant bit in those types.
2813     const APInt *C;
2814     bool TrueIfSigned;
2815     if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse() &&
2816         InstCombiner::isSignBitCheck(Pred, *C, TrueIfSigned)) {
2817       if (match(BCSrcOp, m_FPExt(m_Value(X))) ||
2818           match(BCSrcOp, m_FPTrunc(m_Value(X)))) {
2819         // (bitcast (fpext/fptrunc X) to iX) < 0  --> (bitcast X to iY) < 0
2820         // (bitcast (fpext/fptrunc X) to iX) > -1 --> (bitcast X to iY) > -1
2821         Type *XType = X->getType();
2822 
2823         // We can't currently handle Power style floating point operations here.
2824         if (!(XType->isPPC_FP128Ty() || BCSrcOp->getType()->isPPC_FP128Ty())) {
2825 
2826           Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
2827           if (auto *XVTy = dyn_cast<VectorType>(XType))
2828             NewType = VectorType::get(NewType, XVTy->getElementCount());
2829           Value *NewBitcast = Builder.CreateBitCast(X, NewType);
2830           if (TrueIfSigned)
2831             return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
2832                                 ConstantInt::getNullValue(NewType));
2833           else
2834             return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
2835                                 ConstantInt::getAllOnesValue(NewType));
2836         }
2837       }
2838     }
2839   }
2840 
2841   // Test to see if the operands of the icmp are casted versions of other
2842   // values. If the ptr->ptr cast can be stripped off both arguments, do so.
2843   if (Bitcast->getType()->isPointerTy() &&
2844       (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
2845     // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
2846     // so eliminate it as well.
2847     if (auto *BC2 = dyn_cast<BitCastInst>(Op1))
2848       Op1 = BC2->getOperand(0);
2849 
2850     Op1 = Builder.CreateBitCast(Op1, BCSrcOp->getType());
2851     return new ICmpInst(Pred, BCSrcOp, Op1);
2852   }
2853 
2854   // Folding: icmp <pred> iN X, C
2855   //  where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC) to iN
2856   //    and C is a splat of a K-bit pattern
2857   //    and SC is a constant vector = <C', C', C', ..., C'>
2858   // Into:
2859   //   %E = extractelement <M x iK> %vec, i32 C'
2860   //   icmp <pred> iK %E, trunc(C)
2861   const APInt *C;
2862   if (!match(Cmp.getOperand(1), m_APInt(C)) ||
2863       !Bitcast->getType()->isIntegerTy() ||
2864       !Bitcast->getSrcTy()->isIntOrIntVectorTy())
2865     return nullptr;
2866 
2867   Value *Vec;
2868   ArrayRef<int> Mask;
2869   if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
2870     // Check whether every element of Mask is the same constant
2871     if (is_splat(Mask)) {
2872       auto *VecTy = cast<VectorType>(BCSrcOp->getType());
2873       auto *EltTy = cast<IntegerType>(VecTy->getElementType());
2874       if (C->isSplat(EltTy->getBitWidth())) {
2875         // Fold the icmp based on the value of C.
2876         // If C is M copies of an iK sized bit pattern,
2877         // then:
2878         //   =>  %E = extractelement <M x iK> %vec, i32 Elem
2879         //       icmp <pred> iK %E, trunc(C)
2880         Value *Elem = Builder.getInt32(Mask[0]);
2881         Value *Extract = Builder.CreateExtractElement(Vec, Elem);
2882         Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
2883         return new ICmpInst(Pred, Extract, NewC);
2884       }
2885     }
2886   }
2887   return nullptr;
2888 }
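// Illustrative example of the splat-shuffle fold above (hypothetical values):
//   %s = shufflevector <4 x i8> %vec, <4 x i8> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
//   %b = bitcast <4 x i8> %s to i32
//   icmp eq i32 %b, 707406378      ; 0x2A2A2A2A, a splat of the i8 pattern 0x2A
// becomes
//   %e = extractelement <4 x i8> %vec, i32 1
//   icmp eq i8 %e, 42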
2889 
2890 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2891 /// where X is some kind of instruction.
2892 Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
2893   const APInt *C;
2894   if (!match(Cmp.getOperand(1), m_APInt(C)))
2895     return nullptr;
2896 
2897   if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) {
2898     switch (BO->getOpcode()) {
2899     case Instruction::Xor:
2900       if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C))
2901         return I;
2902       break;
2903     case Instruction::And:
2904       if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C))
2905         return I;
2906       break;
2907     case Instruction::Or:
2908       if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C))
2909         return I;
2910       break;
2911     case Instruction::Mul:
2912       if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C))
2913         return I;
2914       break;
2915     case Instruction::Shl:
2916       if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C))
2917         return I;
2918       break;
2919     case Instruction::LShr:
2920     case Instruction::AShr:
2921       if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C))
2922         return I;
2923       break;
2924     case Instruction::SRem:
2925       if (Instruction *I = foldICmpSRemConstant(Cmp, BO, *C))
2926         return I;
2927       break;
2928     case Instruction::UDiv:
2929       if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C))
2930         return I;
2931       LLVM_FALLTHROUGH;
2932     case Instruction::SDiv:
2933       if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C))
2934         return I;
2935       break;
2936     case Instruction::Sub:
2937       if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C))
2938         return I;
2939       break;
2940     case Instruction::Add:
2941       if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C))
2942         return I;
2943       break;
2944     default:
2945       break;
2946     }
2947     // TODO: These folds could be refactored to be part of the above calls.
2948     if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C))
2949       return I;
2950   }
2951 
2952   // Match against CmpInst LHS being instructions other than binary operators.
2953 
2954   if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) {
2955     // For now, we only support constant integers while folding the
2956     // ICMP(SELECT) pattern. We can extend this to support vectors of integers
2957     // similar to the cases handled by binary ops above.
2958     if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
2959       if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
2960         return I;
2961   }
2962 
2963   if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) {
2964     if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
2965       return I;
2966   }
2967 
2968   if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
2969     if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
2970       return I;
2971 
2972   return nullptr;
2973 }
2974 
2975 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2976 /// icmp eq/ne BO, C.
2977 Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
2978     ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
2979   // TODO: Some of these folds could work with arbitrary constants, but this
2980   // function is limited to scalar and vector splat constants.
2981   if (!Cmp.isEquality())
2982     return nullptr;
2983 
2984   ICmpInst::Predicate Pred = Cmp.getPredicate();
2985   bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2986   Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2987   Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2988 
2989   switch (BO->getOpcode()) {
2990   case Instruction::SRem:
2991     // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
2992     if (C.isNullValue() && BO->hasOneUse()) {
2993       const APInt *BOC;
2994       if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2995         Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
2996         return new ICmpInst(Pred, NewRem,
2997                             Constant::getNullValue(BO->getType()));
2998       }
2999     }
3000     break;
3001   case Instruction::Add: {
3002     // Replace ((add A, B) != C) with (A != C-B) if B and C are constants.
3003     if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3004       if (BO->hasOneUse())
3005         return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, BOC));
3006     } else if (C.isNullValue()) {
3007       // Replace ((add A, B) != 0) with (A != -B) if A or B is
3008       // efficiently invertible, or if the add has just this one use.
3009       if (Value *NegVal = dyn_castNegVal(BOp1))
3010         return new ICmpInst(Pred, BOp0, NegVal);
3011       if (Value *NegVal = dyn_castNegVal(BOp0))
3012         return new ICmpInst(Pred, NegVal, BOp1);
3013       if (BO->hasOneUse()) {
3014         Value *Neg = Builder.CreateNeg(BOp1);
3015         Neg->takeName(BO);
3016         return new ICmpInst(Pred, BOp0, Neg);
3017       }
3018     }
3019     break;
3020   }
3021   case Instruction::Xor:
3022     if (BO->hasOneUse()) {
3023       if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3024         // For the xor case, we can xor two constants together, eliminating
3025         // the explicit xor.
3026         return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3027       } else if (C.isNullValue()) {
3028         // Replace ((xor A, B) != 0) with (A != B)
3029         return new ICmpInst(Pred, BOp0, BOp1);
3030       }
3031     }
3032     break;
3033   case Instruction::Sub:
3034     if (BO->hasOneUse()) {
3035       // Only check for constant LHS here, as constant RHS will be canonicalized
3036       // to add and use the fold above.
3037       if (Constant *BOC = dyn_cast<Constant>(BOp0)) {
3038         // Replace ((sub BOC, B) != C) with (B != BOC-C).
3039         return new ICmpInst(Pred, BOp1, ConstantExpr::getSub(BOC, RHS));
3040       } else if (C.isNullValue()) {
3041         // Replace ((sub A, B) != 0) with (A != B).
3042         return new ICmpInst(Pred, BOp0, BOp1);
3043       }
3044     }
3045     break;
3046   case Instruction::Or: {
3047     const APInt *BOC;
3048     if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3049       // Comparing if all bits outside of a constant mask are set?
3050       // Replace (X | C) == -1 with (X & ~C) == ~C.
3051       // This removes the -1 constant.
3052       Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3053       Value *And = Builder.CreateAnd(BOp0, NotBOC);
3054       return new ICmpInst(Pred, And, NotBOC);
3055     }
3056     break;
3057   }
3058   case Instruction::And: {
3059     const APInt *BOC;
3060     if (match(BOp1, m_APInt(BOC))) {
3061       // If we have ((X & C) == C), turn it into ((X & C) != 0).
3062       if (C == *BOC && C.isPowerOf2())
3063         return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
3064                             BO, Constant::getNullValue(RHS->getType()));
3065     }
3066     break;
3067   }
3068   case Instruction::UDiv:
3069     if (C.isNullValue()) {
3070       // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
3071       auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3072       return new ICmpInst(NewPred, BOp1, BOp0);
3073     }
3074     break;
3075   default:
3076     break;
3077   }
3078   return nullptr;
3079 }
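// Illustrative example of the udiv case above (hypothetical operands):
//   icmp eq (udiv i32 %a, %b), 0
// becomes
//   icmp ugt i32 %b, %a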
3080 
3081 /// Fold an equality icmp with LLVM intrinsic and constant operand.
3082 Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3083     ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3084   Type *Ty = II->getType();
3085   unsigned BitWidth = C.getBitWidth();
3086   switch (II->getIntrinsicID()) {
3087   case Intrinsic::abs:
3088     // abs(A) == 0  ->  A == 0
3089     // abs(A) == INT_MIN  ->  A == INT_MIN
3090     if (C.isNullValue() || C.isMinSignedValue())
3091       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3092                           ConstantInt::get(Ty, C));
3093     break;
3094 
3095   case Intrinsic::bswap:
3096     // bswap(A) == C  ->  A == bswap(C)
3097     return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3098                         ConstantInt::get(Ty, C.byteSwap()));
3099 
3100   case Intrinsic::ctlz:
3101   case Intrinsic::cttz: {
3102     // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
3103     if (C == BitWidth)
3104       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3105                           ConstantInt::getNullValue(Ty));
3106 
3107     // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3108     // and Mask1 has bits 0..C (inclusive) set. Similar for ctlz, but for high bits.
3109     // Limit to one use to ensure we don't increase instruction count.
3110     unsigned Num = C.getLimitedValue(BitWidth);
3111     if (Num != BitWidth && II->hasOneUse()) {
3112       bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3113       APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3114                                : APInt::getHighBitsSet(BitWidth, Num + 1);
3115       APInt Mask2 = IsTrailing
3116         ? APInt::getOneBitSet(BitWidth, Num)
3117         : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3118       return new ICmpInst(Cmp.getPredicate(),
3119           Builder.CreateAnd(II->getArgOperand(0), Mask1),
3120           ConstantInt::get(Ty, Mask2));
3121     }
3122     break;
3123   }
3124 
3125   case Intrinsic::ctpop: {
3126     // popcount(A) == 0  ->  A == 0 and likewise for !=
3127     // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
3128     bool IsZero = C.isNullValue();
3129     if (IsZero || C == BitWidth)
3130       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3131           IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty));
3132 
3133     break;
3134   }
3135 
3136   case Intrinsic::uadd_sat: {
3137     // uadd.sat(a, b) == 0  ->  (a | b) == 0
3138     if (C.isNullValue()) {
3139       Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3140       return new ICmpInst(Cmp.getPredicate(), Or, Constant::getNullValue(Ty));
3141     }
3142     break;
3143   }
3144 
3145   case Intrinsic::usub_sat: {
3146     // usub.sat(a, b) == 0  ->  a <= b
3147     if (C.isNullValue()) {
3148       ICmpInst::Predicate NewPred = Cmp.getPredicate() == ICmpInst::ICMP_EQ
3149           ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3150       return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3151     }
3152     break;
3153   }
3154   default:
3155     break;
3156   }
3157 
3158   return nullptr;
3159 }
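// Illustrative example of the cttz case above (hypothetical values; the
// intrinsic call must have no other users):
//   %z = call i32 @llvm.cttz.i32(i32 %x, i1 false)
//   icmp eq i32 %z, 2
// becomes
//   icmp eq (and i32 %x, 7), 4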
3160 
3161 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
3162 Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
3163                                                              IntrinsicInst *II,
3164                                                              const APInt &C) {
3165   if (Cmp.isEquality())
3166     return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
3167 
3168   Type *Ty = II->getType();
3169   unsigned BitWidth = C.getBitWidth();
3170   ICmpInst::Predicate Pred = Cmp.getPredicate();
3171   switch (II->getIntrinsicID()) {
3172   case Intrinsic::ctpop: {
3173     // (ctpop X > BitWidth - 1) --> X == -1
3174     Value *X = II->getArgOperand(0);
3175     if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
3176       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
3177                              ConstantInt::getAllOnesValue(Ty));
3178     // (ctpop X < BitWidth) --> X != -1
3179     if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
3180       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
3181                              ConstantInt::getAllOnesValue(Ty));
3182     break;
3183   }
3184   case Intrinsic::ctlz: {
3185     // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
3186     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3187       unsigned Num = C.getLimitedValue();
3188       APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3189       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
3190                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3191     }
3192 
3193     // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
3194     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3195       unsigned Num = C.getLimitedValue();
3196       APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
3197       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
3198                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3199     }
3200     break;
3201   }
3202   case Intrinsic::cttz: {
3203     // Limit to one use to ensure we don't increase instruction count.
3204     if (!II->hasOneUse())
3205       return nullptr;
3206 
3207     // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
3208     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3209       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
3210       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
3211                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3212                              ConstantInt::getNullValue(Ty));
3213     }
3214 
3215     // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
3216     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3217       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
3218       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
3219                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3220                              ConstantInt::getNullValue(Ty));
3221     }
3222     break;
3223   }
3224   default:
3225     break;
3226   }
3227 
3228   return nullptr;
3229 }
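// Illustrative example of the ctlz case above (hypothetical values):
//   %z = call i8 @llvm.ctlz.i8(i8 %x, i1 false)
//   icmp ugt i8 %z, 3
// becomes
//   icmp ult i8 %x, 16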
3230 
3231 /// Handle icmp with constant (but not simple integer constant) RHS.
3232 Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
3233   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3234   Constant *RHSC = dyn_cast<Constant>(Op1);
3235   Instruction *LHSI = dyn_cast<Instruction>(Op0);
3236   if (!RHSC || !LHSI)
3237     return nullptr;
3238 
3239   switch (LHSI->getOpcode()) {
3240   case Instruction::GetElementPtr:
3241     // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
3242     if (RHSC->isNullValue() &&
3243         cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
3244       return new ICmpInst(
3245           I.getPredicate(), LHSI->getOperand(0),
3246           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3247     break;
3248   case Instruction::PHI:
3249     // Only fold icmp into the PHI if the phi and icmp are in the same
3250     // block.  If in the same block, we're encouraging jump threading.  If
3251     // not, we are just pessimizing the code by making an i1 phi.
3252     if (LHSI->getParent() == I.getParent())
3253       if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
3254         return NV;
3255     break;
3256   case Instruction::Select: {
3257     // If either operand of the select is a constant, we can fold the
3258     // comparison into the select arms, which will cause one to be
3259     // constant folded and the select turned into a bitwise or.
3260     Value *Op1 = nullptr, *Op2 = nullptr;
3261     ConstantInt *CI = nullptr;
3262     if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
3263       Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3264       CI = dyn_cast<ConstantInt>(Op1);
3265     }
3266     if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
3267       Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3268       CI = dyn_cast<ConstantInt>(Op2);
3269     }
3270 
3271     // We only want to perform this transformation if it will not lead to
3272     // additional code. This is true if either both sides of the select
3273     // fold to a constant (in which case the icmp is replaced with a select
3274     // which will usually simplify) or this is the only user of the
3275     // select (in which case we are trading a select+icmp for a simpler
3276     // select+icmp) or all uses of the select can be replaced based on
3277     // dominance information ("Global cases").
3278     bool Transform = false;
3279     if (Op1 && Op2)
3280       Transform = true;
3281     else if (Op1 || Op2) {
3282       // Local case
3283       if (LHSI->hasOneUse())
3284         Transform = true;
3285       // Global cases
3286       else if (CI && !CI->isZero())
3287         // When Op1 is constant try replacing select with second operand.
3288         // Otherwise Op2 is constant and try replacing select with first
3289         // operand.
3290         Transform =
3291             replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
3292     }
3293     if (Transform) {
3294       if (!Op1)
3295         Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
3296                                  I.getName());
3297       if (!Op2)
3298         Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
3299                                  I.getName());
3300       return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
3301     }
3302     break;
3303   }
3304   case Instruction::IntToPtr:
3305     // icmp pred inttoptr(X), null -> icmp pred X, 0
3306     if (RHSC->isNullValue() &&
3307         DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
3308       return new ICmpInst(
3309           I.getPredicate(), LHSI->getOperand(0),
3310           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3311     break;
3312 
3313   case Instruction::Load:
3314     // Try to optimize things like "A[i] > 4" to index computations.
3315     if (GetElementPtrInst *GEP =
3316             dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
3317       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
3318         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
3319             !cast<LoadInst>(LHSI)->isVolatile())
3320           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
3321             return Res;
3322     }
3323     break;
3324   }
3325 
3326   return nullptr;
3327 }
3328 
3329 /// Some comparisons can be simplified.
3330 /// In this case, we are looking for comparisons that look like
3331 /// a check for a lossy truncation.
3332 /// Folds:
3333 ///   icmp SrcPred (x & Mask), x    to    icmp DstPred x, Mask
3334 /// Where Mask is some pattern that produces all-ones in low bits:
3335 ///    (-1 >> y)
3336 ///    ((-1 << y) >> y)     <- non-canonical, has extra uses
3337 ///   ~(-1 << y)
3338 ///    ((1 << y) + (-1))    <- non-canonical, has extra uses
3339 /// The Mask can be a constant, too.
3340 /// For some predicates, the operands are commutative.
3341 /// For others, x can only be on a specific side.
3342 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
3343                                           InstCombiner::BuilderTy &Builder) {
3344   ICmpInst::Predicate SrcPred;
3345   Value *X, *M, *Y;
3346   auto m_VariableMask = m_CombineOr(
3347       m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
3348                   m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
3349       m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
3350                   m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
3351   auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
3352   if (!match(&I, m_c_ICmp(SrcPred,
3353                           m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)),
3354                           m_Deferred(X))))
3355     return nullptr;
3356 
3357   ICmpInst::Predicate DstPred;
3358   switch (SrcPred) {
3359   case ICmpInst::Predicate::ICMP_EQ:
3360     //  x & (-1 >> y) == x    ->    x u<= (-1 >> y)
3361     DstPred = ICmpInst::Predicate::ICMP_ULE;
3362     break;
3363   case ICmpInst::Predicate::ICMP_NE:
3364     //  x & (-1 >> y) != x    ->    x u> (-1 >> y)
3365     DstPred = ICmpInst::Predicate::ICMP_UGT;
3366     break;
3367   case ICmpInst::Predicate::ICMP_ULT:
3368     //  x & (-1 >> y) u< x    ->    x u> (-1 >> y)
3369     //  x u> x & (-1 >> y)    ->    x u> (-1 >> y)
3370     DstPred = ICmpInst::Predicate::ICMP_UGT;
3371     break;
3372   case ICmpInst::Predicate::ICMP_UGE:
3373     //  x & (-1 >> y) u>= x    ->    x u<= (-1 >> y)
3374     //  x u<= x & (-1 >> y)    ->    x u<= (-1 >> y)
3375     DstPred = ICmpInst::Predicate::ICMP_ULE;
3376     break;
3377   case ICmpInst::Predicate::ICMP_SLT:
3378     //  x & (-1 >> y) s< x    ->    x s> (-1 >> y)
3379     //  x s> x & (-1 >> y)    ->    x s> (-1 >> y)
3380     if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3381       return nullptr;
3382     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3383       return nullptr;
3384     DstPred = ICmpInst::Predicate::ICMP_SGT;
3385     break;
3386   case ICmpInst::Predicate::ICMP_SGE:
3387     //  x & (-1 >> y) s>= x    ->    x s<= (-1 >> y)
3388     //  x s<= x & (-1 >> y)    ->    x s<= (-1 >> y)
3389     if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3390       return nullptr;
3391     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3392       return nullptr;
3393     DstPred = ICmpInst::Predicate::ICMP_SLE;
3394     break;
3395   case ICmpInst::Predicate::ICMP_SGT:
3396   case ICmpInst::Predicate::ICMP_SLE:
3397     return nullptr;
3398   case ICmpInst::Predicate::ICMP_UGT:
3399   case ICmpInst::Predicate::ICMP_ULE:
3400     llvm_unreachable("InstSimplify took care of the commuted variant");
3401     break;
3402   default:
3403     llvm_unreachable("All possible folds are handled.");
3404   }
3405 
3406   // The mask value may be a vector constant that has undefined elements. But it
3407   // may not be safe to propagate those undefs into the new compare, so replace
3408   // those elements by copying an existing, defined, and safe scalar constant.
3409   Type *OpTy = M->getType();
3410   auto *VecC = dyn_cast<Constant>(M);
3411   auto *OpVTy = dyn_cast<FixedVectorType>(OpTy);
3412   if (OpVTy && VecC && VecC->containsUndefElement()) {
3413     Constant *SafeReplacementConstant = nullptr;
3414     for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) {
3415       if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
3416         SafeReplacementConstant = VecC->getAggregateElement(i);
3417         break;
3418       }
3419     }
3420     assert(SafeReplacementConstant && "Failed to find undef replacement");
3421     M = Constant::replaceUndefsWith(VecC, SafeReplacementConstant);
3422   }
3423 
3424   return Builder.CreateICmp(DstPred, X, M);
3425 }
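// Illustrative example with a constant low-bit mask (hypothetical values):
//   icmp eq (and i8 %x, 15), %x
// becomes
//   icmp ule i8 %x, 15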
3426 
3427 /// Some comparisons can be simplified.
3428 /// In this case, we are looking for comparisons that look like
3429 /// a check for a lossy signed truncation.
3430 /// Folds:   (MaskedBits is a constant.)
3431 ///   ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
3432 /// Into:
3433 ///   (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
3434 /// Where  KeptBits = bitwidth(%x) - MaskedBits
3435 static Value *
3436 foldICmpWithTruncSignExtendedVal(ICmpInst &I,
3437                                  InstCombiner::BuilderTy &Builder) {
3438   ICmpInst::Predicate SrcPred;
3439   Value *X;
3440   const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
3441   // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
3442   if (!match(&I, m_c_ICmp(SrcPred,
3443                           m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
3444                                           m_APInt(C1))),
3445                           m_Deferred(X))))
3446     return nullptr;
3447 
3448   // Potential handling of non-splats: for each element:
3449   //  * if both are undef, replace with constant 0.
3450   //    Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
3451   //  * if both are not undef, and are different, bail out.
3452   //  * otherwise, exactly one is undef; pick the non-undef one.
3453 
3454   // The shift amount must be equal.
3455   if (*C0 != *C1)
3456     return nullptr;
3457   const APInt &MaskedBits = *C0;
3458   assert(MaskedBits != 0 && "shift by zero should be folded away already.");
3459 
3460   ICmpInst::Predicate DstPred;
3461   switch (SrcPred) {
3462   case ICmpInst::Predicate::ICMP_EQ:
3463     // ((%x << MaskedBits) a>> MaskedBits) == %x
3464     //   =>
3465     // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
3466     DstPred = ICmpInst::Predicate::ICMP_ULT;
3467     break;
3468   case ICmpInst::Predicate::ICMP_NE:
3469     // ((%x << MaskedBits) a>> MaskedBits) != %x
3470     //   =>
3471     // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
3472     DstPred = ICmpInst::Predicate::ICMP_UGE;
3473     break;
3474   // FIXME: are more folds possible?
3475   default:
3476     return nullptr;
3477   }
3478 
3479   auto *XType = X->getType();
3480   const unsigned XBitWidth = XType->getScalarSizeInBits();
3481   const APInt BitWidth = APInt(XBitWidth, XBitWidth);
3482   assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
3483 
3484   // KeptBits = bitwidth(%x) - MaskedBits
3485   const APInt KeptBits = BitWidth - MaskedBits;
3486   assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
3487   // ICmpCst = (1 << KeptBits)
3488   const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
3489   assert(ICmpCst.isPowerOf2());
3490   // AddCst = (1 << (KeptBits-1))
3491   const APInt AddCst = ICmpCst.lshr(1);
3492   assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
3493 
3494   // T0 = add %x, AddCst
3495   Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
3496   // T1 = T0 DstPred ICmpCst
3497   Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
3498 
3499   return T1;
3500 }
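// Illustrative example with i8 and MaskedBits = 4, so KeptBits = 4
// (hypothetical values; the ashr must have no other users):
//   icmp eq (ashr (shl i8 %x, 4), 4), %x
// becomes
//   icmp ult (add i8 %x, 8), 16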
3501 
3502 // Given pattern:
3503 //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3504 // we should move shifts to the same hand of 'and', i.e. rewrite as
3505 //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3506 // We are only interested in opposite logical shifts here.
3507 // One of the shifts can be truncated.
3508 // If we can, we want to end up creating 'lshr' shift.
3509 static Value *
3510 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
3511                                            InstCombiner::BuilderTy &Builder) {
3512   if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
3513       !I.getOperand(0)->hasOneUse())
3514     return nullptr;
3515 
3516   auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
3517 
3518   // Look for an 'and' of two logical shifts, one of which may be truncated.
3519   // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
3520   Instruction *XShift, *MaybeTruncation, *YShift;
3521   if (!match(
3522           I.getOperand(0),
3523           m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
3524                   m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
3525                                    m_AnyLogicalShift, m_Instruction(YShift))),
3526                                m_Instruction(MaybeTruncation)))))
3527     return nullptr;
3528 
3529   // We potentially looked past 'trunc', but only when matching YShift,
3530   // therefore YShift must have the widest type.
3531   Instruction *WidestShift = YShift;
3532   // Therefore XShift must have the shallowest type.
3533   // Or they both have identical types if there was no truncation.
3534   Instruction *NarrowestShift = XShift;
3535 
3536   Type *WidestTy = WidestShift->getType();
3537   Type *NarrowestTy = NarrowestShift->getType();
3538   assert(NarrowestTy == I.getOperand(0)->getType() &&
3539          "We did not look past any shifts while matching XShift though.");
3540   bool HadTrunc = WidestTy != I.getOperand(0)->getType();
3541 
3542   // If YShift is a 'lshr', swap the shifts around.
3543   if (match(YShift, m_LShr(m_Value(), m_Value())))
3544     std::swap(XShift, YShift);
3545 
3546   // The shifts must be in opposite directions.
3547   auto XShiftOpcode = XShift->getOpcode();
3548   if (XShiftOpcode == YShift->getOpcode())
3549     return nullptr; // Do not care about same-direction shifts here.
3550 
3551   Value *X, *XShAmt, *Y, *YShAmt;
3552   match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
3553   match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
3554 
3555   // If one of the values being shifted is a constant, then we will end with
3556   // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
3557   // however, we will need to ensure that we won't increase instruction count.
3558   if (!isa<Constant>(X) && !isa<Constant>(Y)) {
3559     // At least one of the hands of the 'and' should be one-use shift.
3560     if (!match(I.getOperand(0),
3561                m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
3562       return nullptr;
3563     if (HadTrunc) {
3564       // Due to the 'trunc', we will need to widen X. For that either the old
3565       // 'trunc' or the shift amt in the non-truncated shift should be one-use.
3566       if (!MaybeTruncation->hasOneUse() &&
3567           !NarrowestShift->getOperand(1)->hasOneUse())
3568         return nullptr;
3569     }
3570   }
3571 
3572   // We have two shift amounts from two different shifts. The types of those
3573   // shift amounts may not match. If that's the case let's bailout now.
3574   if (XShAmt->getType() != YShAmt->getType())
3575     return nullptr;
3576 
3577   // As input, we have the following pattern:
3578   //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3579   // We want to rewrite that as:
3580   //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3581   // While we know that originally (Q+K) would not overflow
3582   // (because  2 * (N-1) u<= iN -1), we have looked past extensions of
3583   // shift amounts, so it may now overflow in a smaller bitwidth.
3584   // To ensure that does not happen, we need to ensure that the total maximal
3585   // shift amount is still representable in that smaller bit width.
3586   unsigned MaximalPossibleTotalShiftAmount =
3587       (WidestTy->getScalarSizeInBits() - 1) +
3588       (NarrowestTy->getScalarSizeInBits() - 1);
3589   APInt MaximalRepresentableShiftAmount =
3590       APInt::getAllOnesValue(XShAmt->getType()->getScalarSizeInBits());
3591   if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
3592     return nullptr;
3593 
3594   // Can we fold (XShAmt+YShAmt) ?
3595   auto *NewShAmt = dyn_cast_or_null<Constant>(
3596       SimplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
3597                       /*isNUW=*/false, SQ.getWithInstruction(&I)));
3598   if (!NewShAmt)
3599     return nullptr;
3600   NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
3601   unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
3602 
3603   // Is the new shift amount smaller than the bit width?
3604   // FIXME: could also rely on ConstantRange.
3605   if (!match(NewShAmt,
3606              m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
3607                                 APInt(WidestBitWidth, WidestBitWidth))))
3608     return nullptr;
3609 
3610   // An extra legality check is needed if we had trunc-of-lshr.
3611   if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
3612     auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
3613                     WidestShift]() {
3614       // It isn't obvious whether it's worth it to analyze non-constants here.
3615       // Also, let's basically give up on non-splat cases, pessimizing vectors.
3616       // If *any* of these preconditions matches we can perform the fold.
3617       Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
3618                                     ? NewShAmt->getSplatValue()
3619                                     : NewShAmt;
3620       // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
3621       if (NewShAmtSplat &&
3622           (NewShAmtSplat->isNullValue() ||
3623            NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
3624         return true;
3625       // We consider *min* leading zeros so a single outlier
3626       // blocks the transform as opposed to allowing it.
3627       if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
3628         KnownBits Known = computeKnownBits(C, SQ.DL);
3629         unsigned MinLeadZero = Known.countMinLeadingZeros();
3630         // If the value being shifted has at most the lowest bit set, we can fold.
3631         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3632         if (MaxActiveBits <= 1)
3633           return true;
3634         // Precondition:  NewShAmt u<= countLeadingZeros(C)
3635         if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
3636           return true;
3637       }
3638       if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
3639         KnownBits Known = computeKnownBits(C, SQ.DL);
3640         unsigned MinLeadZero = Known.countMinLeadingZeros();
3641         // If the value being shifted has at most the lowest bit set, we can fold.
3642         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3643         if (MaxActiveBits <= 1)
3644           return true;
3645         // Precondition:  ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
3646         if (NewShAmtSplat) {
3647           APInt AdjNewShAmt =
3648               (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
3649           if (AdjNewShAmt.ule(MinLeadZero))
3650             return true;
3651         }
3652       }
3653       return false; // Can't tell if it's ok.
3654     };
3655     if (!CanFold())
3656       return nullptr;
3657   }
3658 
3659   // All good, we can do this fold.
3660   X = Builder.CreateZExt(X, WidestTy);
3661   Y = Builder.CreateZExt(Y, WidestTy);
3662   // The shift is the same that was for X.
3663   Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
3664                   ? Builder.CreateLShr(X, NewShAmt)
3665                   : Builder.CreateShl(X, NewShAmt);
3666   Value *T1 = Builder.CreateAnd(T0, Y);
3667   return Builder.CreateICmp(I.getPredicate(), T1,
3668                             Constant::getNullValue(WidestTy));
3669 }
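// Illustrative example with constant shift amounts (hypothetical operands;
// assuming the intermediate instructions have no extra uses):
//   icmp eq (and (shl i32 %x, 1), (lshr i32 %y, 2)), 0
// becomes, after moving both shifts onto one operand as an lshr by 1+2:
//   icmp eq (and (lshr i32 %y, 3), %x), 0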
3670 
3671 /// Fold
3672 ///   (-1 u/ x) u< y
3673 ///   ((x * y) u/ x) != y
3674 /// to
3675 ///   @llvm.umul.with.overflow(x, y) plus extraction of overflow bit
3676 /// Note that the comparison is commutative, while an inverted predicate
3677 /// (u>=, ==) means that we are looking for the opposite answer.
3678 Value *InstCombinerImpl::foldUnsignedMultiplicationOverflowCheck(ICmpInst &I) {
3679   ICmpInst::Predicate Pred;
3680   Value *X, *Y;
3681   Instruction *Mul;
3682   bool NeedNegation;
3683   // Look for: (-1 u/ x) u</u>= y
3684   if (!I.isEquality() &&
3685       match(&I, m_c_ICmp(Pred, m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
3686                          m_Value(Y)))) {
3687     Mul = nullptr;
3688 
3689     // Are we checking that overflow does not happen, or does happen?
3690     switch (Pred) {
3691     case ICmpInst::Predicate::ICMP_ULT:
3692       NeedNegation = false;
3693       break; // OK
3694     case ICmpInst::Predicate::ICMP_UGE:
3695       NeedNegation = true;
3696       break; // OK
3697     default:
3698       return nullptr; // Wrong predicate.
3699     }
3700   } else // Look for: ((x * y) u/ x) !=/== y
3701       if (I.isEquality() &&
3702           match(&I, m_c_ICmp(Pred, m_Value(Y),
3703                              m_OneUse(m_UDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
3704                                                                   m_Value(X)),
3705                                                           m_Instruction(Mul)),
3706                                              m_Deferred(X)))))) {
3707     NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
3708   } else
3709     return nullptr;
3710 
3711   BuilderTy::InsertPointGuard Guard(Builder);
3712   // If the pattern included (x * y), we'll want to insert new instructions
3713   // right before that original multiplication so that we can replace it.
3714   bool MulHadOtherUses = Mul && !Mul->hasOneUse();
3715   if (MulHadOtherUses)
3716     Builder.SetInsertPoint(Mul);
3717 
3718   Function *F = Intrinsic::getDeclaration(
3719       I.getModule(), Intrinsic::umul_with_overflow, X->getType());
3720   CallInst *Call = Builder.CreateCall(F, {X, Y}, "umul");
3721 
3722   // If the multiplication was used elsewhere, to ensure that we don't leave
3723   // "duplicate" instructions, replace uses of that original multiplication
3724   // with the multiplication result from the with.overflow intrinsic.
3725   if (MulHadOtherUses)
3726     replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "umul.val"));
3727 
3728   Value *Res = Builder.CreateExtractValue(Call, 1, "umul.ov");
3729   if (NeedNegation) // This technically increases instruction count.
3730     Res = Builder.CreateNot(Res, "umul.not.ov");
3731 
3732   // If we replaced the mul, erase it. Do this after all uses of Builder,
3733   // as the mul is used as insertion point.
3734   if (MulHadOtherUses)
3735     eraseInstFromFunction(*Mul);
3736 
3737   return Res;
3738 }
3739 
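/// Fold an icmp of a value against its 'nsw' negation into a compare with
/// zero, e.g.:
///   (-X) s< X --> X s> 0
///   (-X) u< X --> X s< 0
///   (-X) == X --> X == 0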
3740 static Instruction *foldICmpXNegX(ICmpInst &I) {
3741   CmpInst::Predicate Pred;
3742   Value *X;
3743   if (!match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X))))
3744     return nullptr;
3745 
3746   if (ICmpInst::isSigned(Pred))
3747     Pred = ICmpInst::getSwappedPredicate(Pred);
3748   else if (ICmpInst::isUnsigned(Pred))
3749     Pred = ICmpInst::getSignedPredicate(Pred);
3750   // else for equality-comparisons just keep the predicate.
3751 
3752   return ICmpInst::Create(Instruction::ICmp, Pred, X,
3753                           Constant::getNullValue(X->getType()), I.getName());
3754 }
3755 
3756 /// Try to fold icmp (binop), X or icmp X, (binop).
3757 /// TODO: A large part of this logic is duplicated in InstSimplify's
3758 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
3759 /// duplication.
3760 Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
3761                                              const SimplifyQuery &SQ) {
3762   const SimplifyQuery Q = SQ.getWithInstruction(&I);
3763   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3764 
3765   // Special logic for binary operators.
3766   BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
3767   BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
3768   if (!BO0 && !BO1)
3769     return nullptr;
3770 
3771   if (Instruction *NewICmp = foldICmpXNegX(I))
3772     return NewICmp;
3773 
3774   const CmpInst::Predicate Pred = I.getPredicate();
3775   Value *X;
3776 
3777   // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
3778   // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
3779   if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
3780       (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3781     return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
3782   // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
3783   if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
3784       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3785     return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
3786 
3787   bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
3788   if (BO0 && isa<OverflowingBinaryOperator>(BO0))
3789     NoOp0WrapProblem =
3790         ICmpInst::isEquality(Pred) ||
3791         (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
3792         (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
3793   if (BO1 && isa<OverflowingBinaryOperator>(BO1))
3794     NoOp1WrapProblem =
3795         ICmpInst::isEquality(Pred) ||
3796         (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
3797         (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
3798 
3799   // Analyze the case when either Op0 or Op1 is an add instruction.
3800   // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
3801   Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3802   if (BO0 && BO0->getOpcode() == Instruction::Add) {
3803     A = BO0->getOperand(0);
3804     B = BO0->getOperand(1);
3805   }
3806   if (BO1 && BO1->getOpcode() == Instruction::Add) {
3807     C = BO1->getOperand(0);
3808     D = BO1->getOperand(1);
3809   }
3810 
3811   // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
3812   // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
3813   if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
3814     return new ICmpInst(Pred, A == Op1 ? B : A,
3815                         Constant::getNullValue(Op1->getType()));
3816 
3817   // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
3818   // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
3819   if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
3820     return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
3821                         C == Op0 ? D : C);
3822 
3823   // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
3824   if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
3825       NoOp1WrapProblem) {
3826     // Determine Y and Z in the form icmp (X+Y), (X+Z).
3827     Value *Y, *Z;
3828     if (A == C) {
3829       // C + B == C + D  ->  B == D
3830       Y = B;
3831       Z = D;
3832     } else if (A == D) {
3833       // D + B == C + D  ->  B == C
3834       Y = B;
3835       Z = C;
3836     } else if (B == C) {
3837       // A + C == C + D  ->  A == D
3838       Y = A;
3839       Z = D;
3840     } else {
3841       assert(B == D);
3842       // A + D == C + D  ->  A == C
3843       Y = A;
3844       Z = C;
3845     }
3846     return new ICmpInst(Pred, Y, Z);
3847   }
3848 
3849   // icmp slt (A + -1), Op1 -> icmp sle A, Op1
3850   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
3851       match(B, m_AllOnes()))
3852     return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
3853 
3854   // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
3855   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
3856       match(B, m_AllOnes()))
3857     return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
3858 
3859   // icmp sle (A + 1), Op1 -> icmp slt A, Op1
3860   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
3861     return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
3862 
3863   // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
3864   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
3865     return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
3866 
3867   // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
3868   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
3869       match(D, m_AllOnes()))
3870     return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
3871 
3872   // icmp sle Op0, (C + -1) -> icmp slt Op0, C
3873   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
3874       match(D, m_AllOnes()))
3875     return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
3876 
3877   // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
3878   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
3879     return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
3880 
3881   // icmp slt Op0, (C + 1) -> icmp sle Op0, C
3882   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
3883     return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
3884 
3885   // TODO: The subtraction-related identities shown below also hold, but
3886   // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
3887   // wouldn't happen even if they were implemented.
3888   //
3889   // icmp ult (A - 1), Op1 -> icmp ule A, Op1
3890   // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
3891   // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
3892   // icmp ule Op0, (C - 1) -> icmp ult Op0, C
3893 
3894   // icmp ule (A + 1), Op1 -> icmp ult A, Op1
3895   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
3896     return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
3897 
3898   // icmp ugt (A + 1), Op1 -> icmp uge A, Op1
3899   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
3900     return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
3901 
3902   // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
3903   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
3904     return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
3905 
3906   // icmp ult Op0, (C + 1) -> icmp ule Op0, C
3907   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
3908     return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
3909 
3910   // if C1 has greater magnitude than C2:
3911   //  icmp (A + C1), (C + C2) -> icmp (A + C3), C
3912   //  s.t. C3 = C1 - C2
3913   //
3914   // if C2 has greater magnitude than C1:
3915   //  icmp (A + C1), (C + C2) -> icmp A, (C + C3)
3916   //  s.t. C3 = C2 - C1
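  // E.g. icmp slt (A + 7), (C + 3) -> icmp slt (A + 4), C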
3917   if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
3918       (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
3919     if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
3920       if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
3921         const APInt &AP1 = C1->getValue();
3922         const APInt &AP2 = C2->getValue();
3923         if (AP1.isNegative() == AP2.isNegative()) {
3924           APInt AP1Abs = C1->getValue().abs();
3925           APInt AP2Abs = C2->getValue().abs();
3926           if (AP1Abs.uge(AP2Abs)) {
3927             ConstantInt *C3 = Builder.getInt(AP1 - AP2);
3928             Value *NewAdd = Builder.CreateNSWAdd(A, C3);
3929             return new ICmpInst(Pred, NewAdd, C);
3930           } else {
3931             ConstantInt *C3 = Builder.getInt(AP2 - AP1);
3932             Value *NewAdd = Builder.CreateNSWAdd(C, C3);
3933             return new ICmpInst(Pred, A, NewAdd);
3934           }
3935         }
3936       }
3937 
3938   // Analyze the case when either Op0 or Op1 is a sub instruction.
3939   // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
3940   A = nullptr;
3941   B = nullptr;
3942   C = nullptr;
3943   D = nullptr;
3944   if (BO0 && BO0->getOpcode() == Instruction::Sub) {
3945     A = BO0->getOperand(0);
3946     B = BO0->getOperand(1);
3947   }
3948   if (BO1 && BO1->getOpcode() == Instruction::Sub) {
3949     C = BO1->getOperand(0);
3950     D = BO1->getOperand(1);
3951   }
3952 
3953   // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
3954   if (A == Op1 && NoOp0WrapProblem)
3955     return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
3956   // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
3957   if (C == Op0 && NoOp1WrapProblem)
3958     return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
3959 
3960   // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
3961   // (A - B) u>/u<= A --> B u>/u<= A
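  // (An unsigned A - B wraps around exactly when B u> A, so comparing the
  //  difference against A really asks whether the subtraction overflowed.)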
3962   if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3963     return new ICmpInst(Pred, B, A);
3964   // C u</u>= (C - D) --> C u</u>= D
3965   if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3966     return new ICmpInst(Pred, C, D);
3967   // (A - B) u>=/u< A --> B u>/u<= A  iff B != 0
3968   if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
3969       isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3970     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
3971   // C u<=/u> (C - D) --> C u</u>= D  iff D != 0
3972   if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
3973       isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3974     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
3975 
3976   // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
3977   if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
3978     return new ICmpInst(Pred, A, C);
3979 
3980   // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
3981   if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
3982     return new ICmpInst(Pred, D, B);
3983 
3984   // icmp (0-X) < cst --> X > -cst
3985   if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
3986     Value *X;
3987     if (match(BO0, m_Neg(m_Value(X))))
3988       if (Constant *RHSC = dyn_cast<Constant>(Op1))
3989         if (RHSC->isNotMinSignedValue())
3990           return new ICmpInst(I.getSwappedPredicate(), X,
3991                               ConstantExpr::getNeg(RHSC));
3992   }
3993 
3994   {
3995     // Try to remove shared constant multiplier from equality comparison:
3996     // X * C == Y * C (with no overflowing/aliasing) --> X == Y
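    // If C is odd it is invertible modulo 2^n, so the products can only be
    // equal when X == Y and no wrap flags are needed; otherwise both
    // multiplies must agree on an nsw or nuw flag.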
3997     Value *X, *Y;
3998     const APInt *C;
3999     if (match(Op0, m_Mul(m_Value(X), m_APInt(C))) && *C != 0 &&
4000         match(Op1, m_Mul(m_Value(Y), m_SpecificInt(*C))) && I.isEquality())
4001       if (!C->countTrailingZeros() ||
4002           (BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap()) ||
4003           (BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap()))
4004         return new ICmpInst(Pred, X, Y);
4005   }
4006 
4007   BinaryOperator *SRem = nullptr;
4008   // icmp (srem X, Y), Y
4009   if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
4010     SRem = BO0;
4011   // icmp Y, (srem X, Y)
4012   else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
4013            Op0 == BO1->getOperand(1))
4014     SRem = BO1;
4015   if (SRem) {
4016     // We don't check hasOneUse to avoid increasing register pressure because
4017     // the value we use is the same value this instruction was already using.
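    // For Y != 0, (X srem Y) lies strictly between -|Y| and |Y|, so it can
    // never equal Y, and whether it is signed-less or signed-greater than Y
    // depends only on the sign of Y.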
4018     switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
4019     default:
4020       break;
4021     case ICmpInst::ICMP_EQ:
4022       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4023     case ICmpInst::ICMP_NE:
4024       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4025     case ICmpInst::ICMP_SGT:
4026     case ICmpInst::ICMP_SGE:
4027       return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
4028                           Constant::getAllOnesValue(SRem->getType()));
4029     case ICmpInst::ICMP_SLT:
4030     case ICmpInst::ICMP_SLE:
4031       return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
4032                           Constant::getNullValue(SRem->getType()));
4033     }
4034   }
4035 
4036   if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
4037       BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
4038     switch (BO0->getOpcode()) {
4039     default:
4040       break;
4041     case Instruction::Add:
4042     case Instruction::Sub:
4043     case Instruction::Xor: {
4044       if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
4045         return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4046 
4047       const APInt *C;
4048       if (match(BO0->getOperand(1), m_APInt(C))) {
4049         // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
4050         if (C->isSignMask()) {
4051           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4052           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4053         }
4054 
4055         // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
4056         if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
4057           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4058           NewPred = I.getSwappedPredicate(NewPred);
4059           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4060         }
4061       }
4062       break;
4063     }
4064     case Instruction::Mul: {
4065       if (!I.isEquality())
4066         break;
4067 
4068       const APInt *C;
4069       if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() &&
4070           !C->isOneValue()) {
4071         // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
4072         // Mask = -1 >> count-trailing-zeros(C).
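        // E.g. for C == 12 (two trailing zeros), X*12 == Y*12 (mod 2^n) holds
        // exactly when the low n-2 bits of X and Y agree.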
4073         if (unsigned TZs = C->countTrailingZeros()) {
4074           Constant *Mask = ConstantInt::get(
4075               BO0->getType(),
4076               APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
4077           Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
4078           Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
4079           return new ICmpInst(Pred, And1, And2);
4080         }
4081       }
4082       break;
4083     }
4084     case Instruction::UDiv:
4085     case Instruction::LShr:
4086       if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
4087         break;
4088       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4089 
4090     case Instruction::SDiv:
4091       if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
4092         break;
4093       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4094 
4095     case Instruction::AShr:
4096       if (!BO0->isExact() || !BO1->isExact())
4097         break;
4098       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4099 
4100     case Instruction::Shl: {
4101       bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
4102       bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
4103       if (!NUW && !NSW)
4104         break;
4105       if (!NSW && I.isSigned())
4106         break;
4107       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4108     }
4109     }
4110   }
4111 
4112   if (BO0) {
4113     // Transform  A & (L - 1) `ult` L --> L != 0
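    // If L == 0 then L - 1 is all-ones, the LHS is just A, and A u< 0 is
    // always false; otherwise A & (L - 1) u<= L - 1 u< L, so the compare
    // reduces to a test of L != 0.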
4114     auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
4115     auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
4116 
4117     if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
4118       auto *Zero = Constant::getNullValue(BO0->getType());
4119       return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
4120     }
4121   }
4122 
4123   if (Value *V = foldUnsignedMultiplicationOverflowCheck(I))
4124     return replaceInstUsesWith(I, V);
4125 
4126   if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder))
4127     return replaceInstUsesWith(I, V);
4128 
4129   if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
4130     return replaceInstUsesWith(I, V);
4131 
4132   if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
4133     return replaceInstUsesWith(I, V);
4134 
4135   return nullptr;
4136 }
4137 
4138 /// Fold icmp Pred min|max(X, Y), X.
4139 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
4140   ICmpInst::Predicate Pred = Cmp.getPredicate();
4141   Value *Op0 = Cmp.getOperand(0);
4142   Value *X = Cmp.getOperand(1);
4143 
4144   // Canonicalize minimum or maximum operand to LHS of the icmp.
4145   if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
4146       match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
4147       match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
4148       match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
4149     std::swap(Op0, X);
4150     Pred = Cmp.getSwappedPredicate();
4151   }
4152 
4153   Value *Y;
4154   if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
4155     // smin(X, Y)  == X --> X s<= Y
4156     // smin(X, Y) s>= X --> X s<= Y
4157     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
4158       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
4159 
4160     // smin(X, Y) != X --> X s> Y
4161     // smin(X, Y) s< X --> X s> Y
4162     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
4163       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
4164 
4165     // These cases should be handled in InstSimplify:
4166     // smin(X, Y) s<= X --> true
4167     // smin(X, Y) s> X --> false
4168     return nullptr;
4169   }
4170 
4171   if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
4172     // smax(X, Y)  == X --> X s>= Y
4173     // smax(X, Y) s<= X --> X s>= Y
4174     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
4175       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
4176 
4177     // smax(X, Y) != X --> X s< Y
4178     // smax(X, Y) s> X --> X s< Y
4179     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
4180       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
4181 
4182     // These cases should be handled in InstSimplify:
4183     // smax(X, Y) s>= X --> true
4184     // smax(X, Y) s< X --> false
4185     return nullptr;
4186   }
4187 
4188   if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
4189     // umin(X, Y)  == X --> X u<= Y
4190     // umin(X, Y) u>= X --> X u<= Y
4191     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
4192       return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
4193 
4194     // umin(X, Y) != X --> X u> Y
4195     // umin(X, Y) u< X --> X u> Y
4196     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
4197       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
4198 
4199     // These cases should be handled in InstSimplify:
4200     // umin(X, Y) u<= X --> true
4201     // umin(X, Y) u> X --> false
4202     return nullptr;
4203   }
4204 
4205   if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
4206     // umax(X, Y)  == X --> X u>= Y
4207     // umax(X, Y) u<= X --> X u>= Y
4208     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
4209       return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
4210 
4211     // umax(X, Y) != X --> X u< Y
4212     // umax(X, Y) u> X --> X u< Y
4213     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
4214       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
4215 
4216     // These cases should be handled in InstSimplify:
4217     // umax(X, Y) u>= X --> true
4218     // umax(X, Y) u< X --> false
4219     return nullptr;
4220   }
4221 
4222   return nullptr;
4223 }
4224 
4225 Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
4226   if (!I.isEquality())
4227     return nullptr;
4228 
4229   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4230   const CmpInst::Predicate Pred = I.getPredicate();
4231   Value *A, *B, *C, *D;
4232   if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4233     if (A == Op1 || B == Op1) { // (A^B) == A  ->  B == 0
4234       Value *OtherVal = A == Op1 ? B : A;
4235       return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4236     }
4237 
4238     if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
4239       // A^c1 == C^c2 --> A == C^(c1^c2)
4240       ConstantInt *C1, *C2;
4241       if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
4242           Op1->hasOneUse()) {
4243         Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
4244         Value *Xor = Builder.CreateXor(C, NC);
4245         return new ICmpInst(Pred, A, Xor);
4246       }
4247 
4248       // A^B == A^D -> B == D
4249       if (A == C)
4250         return new ICmpInst(Pred, B, D);
4251       if (A == D)
4252         return new ICmpInst(Pred, B, C);
4253       if (B == C)
4254         return new ICmpInst(Pred, A, D);
4255       if (B == D)
4256         return new ICmpInst(Pred, A, C);
4257     }
4258   }
4259 
4260   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
4261     // A == (A^B)  ->  B == 0
4262     Value *OtherVal = A == Op0 ? B : A;
4263     return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4264   }
4265 
4266   // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
4267   if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
4268       match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
4269     Value *X = nullptr, *Y = nullptr, *Z = nullptr;
4270 
4271     if (A == C) {
4272       X = B;
4273       Y = D;
4274       Z = A;
4275     } else if (A == D) {
4276       X = B;
4277       Y = C;
4278       Z = A;
4279     } else if (B == C) {
4280       X = A;
4281       Y = D;
4282       Z = B;
4283     } else if (B == D) {
4284       X = A;
4285       Y = C;
4286       Z = B;
4287     }
4288 
4289     if (X) { // Build (X^Y) & Z
4290       Op1 = Builder.CreateXor(X, Y);
4291       Op1 = Builder.CreateAnd(Op1, Z);
4292       return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
4293     }
4294   }
4295 
4296   // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
4297   // and       (B & (1<<X)-1) == (zext A) --> A == (trunc B)
4298   ConstantInt *Cst1;
4299   if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
4300        match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
4301       (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
4302        match(Op1, m_ZExt(m_Value(A))))) {
4303     APInt Pow2 = Cst1->getValue() + 1;
4304     if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
4305         Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
4306       return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
4307   }
4308 
4309   // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
4310   // For lshr and ashr pairs.
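  // The shifted values are equal exactly when A and B agree in every bit at
  // position C or above, i.e. when A^B has no bits set at or above C.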
4311   if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4312        match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
4313       (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4314        match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
4315     unsigned TypeBits = Cst1->getBitWidth();
4316     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4317     if (ShAmt < TypeBits && ShAmt != 0) {
4318       ICmpInst::Predicate NewPred =
4319           Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
4320       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4321       APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
4322       return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
4323     }
4324   }
4325 
4326   // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
4327   if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
4328       match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
4329     unsigned TypeBits = Cst1->getBitWidth();
4330     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4331     if (ShAmt < TypeBits && ShAmt != 0) {
4332       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4333       APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
4334       Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
4335                                       I.getName() + ".mask");
4336       return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
4337     }
4338   }
4339 
4340   // Transform "icmp eq (trunc (lshr(X, cst1))), cst" to
4341   // "icmp (and X, mask), cst"
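  // E.g. (trunc (lshr i32 %A, 8) to i8) == 5 becomes (%A & 0xFF00) == 0x500.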
4342   uint64_t ShAmt = 0;
4343   if (Op0->hasOneUse() &&
4344       match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
4345       match(Op1, m_ConstantInt(Cst1)) &&
4346       // Only do this when A has multiple uses.  This is most important to do
4347       // when it exposes other optimizations.
4348       !A->hasOneUse()) {
4349     unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
4350 
4351     if (ShAmt < ASize) {
4352       APInt MaskV =
4353           APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
4354       MaskV <<= ShAmt;
4355 
4356       APInt CmpV = Cst1->getValue().zext(ASize);
4357       CmpV <<= ShAmt;
4358 
4359       Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
4360       return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
4361     }
4362   }
4363 
4364   // If both operands are byte-swapped or bit-reversed, just compare the
4365   // original values.
4366   // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
4367   // and handle more intrinsics.
4368   if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
4369       (match(Op0, m_BitReverse(m_Value(A))) &&
4370        match(Op1, m_BitReverse(m_Value(B)))))
4371     return new ICmpInst(Pred, A, B);
4372 
4373   // Canonicalize checking for a power-of-2-or-zero value:
4374   // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
4375   // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
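  // (A & (A - 1) clears the lowest set bit of A, so it is zero exactly when
  //  A has at most one bit set, i.e. when ctpop(A) < 2.)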
4376   if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
4377                                    m_Deferred(A)))) ||
4378       !match(Op1, m_ZeroInt()))
4379     A = nullptr;
4380 
4381   // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
4382   // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
4383   if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
4384     A = Op1;
4385   else if (match(Op1,
4386                  m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
4387     A = Op0;
4388 
4389   if (A) {
4390     Type *Ty = A->getType();
4391     CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
4392     return Pred == ICmpInst::ICMP_EQ
4393         ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop, ConstantInt::get(Ty, 2))
4394         : new ICmpInst(ICmpInst::ICMP_UGT, CtPop, ConstantInt::get(Ty, 1));
4395   }
4396 
4397   return nullptr;
4398 }
4399 
4400 static Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp,
4401                                            InstCombiner::BuilderTy &Builder) {
4402   assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
4403   auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
4404   Value *X;
4405   if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
4406     return nullptr;
4407 
4408   bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
4409   bool IsSignedCmp = ICmp.isSigned();
4410   if (auto *CastOp1 = dyn_cast<CastInst>(ICmp.getOperand(1))) {
4411     // If the signedness of the two casts doesn't agree (i.e. one is a sext
4412     // and the other is a zext), then we can't handle this.
4413     // TODO: This is too strict. We can handle some predicates (equality?).
4414     if (CastOp0->getOpcode() != CastOp1->getOpcode())
4415       return nullptr;
4416 
4417     // Not an extension from the same type?
4418     Value *Y = CastOp1->getOperand(0);
4419     Type *XTy = X->getType(), *YTy = Y->getType();
4420     if (XTy != YTy) {
4421       // One of the casts must have one use because we are creating a new cast.
4422       if (!CastOp0->hasOneUse() && !CastOp1->hasOneUse())
4423         return nullptr;
4424       // Extend the narrower operand to the type of the wider operand.
4425       if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
4426         X = Builder.CreateCast(CastOp0->getOpcode(), X, YTy);
4427       else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
4428         Y = Builder.CreateCast(CastOp0->getOpcode(), Y, XTy);
4429       else
4430         return nullptr;
4431     }
4432 
4433     // (zext X) == (zext Y) --> X == Y
4434     // (sext X) == (sext Y) --> X == Y
4435     if (ICmp.isEquality())
4436       return new ICmpInst(ICmp.getPredicate(), X, Y);
4437 
4438     // A signed comparison of sign extended values simplifies into a
4439     // signed comparison.
4440     if (IsSignedCmp && IsSignedExt)
4441       return new ICmpInst(ICmp.getPredicate(), X, Y);
4442 
4443     // The other three cases all fold into an unsigned comparison.
4444     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
4445   }
4446 
4447   // Below here, we are only folding a compare with constant.
4448   auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
4449   if (!C)
4450     return nullptr;
4451 
4452   // Compute the constant value that would result if we truncated C to SrcTy
4453   // and then re-extended it to DestTy.
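  // E.g. comparing (zext i8 %X) against 300: truncating 300 to i8 gives 44,
  // and zero-extending 44 back yields 44 != 300, so 300 has no i8 equivalent.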
4454   Type *SrcTy = CastOp0->getSrcTy();
4455   Type *DestTy = CastOp0->getDestTy();
4456   Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
4457   Constant *Res2 = ConstantExpr::getCast(CastOp0->getOpcode(), Res1, DestTy);
4458 
4459   // If the re-extended constant didn't change...
4460   if (Res2 == C) {
4461     if (ICmp.isEquality())
4462       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4463 
4464     // A signed comparison of sign extended values simplifies into a
4465     // signed comparison.
4466     if (IsSignedExt && IsSignedCmp)
4467       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4468 
4469     // The other three cases all fold into an unsigned comparison.
4470     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res1);
4471   }
4472 
4473   // The re-extended constant changed, partly changed (in the case of a vector),
4474   // or could not be determined to be equal (in the case of a constant
4475   // expression), so the constant cannot be represented in the shorter type.
4476   // All the cases that fold to true or false will have already been handled
4477   // by SimplifyICmpInst, so only deal with the tricky case.
4478   if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
4479     return nullptr;
4480 
4481   // Is source op positive?
4482   // icmp ult (sext X), C --> icmp sgt X, -1
4483   if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
4484     return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy));
4485 
4486   // Is source op negative?
4487   // icmp ugt (sext X), C --> icmp slt X, 0
4488   assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
4489   return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy));
4490 }
4491 
4492 /// Handle icmp (cast x), (cast or constant).
4493 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
4494   auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
4495   if (!CastOp0)
4496     return nullptr;
4497   if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
4498     return nullptr;
4499 
4500   Value *Op0Src = CastOp0->getOperand(0);
4501   Type *SrcTy = CastOp0->getSrcTy();
4502   Type *DestTy = CastOp0->getDestTy();
4503 
4504   // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
4505   // integer type is the same size as the pointer type.
4506   auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) {
4507     if (isa<VectorType>(SrcTy)) {
4508       SrcTy = cast<VectorType>(SrcTy)->getElementType();
4509       DestTy = cast<VectorType>(DestTy)->getElementType();
4510     }
4511     return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth();
4512   };
4513   if (CastOp0->getOpcode() == Instruction::PtrToInt &&
4514       CompatibleSizes(SrcTy, DestTy)) {
4515     Value *NewOp1 = nullptr;
4516     if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
4517       Value *PtrSrc = PtrToIntOp1->getOperand(0);
4518       if (PtrSrc->getType()->getPointerAddressSpace() ==
4519           Op0Src->getType()->getPointerAddressSpace()) {
4520         NewOp1 = PtrToIntOp1->getOperand(0);
4521         // If the pointer types don't match, insert a bitcast.
4522         if (Op0Src->getType() != NewOp1->getType())
4523           NewOp1 = Builder.CreateBitCast(NewOp1, Op0Src->getType());
4524       }
4525     } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
4526       NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
4527     }
4528 
4529     if (NewOp1)
4530       return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
4531   }
4532 
4533   return foldICmpWithZextOrSext(ICmp, Builder);
4534 }
4535 
4536 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS) {
4537   switch (BinaryOp) {
4538     default:
4539       llvm_unreachable("Unsupported binary op");
4540     case Instruction::Add:
4541     case Instruction::Sub:
4542       return match(RHS, m_Zero());
4543     case Instruction::Mul:
4544       return match(RHS, m_One());
4545   }
4546 }
4547 
4548 OverflowResult
4549 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
4550                                   bool IsSigned, Value *LHS, Value *RHS,
4551                                   Instruction *CxtI) const {
4552   switch (BinaryOp) {
4553     default:
4554       llvm_unreachable("Unsupported binary op");
4555     case Instruction::Add:
4556       if (IsSigned)
4557         return computeOverflowForSignedAdd(LHS, RHS, CxtI);
4558       else
4559         return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
4560     case Instruction::Sub:
4561       if (IsSigned)
4562         return computeOverflowForSignedSub(LHS, RHS, CxtI);
4563       else
4564         return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
4565     case Instruction::Mul:
4566       if (IsSigned)
4567         return computeOverflowForSignedMul(LHS, RHS, CxtI);
4568       else
4569         return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
4570   }
4571 }
4572 
4573 bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
4574                                              bool IsSigned, Value *LHS,
4575                                              Value *RHS, Instruction &OrigI,
4576                                              Value *&Result,
4577                                              Constant *&Overflow) {
4578   if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
4579     std::swap(LHS, RHS);
4580 
4581   // If the overflow check was an add followed by a compare, the insertion point
4582   // may be pointing to the compare.  We want to insert the new instructions
4583   // before the add in case there are uses of the add between the add and the
4584   // compare.
4585   Builder.SetInsertPoint(&OrigI);
4586 
4587   Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
4588   if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
4589     OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
4590 
4591   if (isNeutralValue(BinaryOp, RHS)) {
4592     Result = LHS;
4593     Overflow = ConstantInt::getFalse(OverflowTy);
4594     return true;
4595   }
4596 
4597   switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
4598     case OverflowResult::MayOverflow:
4599       return false;
4600     case OverflowResult::AlwaysOverflowsLow:
4601     case OverflowResult::AlwaysOverflowsHigh:
4602       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4603       Result->takeName(&OrigI);
4604       Overflow = ConstantInt::getTrue(OverflowTy);
4605       return true;
4606     case OverflowResult::NeverOverflows:
4607       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4608       Result->takeName(&OrigI);
4609       Overflow = ConstantInt::getFalse(OverflowTy);
4610       if (auto *Inst = dyn_cast<Instruction>(Result)) {
4611         if (IsSigned)
4612           Inst->setHasNoSignedWrap();
4613         else
4614           Inst->setHasNoUnsignedWrap();
4615       }
4616       return true;
4617   }
4618 
4619   llvm_unreachable("Unexpected overflow result");
4620 }
4621 
4622 /// Recognize and process an idiom involving a test for multiplication
4623 /// overflow.
4624 ///
4625 /// The caller has matched a pattern of the form:
4626 ///   I = cmp u (mul(zext A, zext B), V)
4627 /// The function checks if this is a test for overflow and if so replaces
4628 /// multiplication with call to 'mul.with.overflow' intrinsic.
4629 ///
4630 /// \param I Compare instruction.
4631 /// \param MulVal Result of 'mul' instruction.  It is one of the arguments of
4632 ///               the compare instruction.  Must be of integer type.
4633 /// \param OtherVal The other argument of compare instruction.
4634 /// \returns Instruction which must replace the compare instruction, NULL if no
4635 ///          replacement required.
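///
/// For example (illustrative IR):
///   %za = zext i32 %a to i64
///   %zb = zext i32 %b to i64
///   %m  = mul i64 %za, %zb
///   %ov = icmp ugt i64 %m, 4294967295
/// is rewritten to call @llvm.umul.with.overflow.i32 on %a and %b and to test
/// its overflow bit instead of the wide multiply.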
4636 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
4637                                          Value *OtherVal,
4638                                          InstCombinerImpl &IC) {
4639   // Don't bother doing this transformation for pointers, and don't do it for
4640   // vectors.
4641   if (!isa<IntegerType>(MulVal->getType()))
4642     return nullptr;
4643 
4644   assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
4645   assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
4646   auto *MulInstr = dyn_cast<Instruction>(MulVal);
4647   if (!MulInstr)
4648     return nullptr;
4649   assert(MulInstr->getOpcode() == Instruction::Mul);
4650 
4651   auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
4652        *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
4653   assert(LHS->getOpcode() == Instruction::ZExt);
4654   assert(RHS->getOpcode() == Instruction::ZExt);
4655   Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
4656 
4657   // Calculate type and width of the result produced by mul.with.overflow.
4658   Type *TyA = A->getType(), *TyB = B->getType();
4659   unsigned WidthA = TyA->getPrimitiveSizeInBits(),
4660            WidthB = TyB->getPrimitiveSizeInBits();
4661   unsigned MulWidth;
4662   Type *MulType;
4663   if (WidthB > WidthA) {
4664     MulWidth = WidthB;
4665     MulType = TyB;
4666   } else {
4667     MulWidth = WidthA;
4668     MulType = TyA;
4669   }
4670 
4671   // In order to replace the original mul with a narrower mul.with.overflow,
4672   // all uses must ignore upper bits of the product.  The number of used low
4673   // bits must not be greater than the width of mul.with.overflow.
4674   if (MulVal->hasNUsesOrMore(2))
4675     for (User *U : MulVal->users()) {
4676       if (U == &I)
4677         continue;
4678       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4679         // Check if truncation ignores bits above MulWidth.
4680         unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
4681         if (TruncWidth > MulWidth)
4682           return nullptr;
4683       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4684         // Check if AND ignores bits above MulWidth.
4685         if (BO->getOpcode() != Instruction::And)
4686           return nullptr;
4687         if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
4688           const APInt &CVal = CI->getValue();
4689           if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
4690             return nullptr;
4691         } else {
4692           // In this case we could have the operand of the binary operation
4693           // being defined in another block, and performing the replacement
4694           // could break the dominance relation.
4695           return nullptr;
4696         }
4697       } else {
4698         // Other uses prohibit this transformation.
4699         return nullptr;
4700       }
4701     }
4702 
4703   // Recognize patterns
4704   switch (I.getPredicate()) {
4705   case ICmpInst::ICMP_EQ:
4706   case ICmpInst::ICMP_NE:
4707     // Recognize pattern:
4708     //   mulval = mul(zext A, zext B)
4709     //   cmp eq/ne mulval, and(mulval, mask); mask selects low MulWidth bits.
4710     ConstantInt *CI;
4711     Value *ValToMask;
4712     if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
4713       if (ValToMask != MulVal)
4714         return nullptr;
4715       const APInt &CVal = CI->getValue() + 1;
4716       if (CVal.isPowerOf2()) {
4717         unsigned MaskWidth = CVal.logBase2();
4718         if (MaskWidth == MulWidth)
4719           break; // Recognized
4720       }
4721     }
4722     return nullptr;
4723 
4724   case ICmpInst::ICMP_UGT:
4725     // Recognize pattern:
4726     //   mulval = mul(zext A, zext B)
4727     //   cmp ugt mulval, max
4728     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4729       APInt MaxVal = APInt::getMaxValue(MulWidth);
4730       MaxVal = MaxVal.zext(CI->getBitWidth());
4731       if (MaxVal.eq(CI->getValue()))
4732         break; // Recognized
4733     }
4734     return nullptr;
4735 
4736   case ICmpInst::ICMP_UGE:
4737     // Recognize pattern:
4738     //   mulval = mul(zext A, zext B)
4739     //   cmp uge mulval, max+1
4740     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4741       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4742       if (MaxVal.eq(CI->getValue()))
4743         break; // Recognized
4744     }
4745     return nullptr;
4746 
4747   case ICmpInst::ICMP_ULE:
4748     // Recognize pattern:
4749     //   mulval = mul(zext A, zext B)
4750     //   cmp ule mulval, max
4751     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4752       APInt MaxVal = APInt::getMaxValue(MulWidth);
4753       MaxVal = MaxVal.zext(CI->getBitWidth());
4754       if (MaxVal.eq(CI->getValue()))
4755         break; // Recognized
4756     }
4757     return nullptr;
4758 
4759   case ICmpInst::ICMP_ULT:
4760     // Recognize pattern:
4761     //   mulval = mul(zext A, zext B)
4762     //   cmp ult mulval, max + 1
4763     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4764       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4765       if (MaxVal.eq(CI->getValue()))
4766         break; // Recognized
4767     }
4768     return nullptr;
4769 
4770   default:
4771     return nullptr;
4772   }
4773 
4774   InstCombiner::BuilderTy &Builder = IC.Builder;
4775   Builder.SetInsertPoint(MulInstr);
4776 
4777   // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
4778   Value *MulA = A, *MulB = B;
4779   if (WidthA < MulWidth)
4780     MulA = Builder.CreateZExt(A, MulType);
4781   if (WidthB < MulWidth)
4782     MulB = Builder.CreateZExt(B, MulType);
4783   Function *F = Intrinsic::getDeclaration(
4784       I.getModule(), Intrinsic::umul_with_overflow, MulType);
4785   CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
4786   IC.addToWorklist(MulInstr);
4787 
4788   // If there are uses of the mul result other than the comparison, we know
4789   // that they are truncations or binary ANDs. Change them to use the result
4790   // of mul.with.overflow and adjust the mask/size accordingly.
4791   if (MulVal->hasNUsesOrMore(2)) {
4792     Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
4793     for (auto UI = MulVal->user_begin(), UE = MulVal->user_end(); UI != UE;) {
4794       User *U = *UI++;
4795       if (U == &I || U == OtherVal)
4796         continue;
4797       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4798         if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
4799           IC.replaceInstUsesWith(*TI, Mul);
4800         else
4801           TI->setOperand(0, Mul);
4802       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4803         assert(BO->getOpcode() == Instruction::And);
4804         // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
4805         ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
4806         APInt ShortMask = CI->getValue().trunc(MulWidth);
4807         Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
4808         Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
4809         IC.replaceInstUsesWith(*BO, Zext);
4810       } else {
4811         llvm_unreachable("Unexpected Binary operation");
4812       }
4813       IC.addToWorklist(cast<Instruction>(U));
4814     }
4815   }
4816   if (isa<Instruction>(OtherVal))
4817     IC.addToWorklist(cast<Instruction>(OtherVal));
4818 
4819   // The original icmp gets replaced with the overflow value, maybe inverted
4820   // depending on predicate.
4821   bool Inverse = false;
4822   switch (I.getPredicate()) {
4823   case ICmpInst::ICMP_NE:
4824     break;
4825   case ICmpInst::ICMP_EQ:
4826     Inverse = true;
4827     break;
4828   case ICmpInst::ICMP_UGT:
4829   case ICmpInst::ICMP_UGE:
4830     if (I.getOperand(0) == MulVal)
4831       break;
4832     Inverse = true;
4833     break;
4834   case ICmpInst::ICMP_ULT:
4835   case ICmpInst::ICMP_ULE:
4836     if (I.getOperand(1) == MulVal)
4837       break;
4838     Inverse = true;
4839     break;
4840   default:
4841     llvm_unreachable("Unexpected predicate");
4842   }
4843   if (Inverse) {
4844     Value *Res = Builder.CreateExtractValue(Call, 1);
4845     return BinaryOperator::CreateNot(Res);
4846   }
4847 
4848   return ExtractValueInst::Create(Call, 1);
4849 }
4850 
4851 /// When performing a comparison against a constant, it is possible that not all
4852 /// the bits in the LHS are demanded. This helper method computes the mask that
4853 /// IS demanded.
4854 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
4855   const APInt *RHS;
4856   if (!match(I.getOperand(1), m_APInt(RHS)))
4857     return APInt::getAllOnesValue(BitWidth);
4858 
4859   // If this is a normal comparison, it demands all bits. If it is a sign bit
4860   // comparison, it only demands the sign bit.
4861   bool UnusedBit;
4862   if (InstCombiner::isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
4863     return APInt::getSignMask(BitWidth);
4864 
4865   switch (I.getPredicate()) {
4866   // For a UGT comparison, we don't care about any bits that
4867   // correspond to the trailing ones of the comparand.  The value of these
4868   // bits doesn't impact the outcome of the comparison, because any value
4869   // greater than the RHS must differ in a bit higher than these due to carry.
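  // E.g. for X u> 0b0111 only bits 3 and above matter: X exceeds 7 exactly
  // when it has some bit set at position 3 or higher.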
4870   case ICmpInst::ICMP_UGT:
4871     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes());
4872 
4873   // Similarly, for a ULT comparison, we don't care about the trailing zeros.
4874   // Any value less than the RHS must differ in a higher bit because of carries.
4875   case ICmpInst::ICMP_ULT:
4876     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
4877 
4878   default:
4879     return APInt::getAllOnesValue(BitWidth);
4880   }
4881 }
4882 
4883 /// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst
4884 /// should be swapped.
4885 /// The decision is based on how many times these two operands are reused
4886 /// as subtract operands and their positions in those instructions.
4887 /// The rationale is that several architectures use the same instruction for
4888 /// both subtract and cmp. Thus, it is better if the order of those operands
4889 /// matches.
4890 /// \return true if Op0 and Op1 should be swapped.
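/// E.g. if a 'sub %b, %a' already exists, emitting the compare with its
/// operands in (%b, %a) order lets later passes pair the cmp with that sub.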
4891 static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) {
4892   // Filter out pointer values as those cannot appear directly in subtract.
4893   // FIXME: we may want to go through inttoptrs or bitcasts.
4894   if (Op0->getType()->isPointerTy())
4895     return false;
4896   // If a subtract already has the same operands as a compare, swapping would be
4897   // bad. If a subtract has the same operands as a compare but in reverse order,
4898   // then swapping is good.
4899   int GoodToSwap = 0;
4900   for (const User *U : Op0->users()) {
4901     if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
4902       GoodToSwap++;
4903     else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
4904       GoodToSwap--;
4905   }
4906   return GoodToSwap > 0;
4907 }
4908 
4909 /// Check that one use is in the same block as the definition and all
4910 /// other uses are in blocks dominated by a given block.
4911 ///
4912 /// \param DI Definition
4913 /// \param UI Use
4914 /// \param DB Block that must dominate all uses of \p DI outside
4915 ///           the parent block
4916 /// \return true when \p UI is the only use of \p DI in the parent block
4917 /// and all other uses of \p DI are in blocks dominated by \p DB.
4918 ///
4919 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
4920                                         const Instruction *UI,
4921                                         const BasicBlock *DB) const {
4922   assert(DI && UI && "Instruction not defined\n");
4923   // Ignore incomplete definitions.
4924   if (!DI->getParent())
4925     return false;
4926   // DI and UI must be in the same block.
4927   if (DI->getParent() != UI->getParent())
4928     return false;
4929   // Protect from self-referencing blocks.
4930   if (DI->getParent() == DB)
4931     return false;
4932   for (const User *U : DI->users()) {
4933     auto *Usr = cast<Instruction>(U);
4934     if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
4935       return false;
4936   }
4937   return true;
4938 }
4939 
4940 /// Return true when the instruction sequence within a block is select-cmp-br.
4941 static bool isChainSelectCmpBranch(const SelectInst *SI) {
4942   const BasicBlock *BB = SI->getParent();
4943   if (!BB)
4944     return false;
4945   auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
4946   if (!BI || BI->getNumSuccessors() != 2)
4947     return false;
4948   auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
4949   if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
4950     return false;
4951   return true;
4952 }
4953 
4954 /// True when a select result is replaced by one of its operands
4955 /// in a select-icmp sequence. This will eventually result in the elimination
4956 /// of the select.
4957 ///
4958 /// \param SI    Select instruction
4959 /// \param Icmp  Compare instruction
4960 /// \param SIOpd Operand that replaces the select
4961 ///
4962 /// Notes:
4963 /// - The replacement is global and requires dominator information
4964 /// - The caller is responsible for the actual replacement
4965 ///
4966 /// Example:
4967 ///
4968 /// entry:
4969 ///  %4 = select i1 %3, %C* %0, %C* null
4970 ///  %5 = icmp eq %C* %4, null
4971 ///  br i1 %5, label %9, label %7
4972 ///  ...
4973 ///  ; <label>:7                                       ; preds = %entry
4974 ///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
4975 ///  ...
4976 ///
4977 /// can be transformed to
4978 ///
4979 ///  %5 = icmp eq %C* %0, null
4980 ///  %6 = select i1 %3, i1 %5, i1 true
4981 ///  br i1 %6, label %9, label %7
4982 ///  ...
4983 ///  ; <label>:7                                       ; preds = %entry
4984 ///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replace by %0!
4985 ///
4986 /// The same applies when the first operand of the select is a constant and/or
4987 /// the compare is for not-equal rather than equal.
4988 ///
4989 /// NOTE: The function is only called when the select and compare constants
4990 /// are equal; the optimization can work only for EQ predicates. This is not a
4991 /// major restriction since a NE compare should be 'normalized' to an equal
4992 /// compare, which usually happens in the combiner and test case
4993 /// select-cmp-br.ll checks for it.
4994 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
4995                                                  const ICmpInst *Icmp,
4996                                                  const unsigned SIOpd) {
4997   assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4998   if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4999     BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
5000     // The check for the single predecessor is not the best that can be
5001     // done. But it protects efficiently against cases like when SI's
5002     // home block has two successors, Succ and Succ1, and Succ1 is a
5003     // predecessor of Succ. Then SI can't be replaced by SIOpd because the
5004     // use that gets replaced can be reached on either path. So the
5005     // single-predecessor check guarantees that the path holding all uses of
5006     // SI (outside SI's parent) is disjoint from all other paths out of SI.
5007     // But that information is more expensive to compute, and the trade-off
5008     // here is in favor of compile-time. It should also be noted that we
5009     // check for a single predecessor and not just uniqueness. This is to
5010     // handle the situation when Succ and Succ1 point to the same basic block.
5011     if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
5012       NumSel++;
5013       SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
5014       return true;
5015     }
5016   }
5017   return false;
5018 }
5019 
5020 /// Try to fold the comparison based on range information we can get by checking
5021 /// whether bits are known to be zero or one in the inputs.
5022 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
5023   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5024   Type *Ty = Op0->getType();
5025   ICmpInst::Predicate Pred = I.getPredicate();
5026 
5027   // Get scalar or pointer size.
5028   unsigned BitWidth = Ty->isIntOrIntVectorTy()
5029                           ? Ty->getScalarSizeInBits()
5030                           : DL.getPointerTypeSizeInBits(Ty->getScalarType());
5031 
5032   if (!BitWidth)
5033     return nullptr;
5034 
5035   KnownBits Op0Known(BitWidth);
5036   KnownBits Op1Known(BitWidth);
5037 
5038   if (SimplifyDemandedBits(&I, 0,
5039                            getDemandedBitsLHSMask(I, BitWidth),
5040                            Op0Known, 0))
5041     return &I;
5042 
5043   if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
5044                            Op1Known, 0))
5045     return &I;
5046 
5047   // Given the known and unknown bits, compute a range that the LHS could be
5048   // in.  Compute the Min, Max and RHS values based on the known bits. For the
5049   // EQ and NE we use unsigned values.
5050   APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
5051   APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
5052   if (I.isSigned()) {
5053     computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
5054     computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
5055   } else {
5056     computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
5057     computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
5058   }
5059 
5060   // If Min and Max are known to be the same, then SimplifyDemandedBits figured
5061   // out that the LHS or RHS is a constant. Constant fold this now, so that
5062   // code below can assume that Min != Max.
5063   if (!isa<Constant>(Op0) && Op0Min == Op0Max)
5064     return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1);
5065   if (!isa<Constant>(Op1) && Op1Min == Op1Max)
5066     return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min));
5067 
5068   // Based on the range information we know about the LHS, see if we can
5069   // simplify this comparison.  For example, (x&4) < 8 is always true.
5070   switch (Pred) {
5071   default:
5072     llvm_unreachable("Unknown icmp opcode!");
5073   case ICmpInst::ICMP_EQ:
5074   case ICmpInst::ICMP_NE: {
5075     if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) {
5076       return Pred == CmpInst::ICMP_EQ
5077                  ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()))
5078                  : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5079     }
5080 
5081     // If all bits are known zero except for one, then we know at most one bit
5082     // is set. If the comparison is against zero, then this is a check to see if
5083     // *that* bit is set.
5084     APInt Op0KnownZeroInverted = ~Op0Known.Zero;
5085     if (Op1Known.isZero()) {
5086       // If the LHS is an AND with the same constant, look through it.
5087       Value *LHS = nullptr;
5088       const APInt *LHSC;
5089       if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
5090           *LHSC != Op0KnownZeroInverted)
5091         LHS = Op0;
5092 
5093       Value *X;
5094       if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
5095         APInt ValToCheck = Op0KnownZeroInverted;
5096         Type *XTy = X->getType();
5097         if (ValToCheck.isPowerOf2()) {
5098           // ((1 << X) & 8) == 0 -> X != 3
5099           // ((1 << X) & 8) != 0 -> X == 3
5100           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5101           auto NewPred = ICmpInst::getInversePredicate(Pred);
5102           return new ICmpInst(NewPred, X, CmpC);
5103         } else if ((++ValToCheck).isPowerOf2()) {
5104           // ((1 << X) & 7) == 0 -> X >= 3
5105           // ((1 << X) & 7) != 0 -> X  < 3
5106           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5107           auto NewPred =
5108               Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
5109           return new ICmpInst(NewPred, X, CmpC);
5110         }
5111       }
5112 
5113       // Check if the LHS is a power of 2 (like 8) >>u X and the mask is 1.
5114       const APInt *CI;
5115       if (Op0KnownZeroInverted.isOneValue() &&
5116           match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
5117         // ((8 >>u X) & 1) == 0 -> X != 3
5118         // ((8 >>u X) & 1) != 0 -> X == 3
5119         unsigned CmpVal = CI->countTrailingZeros();
5120         auto NewPred = ICmpInst::getInversePredicate(Pred);
5121         return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
5122       }
5123     }
5124     break;
5125   }
5126   case ICmpInst::ICMP_ULT: {
5127     if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
5128       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5129     if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
5130       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5131     if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
5132       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5133 
5134     const APInt *CmpC;
5135     if (match(Op1, m_APInt(CmpC))) {
5136       // A <u C -> A == C-1 if min(A)+1 == C
5137       if (*CmpC == Op0Min + 1)
5138         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5139                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5140       // X <u C --> X == 0, if the number of known zero bits in the bottom of X
5141       // is at least ceil(log2(C)).
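      // (E.g., if the low four bits of X are known zero and C is 10, then X is
      // a multiple of 16, so X <u 10 holds only when X == 0.)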
5142       if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
5143         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5144                             Constant::getNullValue(Op1->getType()));
5145     }
5146     break;
5147   }
5148   case ICmpInst::ICMP_UGT: {
5149     if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
5150       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5151     if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
5152       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5153     if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
5154       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5155 
5156     const APInt *CmpC;
5157     if (match(Op1, m_APInt(CmpC))) {
5158       // A >u C -> A == C+1 if max(A)-1 == C
5159       if (*CmpC == Op0Max - 1)
5160         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5161                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5162       // X >u C --> X != 0, if the number of known zero bits in the bottom of X
5163       // is at least the number of active bits in C.
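      // (E.g., if the low four bits of X are known zero and C is 12, any
      // non-zero X is at least 16 > 12, so X >u 12 is equivalent to X != 0.)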
5164       if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
5165         return new ICmpInst(ICmpInst::ICMP_NE, Op0,
5166                             Constant::getNullValue(Op1->getType()));
5167     }
5168     break;
5169   }
5170   case ICmpInst::ICMP_SLT: {
5171     if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
5172       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5173     if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
5174       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5175     if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
5176       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5177     const APInt *CmpC;
5178     if (match(Op1, m_APInt(CmpC))) {
5179       if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
5180         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5181                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5182     }
5183     break;
5184   }
5185   case ICmpInst::ICMP_SGT: {
5186     if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
5187       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5188     if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
5189       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5190     if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
5191       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5192     const APInt *CmpC;
5193     if (match(Op1, m_APInt(CmpC))) {
5194       if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
5195         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5196                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5197     }
5198     break;
5199   }
5200   case ICmpInst::ICMP_SGE:
5201     assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
5202     if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
5203       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5204     if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
5205       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5206     if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
5207       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5208     break;
5209   case ICmpInst::ICMP_SLE:
5210     assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
5211     if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
5212       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5213     if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
5214       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5215     if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
5216       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5217     break;
5218   case ICmpInst::ICMP_UGE:
5219     assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
5220     if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
5221       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5222     if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
5223       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5224     if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
5225       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5226     break;
5227   case ICmpInst::ICMP_ULE:
5228     assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
5229     if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
5230       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5231     if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
5232       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5233     if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
5234       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5235     break;
5236   }
5237 
5238   // Turn a signed comparison into an unsigned one if both operands are known to
5239   // have the same sign.
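  // (For example, if the sign bit of both operands is known to be clear, then
  // slt and ult agree, so the unsigned form can be used instead.)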
5240   if (I.isSigned() &&
5241       ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
5242        (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
5243     return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
5244 
5245   return nullptr;
5246 }
5247 
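/// Convert a relational predicate into its strictness-flipped sibling by
/// nudging the constant operand by one, e.g. (illustrative) 'sgt X, 5'
/// becomes 'sge X, 6'. Returns None when the constant cannot be adjusted
/// without wrapping, or when it is not a usable integer constant.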
5248 llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
5249 InstCombiner::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
5250                                                        Constant *C) {
5251   assert(ICmpInst::isRelational(Pred) && ICmpInst::isIntPredicate(Pred) &&
5252          "Only for relational integer predicates.");
5253 
5254   Type *Type = C->getType();
5255   bool IsSigned = ICmpInst::isSigned(Pred);
5256 
5257   CmpInst::Predicate UnsignedPred = ICmpInst::getUnsignedPredicate(Pred);
5258   bool WillIncrement =
5259       UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT;
5260 
5261   // Check if the constant operand can be safely incremented/decremented
5262   // without overflowing/underflowing.
5263   auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
5264     return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
5265   };
5266 
5267   Constant *SafeReplacementConstant = nullptr;
5268   if (auto *CI = dyn_cast<ConstantInt>(C)) {
5269     // Bail out if the constant can't be safely incremented/decremented.
5270     if (!ConstantIsOk(CI))
5271       return llvm::None;
5272   } else if (auto *FVTy = dyn_cast<FixedVectorType>(Type)) {
5273     unsigned NumElts = FVTy->getNumElements();
5274     for (unsigned i = 0; i != NumElts; ++i) {
5275       Constant *Elt = C->getAggregateElement(i);
5276       if (!Elt)
5277         return llvm::None;
5278 
5279       if (isa<UndefValue>(Elt))
5280         continue;
5281 
5282       // Bail out if we can't determine if this constant is min/max or if we
5283       // know that this constant is min/max.
5284       auto *CI = dyn_cast<ConstantInt>(Elt);
5285       if (!CI || !ConstantIsOk(CI))
5286         return llvm::None;
5287 
5288       if (!SafeReplacementConstant)
5289         SafeReplacementConstant = CI;
5290     }
5291   } else {
5292     // ConstantExpr?
5293     return llvm::None;
5294   }
5295 
5296   // It may not be safe to change a compare predicate in the presence of
5297   // undefined elements, so replace those elements with the first safe constant
5298   // that we found.
5299   if (C->containsUndefElement()) {
5300     assert(SafeReplacementConstant && "Replacement constant not set");
5301     C = Constant::replaceUndefsWith(C, SafeReplacementConstant);
5302   }
5303 
5304   CmpInst::Predicate NewPred = CmpInst::getFlippedStrictnessPredicate(Pred);
5305 
5306   // Increment or decrement the constant.
5307   Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
5308   Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne);
5309 
5310   return std::make_pair(NewPred, NewC);
5311 }
5312 
5313 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
5314 /// it into the appropriate icmp lt or icmp gt instruction. This transform
5315 /// allows them to be folded in visitICmpInst.
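/// For example (illustrative): 'icmp sle X, 5' becomes 'icmp slt X, 6', and
/// 'icmp uge X, 8' becomes 'icmp ugt X, 7', provided the constant can be
/// adjusted without wrapping.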
5316 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
5317   ICmpInst::Predicate Pred = I.getPredicate();
5318   if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
5319       InstCombiner::isCanonicalPredicate(Pred))
5320     return nullptr;
5321 
5322   Value *Op0 = I.getOperand(0);
5323   Value *Op1 = I.getOperand(1);
5324   auto *Op1C = dyn_cast<Constant>(Op1);
5325   if (!Op1C)
5326     return nullptr;
5327 
5328   auto FlippedStrictness =
5329       InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
5330   if (!FlippedStrictness)
5331     return nullptr;
5332 
5333   return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
5334 }
5335 
5336 /// If we have a comparison with a non-canonical predicate and we can update
5337 /// all of its users, invert the predicate and adjust all the users.
5338 CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
5339   // Is the predicate already canonical?
5340   CmpInst::Predicate Pred = I.getPredicate();
5341   if (InstCombiner::isCanonicalPredicate(Pred))
5342     return nullptr;
5343 
5344   // Can all users be adjusted to predicate inversion?
5345   if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
5346     return nullptr;
5347 
5348   // Ok, we can canonicalize the comparison!
5349   // Let's first invert the comparison's predicate.
5350   I.setPredicate(CmpInst::getInversePredicate(Pred));
5351   I.setName(I.getName() + ".not");
5352 
5353   // And now let's adjust every user.
5354   for (User *U : I.users()) {
5355     switch (cast<Instruction>(U)->getOpcode()) {
5356     case Instruction::Select: {
5357       auto *SI = cast<SelectInst>(U);
5358       SI->swapValues();
5359       SI->swapProfMetadata();
5360       break;
5361     }
5362     case Instruction::Br:
5363       cast<BranchInst>(U)->swapSuccessors(); // swaps prof metadata too
5364       break;
5365     case Instruction::Xor:
5366       replaceInstUsesWith(cast<Instruction>(*U), &I);
5367       break;
5368     default:
5369       llvm_unreachable("Got unexpected user - out of sync with "
5370                        "canFreelyInvertAllUsersOf() ?");
5371     }
5372   }
5373 
5374   return &I;
5375 }
5376 
5377 /// Integer compare with boolean values can always be turned into bitwise ops.
5378 static Instruction *canonicalizeICmpBool(ICmpInst &I,
5379                                          InstCombiner::BuilderTy &Builder) {
5380   Value *A = I.getOperand(0), *B = I.getOperand(1);
5381   assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
5382 
5383   // A boolean compared to true/false can be simplified to Op0/true/false in
5384   // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
5385   // Cases not handled by InstSimplify are always 'not' of Op0.
5386   if (match(B, m_Zero())) {
5387     switch (I.getPredicate()) {
5388       case CmpInst::ICMP_EQ:  // A ==   0 -> !A
5389       case CmpInst::ICMP_ULE: // A <=u  0 -> !A
5390       case CmpInst::ICMP_SGE: // A >=s  0 -> !A
5391         return BinaryOperator::CreateNot(A);
5392       default:
5393         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5394     }
5395   } else if (match(B, m_One())) {
5396     switch (I.getPredicate()) {
5397       case CmpInst::ICMP_NE:  // A !=  1 -> !A
5398       case CmpInst::ICMP_ULT: // A <u  1 -> !A
5399       case CmpInst::ICMP_SGT: // A >s -1 -> !A
5400         return BinaryOperator::CreateNot(A);
5401       default:
5402         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5403     }
5404   }
5405 
5406   switch (I.getPredicate()) {
5407   default:
5408     llvm_unreachable("Invalid icmp instruction!");
5409   case ICmpInst::ICMP_EQ:
5410     // icmp eq i1 A, B -> ~(A ^ B)
5411     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
5412 
5413   case ICmpInst::ICMP_NE:
5414     // icmp ne i1 A, B -> A ^ B
5415     return BinaryOperator::CreateXor(A, B);
5416 
5417   case ICmpInst::ICMP_UGT:
5418     // icmp ugt -> icmp ult
5419     std::swap(A, B);
5420     LLVM_FALLTHROUGH;
5421   case ICmpInst::ICMP_ULT:
5422     // icmp ult i1 A, B -> ~A & B
5423     return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
5424 
5425   case ICmpInst::ICMP_SGT:
5426     // icmp sgt -> icmp slt
5427     std::swap(A, B);
5428     LLVM_FALLTHROUGH;
5429   case ICmpInst::ICMP_SLT:
5430     // icmp slt i1 A, B -> A & ~B
5431     return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
5432 
5433   case ICmpInst::ICMP_UGE:
5434     // icmp uge -> icmp ule
5435     std::swap(A, B);
5436     LLVM_FALLTHROUGH;
5437   case ICmpInst::ICMP_ULE:
5438     // icmp ule i1 A, B -> ~A | B
5439     return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
5440 
5441   case ICmpInst::ICMP_SGE:
5442     // icmp sge -> icmp sle
5443     std::swap(A, B);
5444     LLVM_FALLTHROUGH;
5445   case ICmpInst::ICMP_SLE:
5446     // icmp sle i1 A, B -> A | ~B
5447     return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
5448   }
5449 }
5450 
5451 // Transform pattern like:
5452 //   (1 << Y) u<= X  or  ~(-1 << Y) u<  X  or  ((1 << Y)+(-1)) u<  X
5453 //   (1 << Y) u>  X  or  ~(-1 << Y) u>= X  or  ((1 << Y)+(-1)) u>= X
5454 // Into:
5455 //   (X l>> Y) != 0
5456 //   (X l>> Y) == 0
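// E.g., with Y == 3: '(1 << 3) u<= X' asks whether X >= 8, i.e. whether any
// bit of X at position 3 or above is set, which is exactly '(X l>> 3) != 0'.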
5457 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
5458                                             InstCombiner::BuilderTy &Builder) {
5459   ICmpInst::Predicate Pred, NewPred;
5460   Value *X, *Y;
5461   if (match(&Cmp,
5462             m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
5463     switch (Pred) {
5464     case ICmpInst::ICMP_ULE:
5465       NewPred = ICmpInst::ICMP_NE;
5466       break;
5467     case ICmpInst::ICMP_UGT:
5468       NewPred = ICmpInst::ICMP_EQ;
5469       break;
5470     default:
5471       return nullptr;
5472     }
5473   } else if (match(&Cmp, m_c_ICmp(Pred,
5474                                   m_OneUse(m_CombineOr(
5475                                       m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
5476                                       m_Add(m_Shl(m_One(), m_Value(Y)),
5477                                             m_AllOnes()))),
5478                                   m_Value(X)))) {
5479     // The variant with 'add' is not canonical (the variant with 'not' is);
5480     // we only get it because it has extra uses and can't be canonicalized.
5481 
5482     switch (Pred) {
5483     case ICmpInst::ICMP_ULT:
5484       NewPred = ICmpInst::ICMP_NE;
5485       break;
5486     case ICmpInst::ICMP_UGE:
5487       NewPred = ICmpInst::ICMP_EQ;
5488       break;
5489     default:
5490       return nullptr;
5491     }
5492   } else
5493     return nullptr;
5494 
5495   Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
5496   Constant *Zero = Constant::getNullValue(NewX->getType());
5497   return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
5498 }
5499 
5500 static Instruction *foldVectorCmp(CmpInst &Cmp,
5501                                   InstCombiner::BuilderTy &Builder) {
5502   const CmpInst::Predicate Pred = Cmp.getPredicate();
5503   Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
5504   Value *V1, *V2;
5505   ArrayRef<int> M;
5506   if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
5507     return nullptr;
5508 
5509   // If both arguments of the cmp are shuffles that use the same mask and
5510   // shuffle within a single vector, move the shuffle after the cmp:
5511   // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
5512   Type *V1Ty = V1->getType();
5513   if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
5514       V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
5515     Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
5516     return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), M);
5517   }
5518 
5519   // Try to canonicalize compare with splatted operand and splat constant.
5520   // TODO: We could generalize this for more than splats. See/use the code in
5521   //       InstCombiner::foldVectorBinop().
5522   Constant *C;
5523   if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
5524     return nullptr;
5525 
5526   // Length-changing splats are ok, so adjust the constants as needed:
5527   // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
5528   Constant *ScalarC = C->getSplatValue(/* AllowUndefs */ true);
5529   int MaskSplatIndex;
5530   if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) {
5531     // We allow undefs in matching, but this transform removes those for safety.
5532     // Demanded elements analysis should be able to recover some/all of that.
5533     C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
5534                                  ScalarC);
5535     SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
5536     Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
5537     return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()),
5538                                  NewM);
5539   }
5540 
5541   return nullptr;
5542 }
5543 
5544 // extract(uadd.with.overflow(A, B), 0) ult A
5545 //  -> extract(uadd.with.overflow(A, B), 1)
5546 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
5547   CmpInst::Predicate Pred = I.getPredicate();
5548   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5549 
5550   Value *UAddOv;
5551   Value *A, *B;
5552   auto UAddOvResultPat = m_ExtractValue<0>(
5553       m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
5554   if (match(Op0, UAddOvResultPat) &&
5555       ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
5556        (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
5557         (match(A, m_One()) || match(B, m_One()))) ||
5558        (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
5559         (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
5560     // extract(uadd.with.overflow(A, B), 0) < A
5561     // extract(uadd.with.overflow(A, 1), 0) == 0
5562     // extract(uadd.with.overflow(A, -1), 0) != -1
5563     UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
5564   else if (match(Op1, UAddOvResultPat) &&
5565            Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B))
5566     // A > extract(uadd.with.overflow(A, B), 0)
5567     UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
5568   else
5569     return nullptr;
5570 
5571   return ExtractValueInst::Create(UAddOv, 1);
5572 }
5573 
5574 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
5575   bool Changed = false;
5576   const SimplifyQuery Q = SQ.getWithInstruction(&I);
5577   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5578   unsigned Op0Cplxity = getComplexity(Op0);
5579   unsigned Op1Cplxity = getComplexity(Op1);
5580 
5581   /// Orders the operands of the compare so that they are listed from most
5582   /// complex to least complex. This puts binary operators before unary
5583   /// operators, and those before constants.
5584   if (Op0Cplxity < Op1Cplxity ||
5585       (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
5586     I.swapOperands();
5587     std::swap(Op0, Op1);
5588     Changed = true;
5589   }
5590 
5591   if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
5592     return replaceInstUsesWith(I, V);
5593 
5594   // Comparing -val or val against zero for inequality is the same as just
5595   // comparing val, i.e. abs(val) != 0 -> val != 0
5596   if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
5597     Value *Cond, *SelectTrue, *SelectFalse;
5598     if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
5599                             m_Value(SelectFalse)))) {
5600       if (Value *V = dyn_castNegVal(SelectTrue)) {
5601         if (V == SelectFalse)
5602           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5603       }
5604       else if (Value *V = dyn_castNegVal(SelectFalse)) {
5605         if (V == SelectTrue)
5606           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5607       }
5608     }
5609   }
5610 
5611   if (Op0->getType()->isIntOrIntVectorTy(1))
5612     if (Instruction *Res = canonicalizeICmpBool(I, Builder))
5613       return Res;
5614 
5615   if (Instruction *Res = canonicalizeCmpWithConstant(I))
5616     return Res;
5617 
5618   if (Instruction *Res = canonicalizeICmpPredicate(I))
5619     return Res;
5620 
5621   if (Instruction *Res = foldICmpWithConstant(I))
5622     return Res;
5623 
5624   if (Instruction *Res = foldICmpWithDominatingICmp(I))
5625     return Res;
5626 
5627   if (Instruction *Res = foldICmpBinOp(I, Q))
5628     return Res;
5629 
5630   if (Instruction *Res = foldICmpUsingKnownBits(I))
5631     return Res;
5632 
5633   // Test if the ICmpInst instruction is used exclusively by a select as
5634   // part of a minimum or maximum operation. If so, refrain from doing
5635   // any other folding. This helps out other analyses which understand
5636   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
5637   // and CodeGen. And in this case, at least one of the comparison
5638   // operands has at least one user besides the compare (the select),
5639   // which would often largely negate the benefit of folding anyway.
5640   //
5641   // Do the same for the other patterns recognized by matchSelectPattern.
5642   if (I.hasOneUse())
5643     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
5644       Value *A, *B;
5645       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
5646       if (SPR.Flavor != SPF_UNKNOWN)
5647         return nullptr;
5648     }
5649 
5650   // Do this after checking for min/max to prevent infinite looping.
5651   if (Instruction *Res = foldICmpWithZero(I))
5652     return Res;
5653 
5654   // FIXME: We only do this after checking for min/max to prevent infinite
5655   // looping caused by a reverse canonicalization of these patterns for min/max.
5656   // FIXME: The organization of folds is a mess. These would naturally go into
5657   // canonicalizeCmpWithConstant(), but we can't move all of the above folds
5658   // down here after the min/max restriction.
5659   ICmpInst::Predicate Pred = I.getPredicate();
5660   const APInt *C;
5661   if (match(Op1, m_APInt(C))) {
5662     // For i32: x >u 2147483647 -> x <s 0  -> true if sign bit set
5663     if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
5664       Constant *Zero = Constant::getNullValue(Op0->getType());
5665       return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
5666     }
5667 
5668     // For i32: x <u 2147483648 -> x >s -1  -> true if sign bit clear
5669     if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
5670       Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
5671       return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
5672     }
5673   }
5674 
5675   if (Instruction *Res = foldICmpInstWithConstant(I))
5676     return Res;
5677 
5678   // Try to match comparison as a sign bit test. Intentionally do this after
5679   // foldICmpInstWithConstant() to potentially let other folds to happen first.
5680   if (Instruction *New = foldSignBitTest(I))
5681     return New;
5682 
5683   if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
5684     return Res;
5685 
5686   // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
5687   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
5688     if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
5689       return NI;
5690   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
5691     if (Instruction *NI = foldGEPICmp(GEP, Op0,
5692                            ICmpInst::getSwappedPredicate(I.getPredicate()), I))
5693       return NI;
5694 
5695   // Try to optimize equality comparisons against alloca-based pointers.
5696   if (Op0->getType()->isPointerTy() && I.isEquality()) {
5697     assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
5698     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
5699       if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
5700         return New;
5701     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
5702       if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
5703         return New;
5704   }
5705 
5706   if (Instruction *Res = foldICmpBitCast(I, Builder))
5707     return Res;
5708 
5709   // TODO: Hoist this above the min/max bailout.
5710   if (Instruction *R = foldICmpWithCastOp(I))
5711     return R;
5712 
5713   if (Instruction *Res = foldICmpWithMinMax(I))
5714     return Res;
5715 
5716   {
5717     Value *A, *B;
5718     // Transform (A & ~B) == 0 --> (A & B) != 0
5719     // and       (A & ~B) != 0 --> (A & B) == 0
5720     // if A is a power of 2.
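    // (E.g., with A == 8: '(8 & ~B) == 0' means bit 3 of B is set, which is
    // exactly '(8 & B) != 0'.)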
5721     if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
5722         match(Op1, m_Zero()) &&
5723         isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
5724       return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
5725                           Op1);
5726 
5727     // ~X < ~Y --> Y < X
5728     // ~X < C -->  X > ~C
5729     if (match(Op0, m_Not(m_Value(A)))) {
5730       if (match(Op1, m_Not(m_Value(B))))
5731         return new ICmpInst(I.getPredicate(), B, A);
5732 
5733       const APInt *C;
5734       if (match(Op1, m_APInt(C)))
5735         return new ICmpInst(I.getSwappedPredicate(), A,
5736                             ConstantInt::get(Op1->getType(), ~(*C)));
5737     }
5738 
5739     Instruction *AddI = nullptr;
5740     if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
5741                                      m_Instruction(AddI))) &&
5742         isa<IntegerType>(A->getType())) {
5743       Value *Result;
5744       Constant *Overflow;
5745       // m_UAddWithOverflow can match patterns that do not include an explicit
5746       // "add" instruction, so check the opcode of the matched op.
5747       if (AddI->getOpcode() == Instruction::Add &&
5748           OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, A, B, *AddI,
5749                                 Result, Overflow)) {
5750         replaceInstUsesWith(*AddI, Result);
5751         eraseInstFromFunction(*AddI);
5752         return replaceInstUsesWith(I, Overflow);
5753       }
5754     }
5755 
5756     // (zext a) * (zext b)  --> llvm.umul.with.overflow.
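    // (The widened multiply compared against the range of the narrow type is
    // really an unsigned-multiply overflow check on the narrow operands.)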
5757     if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5758       if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
5759         return R;
5760     }
5761     if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5762       if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
5763         return R;
5764     }
5765   }
5766 
5767   if (Instruction *Res = foldICmpEquality(I))
5768     return Res;
5769 
5770   if (Instruction *Res = foldICmpOfUAddOv(I))
5771     return Res;
5772 
5773   // The 'cmpxchg' instruction returns an aggregate containing the old value and
5774   // an i1 which indicates whether or not we successfully did the swap.
5775   //
5776   // Replace comparisons between the old value and the expected value with the
5777   // indicator that 'cmpxchg' returns.
5778   //
5779   // N.B.  This transform is only valid when the 'cmpxchg' is not permitted to
5780   // spuriously fail.  In those cases, the old value may equal the expected
5781   // value but it is possible for the swap to not occur.
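  // E.g., '%old = extractvalue { i32, i1 } %pair, 0' compared equal to the
  // expected value becomes 'extractvalue { i32, i1 } %pair, 1', where %pair is
  // the result of a strong cmpxchg.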
5782   if (I.getPredicate() == ICmpInst::ICMP_EQ)
5783     if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
5784       if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
5785         if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
5786             !ACXI->isWeak())
5787           return ExtractValueInst::Create(ACXI, 1);
5788 
5789   {
5790     Value *X;
5791     const APInt *C;
5792     // icmp X+Cst, X
5793     if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
5794       return foldICmpAddOpConst(X, *C, I.getPredicate());
5795 
5796     // icmp X, X+Cst
5797     if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X)
5798       return foldICmpAddOpConst(X, *C, I.getSwappedPredicate());
5799   }
5800 
5801   if (Instruction *Res = foldICmpWithHighBitMask(I, Builder))
5802     return Res;
5803 
5804   if (I.getType()->isVectorTy())
5805     if (Instruction *Res = foldVectorCmp(I, Builder))
5806       return Res;
5807 
5808   return Changed ? &I : nullptr;
5809 }
5810 
5811 /// Fold fcmp ([us]itofp x, cst) if possible.
5812 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
5813                                                     Instruction *LHSI,
5814                                                     Constant *RHSC) {
5815   if (!isa<ConstantFP>(RHSC)) return nullptr;
5816   const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5817 
5818   // Get the width of the mantissa.  We don't want to hack on conversions that
5819   // might lose information from the integer, e.g. "i64 -> float"
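  // (E.g., float has a 24-bit significand, so many distinct i64 values map to
  // the same float and an equality fold would not be sound without the range
  // checks below.)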
5820   int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5821   if (MantissaWidth == -1) return nullptr;  // Unknown.
5822 
5823   IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5824 
5825   bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5826 
5827   if (I.isEquality()) {
5828     FCmpInst::Predicate P = I.getPredicate();
5829     bool IsExact = false;
5830     APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
5831     RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
5832 
5833     // If the floating point constant isn't an integer value, we know the result
5834     // of an equality / inequality comparison against it.
5835     if (!IsExact) {
5836       // TODO: Can never be -0.0 and other non-representable values
5837       APFloat RHSRoundInt(RHS);
5838       RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
5839       if (RHS != RHSRoundInt) {
5840         if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
5841           return replaceInstUsesWith(I, Builder.getFalse());
5842 
5843         assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
5844         return replaceInstUsesWith(I, Builder.getTrue());
5845       }
5846     }
5847 
5848     // TODO: If the constant is exactly representable, is it always OK to do
5849     // equality compares as integer?
5850   }
5851 
5852   // Check to see that the input is converted from an integer type that is
5853   // small enough to preserve all bits. TODO: check here for "known" sign bits.
5854   // This would allow us to handle (sitofp (x >>s 62) to float) if x is i64, e.g.
5855   unsigned InputSize = IntTy->getScalarSizeInBits();
5856 
5857   // Following test does NOT adjust InputSize downwards for signed inputs,
5858   // because the most negative value still requires all the mantissa bits
5859   // to distinguish it from one less than that value.
5860   if ((int)InputSize > MantissaWidth) {
5861     // Conversion would lose accuracy. Check if loss can impact comparison.
5862     int Exp = ilogb(RHS);
5863     if (Exp == APFloat::IEK_Inf) {
5864       int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
5865       if (MaxExponent < (int)InputSize - !LHSUnsigned)
5866         // Conversion could create infinity.
5867         return nullptr;
5868     } else {
5869       // Note that if RHS is zero or NaN, then Exp is negative
5870       // and first condition is trivially false.
5871       if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
5872         // Conversion could affect comparison.
5873         return nullptr;
5874     }
5875   }
5876 
5877   // Otherwise, we can potentially simplify the comparison.  We know that it
5878   // will always come through as an integer value and we know the constant is
5879   // not a NAN (it would have been previously simplified).
5880   assert(!RHS.isNaN() && "NaN comparison not already folded!");
5881 
5882   ICmpInst::Predicate Pred;
5883   switch (I.getPredicate()) {
5884   default: llvm_unreachable("Unexpected predicate!");
5885   case FCmpInst::FCMP_UEQ:
5886   case FCmpInst::FCMP_OEQ:
5887     Pred = ICmpInst::ICMP_EQ;
5888     break;
5889   case FCmpInst::FCMP_UGT:
5890   case FCmpInst::FCMP_OGT:
5891     Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5892     break;
5893   case FCmpInst::FCMP_UGE:
5894   case FCmpInst::FCMP_OGE:
5895     Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5896     break;
5897   case FCmpInst::FCMP_ULT:
5898   case FCmpInst::FCMP_OLT:
5899     Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5900     break;
5901   case FCmpInst::FCMP_ULE:
5902   case FCmpInst::FCMP_OLE:
5903     Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5904     break;
5905   case FCmpInst::FCMP_UNE:
5906   case FCmpInst::FCMP_ONE:
5907     Pred = ICmpInst::ICMP_NE;
5908     break;
5909   case FCmpInst::FCMP_ORD:
5910     return replaceInstUsesWith(I, Builder.getTrue());
5911   case FCmpInst::FCMP_UNO:
5912     return replaceInstUsesWith(I, Builder.getFalse());
5913   }
5914 
5915   // Now we know that the APFloat is a normal number, zero or inf.
5916 
5917   // See if the FP constant is too large for the integer.  For example,
5918   // comparing an i8 to 300.0.
5919   unsigned IntWidth = IntTy->getScalarSizeInBits();
5920 
5921   if (!LHSUnsigned) {
5922     // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
5923     // and large values.
5924     APFloat SMax(RHS.getSemantics());
5925     SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5926                           APFloat::rmNearestTiesToEven);
5927     if (SMax < RHS) { // smax < 13123.0
5928       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
5929           Pred == ICmpInst::ICMP_SLE)
5930         return replaceInstUsesWith(I, Builder.getTrue());
5931       return replaceInstUsesWith(I, Builder.getFalse());
5932     }
5933   } else {
5934     // If the RHS value is > UnsignedMax, fold the comparison. This handles
5935     // +INF and large values.
5936     APFloat UMax(RHS.getSemantics());
5937     UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5938                           APFloat::rmNearestTiesToEven);
5939     if (UMax < RHS) { // umax < 13123.0
5940       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
5941           Pred == ICmpInst::ICMP_ULE)
5942         return replaceInstUsesWith(I, Builder.getTrue());
5943       return replaceInstUsesWith(I, Builder.getFalse());
5944     }
5945   }
5946 
5947   if (!LHSUnsigned) {
5948     // See if the RHS value is < SignedMin.
5949     APFloat SMin(RHS.getSemantics());
5950     SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5951                           APFloat::rmNearestTiesToEven);
5952     if (SMin > RHS) { // smin > 12312.0
5953       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5954           Pred == ICmpInst::ICMP_SGE)
5955         return replaceInstUsesWith(I, Builder.getTrue());
5956       return replaceInstUsesWith(I, Builder.getFalse());
5957     }
5958   } else {
5959     // See if the RHS value is < UnsignedMin.
5960     APFloat UMin(RHS.getSemantics());
5961     UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
5962                           APFloat::rmNearestTiesToEven);
5963     if (UMin > RHS) { // umin > 12312.0
5964       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
5965           Pred == ICmpInst::ICMP_UGE)
5966         return replaceInstUsesWith(I, Builder.getTrue());
5967       return replaceInstUsesWith(I, Builder.getFalse());
5968     }
5969   }
5970 
5971   // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5972   // [0, UMAX], but it may still be fractional.  See if it is fractional by
5973   // casting the FP value to the integer value and back, checking for equality.
5974   // Don't do this for zero, because -0.0 is not fractional.
5975   Constant *RHSInt = LHSUnsigned
5976     ? ConstantExpr::getFPToUI(RHSC, IntTy)
5977     : ConstantExpr::getFPToSI(RHSC, IntTy);
5978   if (!RHS.isZero()) {
5979     bool Equal = LHSUnsigned
5980       ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
5981       : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
5982     if (!Equal) {
5983       // If we had a comparison against a fractional value, we have to adjust
5984       // the compare predicate and sometimes the value.  RHSC is rounded towards
5985       // zero at this point.
5986       switch (Pred) {
5987       default: llvm_unreachable("Unexpected integer comparison!");
5988       case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
5989         return replaceInstUsesWith(I, Builder.getTrue());
5990       case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
5991         return replaceInstUsesWith(I, Builder.getFalse());
5992       case ICmpInst::ICMP_ULE:
5993         // (float)int <= 4.4   --> int <= 4
5994         // (float)int <= -4.4  --> false
5995         if (RHS.isNegative())
5996           return replaceInstUsesWith(I, Builder.getFalse());
5997         break;
5998       case ICmpInst::ICMP_SLE:
5999         // (float)int <= 4.4   --> int <= 4
6000         // (float)int <= -4.4  --> int < -4
6001         if (RHS.isNegative())
6002           Pred = ICmpInst::ICMP_SLT;
6003         break;
6004       case ICmpInst::ICMP_ULT:
6005         // (float)int < -4.4   --> false
6006         // (float)int < 4.4    --> int <= 4
6007         if (RHS.isNegative())
6008           return replaceInstUsesWith(I, Builder.getFalse());
6009         Pred = ICmpInst::ICMP_ULE;
6010         break;
6011       case ICmpInst::ICMP_SLT:
6012         // (float)int < -4.4   --> int < -4
6013         // (float)int < 4.4    --> int <= 4
6014         if (!RHS.isNegative())
6015           Pred = ICmpInst::ICMP_SLE;
6016         break;
6017       case ICmpInst::ICMP_UGT:
6018         // (float)int > 4.4    --> int > 4
6019         // (float)int > -4.4   --> true
6020         if (RHS.isNegative())
6021           return replaceInstUsesWith(I, Builder.getTrue());
6022         break;
6023       case ICmpInst::ICMP_SGT:
6024         // (float)int > 4.4    --> int > 4
6025         // (float)int > -4.4   --> int >= -4
6026         if (RHS.isNegative())
6027           Pred = ICmpInst::ICMP_SGE;
6028         break;
6029       case ICmpInst::ICMP_UGE:
6030         // (float)int >= -4.4   --> true
6031         // (float)int >= 4.4    --> int > 4
6032         if (RHS.isNegative())
6033           return replaceInstUsesWith(I, Builder.getTrue());
6034         Pred = ICmpInst::ICMP_UGT;
6035         break;
6036       case ICmpInst::ICMP_SGE:
6037         // (float)int >= -4.4   --> int >= -4
6038         // (float)int >= 4.4    --> int > 4
6039         if (!RHS.isNegative())
6040           Pred = ICmpInst::ICMP_SGT;
6041         break;
6042       }
6043     }
6044   }
6045 
6046   // Lower this FP comparison into an appropriate integer version of the
6047   // comparison.
6048   return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
6049 }
6050 
6051 /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
6052 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
6053                                               Constant *RHSC) {
6054   // When C is not 0.0 and infinities are not allowed:
6055   // (C / X) < 0.0 is a sign-bit test of X
6056   // (C / X) < 0.0 --> X < 0.0 (if C is positive)
6057   // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
6058   //
6059   // Proof:
6060   // Multiply (C / X) < 0.0 by X * X / C.
6061   // - X is non-zero; if it were zero, the 'ninf' flag would be violated.
6062   // - C defines the sign of X * X / C. Thus it also defines whether to swap
6063   //   the predicate. C is also non-zero by definition.
6064   //
6065   // Thus X * X / C is non zero and the transformation is valid. [qed]
6066 
6067   FCmpInst::Predicate Pred = I.getPredicate();
6068 
6069   // Check that predicates are valid.
6070   if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
6071       (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
6072     return nullptr;
6073 
6074   // Check that RHS operand is zero.
6075   if (!match(RHSC, m_AnyZeroFP()))
6076     return nullptr;
6077 
6078   // Check fastmath flags ('ninf').
6079   if (!LHSI->hasNoInfs() || !I.hasNoInfs())
6080     return nullptr;
6081 
6082   // Check the properties of the dividend. It must not be zero to avoid a
6083   // division by zero (see Proof).
6084   const APFloat *C;
6085   if (!match(LHSI->getOperand(0), m_APFloat(C)))
6086     return nullptr;
6087 
6088   if (C->isZero())
6089     return nullptr;
6090 
6091   // Get swapped predicate if necessary.
6092   if (C->isNegative())
6093     Pred = I.getSwappedPredicate();
6094 
6095   return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
6096 }
6097 
6098 /// Optimize fabs(X) compared with zero.
6099 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
6100   Value *X;
6101   if (!match(I.getOperand(0), m_FAbs(m_Value(X))) ||
6102       !match(I.getOperand(1), m_PosZeroFP()))
6103     return nullptr;
6104 
6105   auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
6106     I->setPredicate(P);
6107     return IC.replaceOperand(*I, 0, X);
6108   };
6109 
6110   switch (I.getPredicate()) {
6111   case FCmpInst::FCMP_UGE:
6112   case FCmpInst::FCMP_OLT:
6113     // fabs(X) >= 0.0 --> true
6114     // fabs(X) <  0.0 --> false
6115     llvm_unreachable("fcmp should have simplified");
6116 
6117   case FCmpInst::FCMP_OGT:
6118     // fabs(X) > 0.0 --> X != 0.0
6119     return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
6120 
6121   case FCmpInst::FCMP_UGT:
6122     // fabs(X) u> 0.0 --> X u!= 0.0
6123     return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
6124 
6125   case FCmpInst::FCMP_OLE:
6126     // fabs(X) <= 0.0 --> X == 0.0
6127     return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
6128 
6129   case FCmpInst::FCMP_ULE:
6130     // fabs(X) u<= 0.0 --> X u== 0.0
6131     return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
6132 
6133   case FCmpInst::FCMP_OGE:
6134     // fabs(X) >= 0.0 --> !isnan(X)
6135     assert(!I.hasNoNaNs() && "fcmp should have simplified");
6136     return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
6137 
6138   case FCmpInst::FCMP_ULT:
6139     // fabs(X) u< 0.0 --> isnan(X)
6140     assert(!I.hasNoNaNs() && "fcmp should have simplified");
6141     return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
6142 
6143   case FCmpInst::FCMP_OEQ:
6144   case FCmpInst::FCMP_UEQ:
6145   case FCmpInst::FCMP_ONE:
6146   case FCmpInst::FCMP_UNE:
6147   case FCmpInst::FCMP_ORD:
6148   case FCmpInst::FCMP_UNO:
6149     // Look through the fabs() because it doesn't change anything but the sign.
6150     // fabs(X) == 0.0 --> X == 0.0,
6151     // fabs(X) != 0.0 --> X != 0.0
6152     // isnan(fabs(X)) --> isnan(X)
6153     // !isnan(fabs(X)) --> !isnan(X)
6154     return replacePredAndOp0(&I, I.getPredicate(), X);
6155 
6156   default:
6157     return nullptr;
6158   }
6159 }
6160 
6161 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
6162   bool Changed = false;
6163 
6164   /// Orders the operands of the compare so that they are listed from most
6165   /// complex to least complex. This puts binary operators before unary
6166   /// operators, and those before constants.
6167   if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6168     I.swapOperands();
6169     Changed = true;
6170   }
6171 
6172   const CmpInst::Predicate Pred = I.getPredicate();
6173   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6174   if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
6175                                   SQ.getWithInstruction(&I)))
6176     return replaceInstUsesWith(I, V);
6177 
6178   // Simplify 'fcmp pred X, X'
6179   Type *OpType = Op0->getType();
6180   assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
6181   if (Op0 == Op1) {
6182     switch (Pred) {
6183       default: break;
6184     case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
6185     case FCmpInst::FCMP_ULT:    // True if unordered or less than
6186     case FCmpInst::FCMP_UGT:    // True if unordered or greater than
6187     case FCmpInst::FCMP_UNE:    // True if unordered or not equal
6188       // Canonicalize these to be 'fcmp uno %X, 0.0'.
6189       I.setPredicate(FCmpInst::FCMP_UNO);
6190       I.setOperand(1, Constant::getNullValue(OpType));
6191       return &I;
6192 
6193     case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
6194     case FCmpInst::FCMP_OEQ:    // True if ordered and equal
6195     case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
6196     case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
6197       // Canonicalize these to be 'fcmp ord %X, 0.0'.
6198       I.setPredicate(FCmpInst::FCMP_ORD);
6199       I.setOperand(1, Constant::getNullValue(OpType));
6200       return &I;
6201     }
6202   }
6203 
6204   // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
6205   // then canonicalize the operand to 0.0.
6206   if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
6207     if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI))
6208       return replaceOperand(I, 0, ConstantFP::getNullValue(OpType));
6209 
6210     if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI))
6211       return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6212   }
6213 
6214   // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
6215   Value *X, *Y;
6216   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
6217     return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
6218 
6219   // Test if the FCmpInst instruction is used exclusively by a select as
6220   // part of a minimum or maximum operation. If so, refrain from doing
6221   // any other folding. This helps out other analyses which understand
6222   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6223   // and CodeGen. And in this case, at least one of the comparison
6224   // operands has at least one user besides the compare (the select),
6225   // which would often largely negate the benefit of folding anyway.
6226   if (I.hasOneUse())
6227     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
6228       Value *A, *B;
6229       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
6230       if (SPR.Flavor != SPF_UNKNOWN)
6231         return nullptr;
6232     }
6233 
6234   // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
6235   // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
6236   if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
6237     return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6238 
6239   // Handle fcmp with instruction LHS and constant RHS.
6240   Instruction *LHSI;
6241   Constant *RHSC;
6242   if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
6243     switch (LHSI->getOpcode()) {
6244     case Instruction::PHI:
6245       // Only fold fcmp into the PHI if the phi and fcmp are in the same
6246       // block.  If in the same block, we're encouraging jump threading.  If
6247       // not, we are just pessimizing the code by making an i1 phi.
6248       if (LHSI->getParent() == I.getParent())
6249         if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
6250           return NV;
6251       break;
6252     case Instruction::SIToFP:
6253     case Instruction::UIToFP:
6254       if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
6255         return NV;
6256       break;
6257     case Instruction::FDiv:
6258       if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
6259         return NV;
6260       break;
6261     case Instruction::Load:
6262       if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
6263         if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6264           if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
6265               !cast<LoadInst>(LHSI)->isVolatile())
6266             if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
6267               return Res;
6268       break;
6269     }
6270   }
6271 
6272   if (Instruction *R = foldFabsWithFcmpZero(I, *this))
6273     return R;
6274 
6275   if (match(Op0, m_FNeg(m_Value(X)))) {
6276     // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
6277     Constant *C;
6278     if (match(Op1, m_Constant(C))) {
6279       Constant *NegC = ConstantExpr::getFNeg(C);
6280       return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
6281     }
6282   }
6283 
6284   if (match(Op0, m_FPExt(m_Value(X)))) {
6285     // fcmp (fpext X), (fpext Y) -> fcmp X, Y
6286     if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
6287       return new FCmpInst(Pred, X, Y, "", &I);
6288 
6289     // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
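    // (E.g., 'fcmp olt (fpext float %x to double), 1.0' can become
    // 'fcmp olt float %x, 1.0', since 1.0 is exactly representable as float.)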
6290     const APFloat *C;
6291     if (match(Op1, m_APFloat(C))) {
6292       const fltSemantics &FPSem =
6293           X->getType()->getScalarType()->getFltSemantics();
6294       bool Lossy;
6295       APFloat TruncC = *C;
6296       TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
6297 
6298       // Avoid lossy conversions and denormals.
6299       // Zero is a special case that's OK to convert.
6300       APFloat Fabs = TruncC;
6301       Fabs.clearSign();
6302       if (!Lossy &&
6303           (!(Fabs < APFloat::getSmallestNormalized(FPSem)) || Fabs.isZero())) {
6304         Constant *NewC = ConstantFP::get(X->getType(), TruncC);
6305         return new FCmpInst(Pred, X, NewC, "", &I);
6306       }
6307     }
6308   }
6309 
6310   if (I.getType()->isVectorTy())
6311     if (Instruction *Res = foldVectorCmp(I, Builder))
6312       return Res;
6313 
6314   return Changed ? &I : nullptr;
6315 }
6316