1 //===- InstCombineCompares.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitICmp and visitFCmp functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APSInt.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/ConstantFolding.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/TargetLibraryInfo.h"
20 #include "llvm/IR/ConstantRange.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/GetElementPtrTypeIterator.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/PatternMatch.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/KnownBits.h"
27 #include "llvm/Transforms/InstCombine/InstCombiner.h"
28 
29 using namespace llvm;
30 using namespace PatternMatch;
31 
32 #define DEBUG_TYPE "instcombine"
33 
34 // How many times is a select replaced by one of its operands?
35 STATISTIC(NumSel, "Number of select opts");
36 
37 
38 /// Compute Result = In1+In2, returning true if the result overflowed for this
39 /// type.
40 static bool addWithOverflow(APInt &Result, const APInt &In1,
41                             const APInt &In2, bool IsSigned = false) {
42   bool Overflow;
43   if (IsSigned)
44     Result = In1.sadd_ov(In2, Overflow);
45   else
46     Result = In1.uadd_ov(In2, Overflow);
47 
48   return Overflow;
49 }
50 
51 /// Compute Result = In1-In2, returning true if the result overflowed for this
52 /// type.
53 static bool subWithOverflow(APInt &Result, const APInt &In1,
54                             const APInt &In2, bool IsSigned = false) {
55   bool Overflow;
56   if (IsSigned)
57     Result = In1.ssub_ov(In2, Overflow);
58   else
59     Result = In1.usub_ov(In2, Overflow);
60 
61   return Overflow;
62 }
63 
64 /// Given an icmp instruction, return true if any use of this comparison is a
65 /// branch on sign bit comparison.
66 static bool hasBranchUse(ICmpInst &I) {
67   for (auto *U : I.users())
68     if (isa<BranchInst>(U))
69       return true;
70   return false;
71 }
72 
73 /// Returns true if the exploded icmp can be expressed as a signed comparison
74 /// to zero and updates the predicate accordingly.
75 /// The signedness of the comparison is preserved.
76 /// TODO: Refactor with decomposeBitTestICmp()?
77 static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
78   if (!ICmpInst::isSigned(Pred))
79     return false;
80 
81   if (C.isNullValue())
82     return ICmpInst::isRelational(Pred);
83 
84   if (C.isOneValue()) {
85     if (Pred == ICmpInst::ICMP_SLT) {
86       Pred = ICmpInst::ICMP_SLE;
87       return true;
88     }
89   } else if (C.isAllOnesValue()) {
90     if (Pred == ICmpInst::ICMP_SGT) {
91       Pred = ICmpInst::ICMP_SGE;
92       return true;
93     }
94   }
95 
96   return false;
97 }
98 
99 /// This is called when we see this pattern:
100 ///   cmp pred (load (gep GV, ...)), cmpcst
101 /// where GV is a global variable with a constant initializer. Try to simplify
102 /// this into some simple computation that does not need the load. For example
103 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
104 ///
105 /// If AndCst is non-null, then the loaded value is masked with that constant
106 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
107 Instruction *
108 InstCombinerImpl::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
109                                                GlobalVariable *GV, CmpInst &ICI,
110                                                ConstantInt *AndCst) {
111   Constant *Init = GV->getInitializer();
112   if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
113     return nullptr;
114 
115   uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
116   // Don't blow up on huge arrays.
117   if (ArrayElementCount > MaxArraySizeForCombine)
118     return nullptr;
119 
120   // There are many forms of this optimization we can handle, for now, just do
121   // the simple index into a single-dimensional array.
122   //
123   // Require: GEP GV, 0, i {{, constant indices}}
124   if (GEP->getNumOperands() < 3 ||
125       !isa<ConstantInt>(GEP->getOperand(1)) ||
126       !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
127       isa<Constant>(GEP->getOperand(2)))
128     return nullptr;
129 
130   // Check that indices after the variable are constants and in-range for the
131   // type they index.  Collect the indices.  This is typically for arrays of
132   // structs.
133   SmallVector<unsigned, 4> LaterIndices;
134 
135   Type *EltTy = Init->getType()->getArrayElementType();
136   for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
137     ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
138     if (!Idx) return nullptr;  // Variable index.
139 
140     uint64_t IdxVal = Idx->getZExtValue();
141     if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
142 
143     if (StructType *STy = dyn_cast<StructType>(EltTy))
144       EltTy = STy->getElementType(IdxVal);
145     else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
146       if (IdxVal >= ATy->getNumElements()) return nullptr;
147       EltTy = ATy->getElementType();
148     } else {
149       return nullptr; // Unknown type.
150     }
151 
152     LaterIndices.push_back(IdxVal);
153   }
154 
155   enum { Overdefined = -3, Undefined = -2 };
156 
157   // Variables for our state machines.
158 
159   // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
160   // "i == 47 | i == 87", where 47 is the first index the condition is true for,
161   // and 87 is the second (and last) index.  FirstTrueElement is -2 when
162   // undefined, otherwise set to the first true element.  SecondTrueElement is
163   // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
164   int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
165 
166   // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
167   // form "i != 47 & i != 87".  Same state transitions as for true elements.
168   int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
169 
170   /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
171   /// define a state machine that triggers for ranges of values that the index
172   /// is true or false for.  This triggers on things like "abbbbc"[i] == 'b'.
173   /// This is -2 when undefined, -3 when overdefined, and otherwise the last
174   /// index in the range (inclusive).  We use -2 for undefined here because we
175   /// use relative comparisons and don't want 0-1 to match -1.
176   int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
177 
178   // MagicBitvector - This is a magic bitvector where we set a bit if the
179   // comparison is true for element 'i'.  If there are 64 elements or less in
180   // the array, this will fully represent all the comparison results.
181   uint64_t MagicBitvector = 0;
182 
183   // Scan the array and see if one of our patterns matches.
184   Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
185   for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
186     Constant *Elt = Init->getAggregateElement(i);
187     if (!Elt) return nullptr;
188 
189     // If this is indexing an array of structures, get the structure element.
190     if (!LaterIndices.empty())
191       Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);
192 
193     // If the element is masked, handle it.
194     if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
195 
196     // Find out if the comparison would be true or false for the i'th element.
197     Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
198                                                   CompareRHS, DL, &TLI);
199     // If the result is undef for this element, ignore it.
200     if (isa<UndefValue>(C)) {
201       // Extend range state machines to cover this element in case there is an
202       // undef in the middle of the range.
203       if (TrueRangeEnd == (int)i-1)
204         TrueRangeEnd = i;
205       if (FalseRangeEnd == (int)i-1)
206         FalseRangeEnd = i;
207       continue;
208     }
209 
210     // If we can't compute the result for any of the elements, we have to give
211     // up evaluating the entire conditional.
212     if (!isa<ConstantInt>(C)) return nullptr;
213 
214     // Otherwise, we know if the comparison is true or false for this element,
215     // update our state machines.
216     bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
217 
218     // State machine for single/double/range index comparison.
219     if (IsTrueForElt) {
220       // Update the TrueElement state machine.
221       if (FirstTrueElement == Undefined)
222         FirstTrueElement = TrueRangeEnd = i;  // First true element.
223       else {
224         // Update double-compare state machine.
225         if (SecondTrueElement == Undefined)
226           SecondTrueElement = i;
227         else
228           SecondTrueElement = Overdefined;
229 
230         // Update range state machine.
231         if (TrueRangeEnd == (int)i-1)
232           TrueRangeEnd = i;
233         else
234           TrueRangeEnd = Overdefined;
235       }
236     } else {
237       // Update the FalseElement state machine.
238       if (FirstFalseElement == Undefined)
239         FirstFalseElement = FalseRangeEnd = i; // First false element.
240       else {
241         // Update double-compare state machine.
242         if (SecondFalseElement == Undefined)
243           SecondFalseElement = i;
244         else
245           SecondFalseElement = Overdefined;
246 
247         // Update range state machine.
248         if (FalseRangeEnd == (int)i-1)
249           FalseRangeEnd = i;
250         else
251           FalseRangeEnd = Overdefined;
252       }
253     }
254 
255     // If this element is in range, update our magic bitvector.
256     if (i < 64 && IsTrueForElt)
257       MagicBitvector |= 1ULL << i;
258 
259     // If all of our states become overdefined, bail out early.  Since the
260     // predicate is expensive, only check it every 8 elements.  This is only
261     // really useful for really huge arrays.
262     if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
263         SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
264         FalseRangeEnd == Overdefined)
265       return nullptr;
266   }
267 
268   // Now that we've scanned the entire array, emit our new comparison(s).  We
269   // order the state machines in complexity of the generated code.
270   Value *Idx = GEP->getOperand(2);
271 
272   // If the index is larger than the pointer size of the target, truncate the
273   // index down like the GEP would do implicitly.  We don't have to do this for
274   // an inbounds GEP because the index can't be out of range.
275   if (!GEP->isInBounds()) {
276     Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
277     unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
278     if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize)
279       Idx = Builder.CreateTrunc(Idx, IntPtrTy);
280   }
281 
282   // If inbounds keyword is not present, Idx * ElementSize can overflow.
283   // Let's assume that ElementSize is 2 and the wanted value is at offset 0.
284   // Then, there are two possible values for Idx to match offset 0:
285   // 0x00..00, 0x80..00.
286   // Emitting 'icmp eq Idx, 0' isn't correct in this case because the
287   // comparison is false if Idx was 0x80..00.
288   // We need to erase the highest countTrailingZeros(ElementSize) bits of Idx.
289   unsigned ElementSize =
290       DL.getTypeAllocSize(Init->getType()->getArrayElementType());
291   auto MaskIdx = [&](Value* Idx){
292     if (!GEP->isInBounds() && countTrailingZeros(ElementSize) != 0) {
293       Value *Mask = ConstantInt::get(Idx->getType(), -1);
294       Mask = Builder.CreateLShr(Mask, countTrailingZeros(ElementSize));
295       Idx = Builder.CreateAnd(Idx, Mask);
296     }
297     return Idx;
298   };
299 
300   // If the comparison is only true for one or two elements, emit direct
301   // comparisons.
302   if (SecondTrueElement != Overdefined) {
303     Idx = MaskIdx(Idx);
304     // None true -> false.
305     if (FirstTrueElement == Undefined)
306       return replaceInstUsesWith(ICI, Builder.getFalse());
307 
308     Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
309 
310     // True for one element -> 'i == 47'.
311     if (SecondTrueElement == Undefined)
312       return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
313 
314     // True for two elements -> 'i == 47 | i == 72'.
315     Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
316     Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
317     Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
318     return BinaryOperator::CreateOr(C1, C2);
319   }
320 
321   // If the comparison is only false for one or two elements, emit direct
322   // comparisons.
323   if (SecondFalseElement != Overdefined) {
324     Idx = MaskIdx(Idx);
325     // None false -> true.
326     if (FirstFalseElement == Undefined)
327       return replaceInstUsesWith(ICI, Builder.getTrue());
328 
329     Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
330 
331     // False for one element -> 'i != 47'.
332     if (SecondFalseElement == Undefined)
333       return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
334 
335     // False for two elements -> 'i != 47 & i != 72'.
336     Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
337     Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
338     Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
339     return BinaryOperator::CreateAnd(C1, C2);
340   }
341 
342   // If the comparison can be replaced with a range comparison for the elements
343   // where it is true, emit the range check.
344   if (TrueRangeEnd != Overdefined) {
345     assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
346     Idx = MaskIdx(Idx);
347 
348     // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
349     if (FirstTrueElement) {
350       Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
351       Idx = Builder.CreateAdd(Idx, Offs);
352     }
353 
354     Value *End = ConstantInt::get(Idx->getType(),
355                                   TrueRangeEnd-FirstTrueElement+1);
356     return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
357   }
358 
359   // False range check.
360   if (FalseRangeEnd != Overdefined) {
361     assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
362     Idx = MaskIdx(Idx);
363     // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
364     if (FirstFalseElement) {
365       Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
366       Idx = Builder.CreateAdd(Idx, Offs);
367     }
368 
369     Value *End = ConstantInt::get(Idx->getType(),
370                                   FalseRangeEnd-FirstFalseElement);
371     return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
372   }
373 
374   // If a magic bitvector captures the entire comparison state
375   // of this load, replace it with computation that does:
376   //   ((magic_cst >> i) & 1) != 0
377   {
378     Type *Ty = nullptr;
379 
380     // Look for an appropriate type:
381     // - The type of Idx if the magic fits
382     // - The smallest fitting legal type
383     if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
384       Ty = Idx->getType();
385     else
386       Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
387 
388     if (Ty) {
389       Idx = MaskIdx(Idx);
390       Value *V = Builder.CreateIntCast(Idx, Ty, false);
391       V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
392       V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
393       return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
394     }
395   }
396 
397   return nullptr;
398 }
399 
400 /// Return a value that can be used to compare the *offset* implied by a GEP to
401 /// zero. For example, if we have &A[i], we want to return 'i' for
402 /// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
403 /// are involved. The above expression would also be legal to codegen as
404 /// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
405 /// This latter form is less amenable to optimization though, and we are allowed
406 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
407 ///
408 /// If we can't emit an optimized form for this expression, this returns null.
409 ///
410 static Value *evaluateGEPOffsetExpression(User *GEP, InstCombinerImpl &IC,
411                                           const DataLayout &DL) {
412   gep_type_iterator GTI = gep_type_begin(GEP);
413 
414   // Check to see if this gep only has a single variable index.  If so, and if
415   // any constant indices are a multiple of its scale, then we can compute this
416   // in terms of the scale of the variable index.  For example, if the GEP
417   // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
418   // because the expression will cross zero at the same point.
419   unsigned i, e = GEP->getNumOperands();
420   int64_t Offset = 0;
421   for (i = 1; i != e; ++i, ++GTI) {
422     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
423       // Compute the aggregate offset of constant indices.
424       if (CI->isZero()) continue;
425 
426       // Handle a struct index, which adds its field offset to the pointer.
427       if (StructType *STy = GTI.getStructTypeOrNull()) {
428         Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
429       } else {
430         uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
431         Offset += Size*CI->getSExtValue();
432       }
433     } else {
434       // Found our variable index.
435       break;
436     }
437   }
438 
439   // If there are no variable indices, we must have a constant offset, just
440   // evaluate it the general way.
441   if (i == e) return nullptr;
442 
443   Value *VariableIdx = GEP->getOperand(i);
444   // Determine the scale factor of the variable element.  For example, this is
445   // 4 if the variable index is into an array of i32.
446   uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
447 
448   // Verify that there are no other variable indices.  If so, emit the hard way.
449   for (++i, ++GTI; i != e; ++i, ++GTI) {
450     ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
451     if (!CI) return nullptr;
452 
453     // Compute the aggregate offset of constant indices.
454     if (CI->isZero()) continue;
455 
456     // Handle a struct index, which adds its field offset to the pointer.
457     if (StructType *STy = GTI.getStructTypeOrNull()) {
458       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
459     } else {
460       uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
461       Offset += Size*CI->getSExtValue();
462     }
463   }
464 
465   // Okay, we know we have a single variable index, which must be a
466   // pointer/array/vector index.  If there is no offset, life is simple, return
467   // the index.
468   Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
469   unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
470   if (Offset == 0) {
471     // Cast to intptrty in case a truncation occurs.  If an extension is needed,
472     // we don't need to bother extending: the extension won't affect where the
473     // computation crosses zero.
474     if (VariableIdx->getType()->getPrimitiveSizeInBits().getFixedSize() >
475         IntPtrWidth) {
476       VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
477     }
478     return VariableIdx;
479   }
480 
481   // Otherwise, there is an index.  The computation we will do will be modulo
482   // the pointer size.
483   Offset = SignExtend64(Offset, IntPtrWidth);
484   VariableScale = SignExtend64(VariableScale, IntPtrWidth);
485 
486   // To do this transformation, any constant index must be a multiple of the
487   // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
488   // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
489   // multiple of the variable scale.
490   int64_t NewOffs = Offset / (int64_t)VariableScale;
491   if (Offset != NewOffs*(int64_t)VariableScale)
492     return nullptr;
493 
494   // Okay, we can do this evaluation.  Start by converting the index to intptr.
495   if (VariableIdx->getType() != IntPtrTy)
496     VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
497                                             true /*Signed*/);
498   Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
499   return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
500 }
501 
502 /// Returns true if we can rewrite Start as a GEP with pointer Base
503 /// and some integer offset. The nodes that need to be re-written
504 /// for this transformation will be added to Explored.
505 static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
506                                   const DataLayout &DL,
507                                   SetVector<Value *> &Explored) {
508   SmallVector<Value *, 16> WorkList(1, Start);
509   Explored.insert(Base);
510 
511   // The following traversal gives us an order which can be used
512   // when doing the final transformation. Since in the final
513   // transformation we create the PHI replacement instructions first,
514   // we don't have to get them in any particular order.
515   //
516   // However, for other instructions we will have to traverse the
517   // operands of an instruction first, which means that we have to
518   // do a post-order traversal.
519   while (!WorkList.empty()) {
520     SetVector<PHINode *> PHIs;
521 
522     while (!WorkList.empty()) {
523       if (Explored.size() >= 100)
524         return false;
525 
526       Value *V = WorkList.back();
527 
528       if (Explored.contains(V)) {
529         WorkList.pop_back();
530         continue;
531       }
532 
533       if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
534           !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
535         // We've found some value that we can't explore which is different from
536         // the base. Therefore we can't do this transformation.
537         return false;
538 
539       if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
540         auto *CI = cast<CastInst>(V);
541         if (!CI->isNoopCast(DL))
542           return false;
543 
544         if (Explored.count(CI->getOperand(0)) == 0)
545           WorkList.push_back(CI->getOperand(0));
546       }
547 
548       if (auto *GEP = dyn_cast<GEPOperator>(V)) {
549         // We're limiting the GEP to having one index. This will preserve
550         // the original pointer type. We could handle more cases in the
551         // future.
552         if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
553             GEP->getType() != Start->getType())
554           return false;
555 
556         if (Explored.count(GEP->getOperand(0)) == 0)
557           WorkList.push_back(GEP->getOperand(0));
558       }
559 
560       if (WorkList.back() == V) {
561         WorkList.pop_back();
562         // We've finished visiting this node, mark it as such.
563         Explored.insert(V);
564       }
565 
566       if (auto *PN = dyn_cast<PHINode>(V)) {
567         // We cannot transform PHIs on unsplittable basic blocks.
568         if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
569           return false;
570         Explored.insert(PN);
571         PHIs.insert(PN);
572       }
573     }
574 
575     // Explore the PHI nodes further.
576     for (auto *PN : PHIs)
577       for (Value *Op : PN->incoming_values())
578         if (Explored.count(Op) == 0)
579           WorkList.push_back(Op);
580   }
581 
582   // Make sure that we can do this. Since we can't insert GEPs in a basic
583   // block before a PHI node, we can't easily do this transformation if
584   // we have PHI node users of transformed instructions.
585   for (Value *Val : Explored) {
586     for (Value *Use : Val->uses()) {
587 
588       auto *PHI = dyn_cast<PHINode>(Use);
589       auto *Inst = dyn_cast<Instruction>(Val);
590 
591       if (Inst == Base || Inst == PHI || !Inst || !PHI ||
592           Explored.count(PHI) == 0)
593         continue;
594 
595       if (PHI->getParent() == Inst->getParent())
596         return false;
597     }
598   }
599   return true;
600 }
601 
602 // Sets the appropriate insert point on Builder where we can add
603 // a replacement Instruction for V (if that is possible).
604 static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
605                               bool Before = true) {
606   if (auto *PHI = dyn_cast<PHINode>(V)) {
607     Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
608     return;
609   }
610   if (auto *I = dyn_cast<Instruction>(V)) {
611     if (!Before)
612       I = &*std::next(I->getIterator());
613     Builder.SetInsertPoint(I);
614     return;
615   }
616   if (auto *A = dyn_cast<Argument>(V)) {
617     // Set the insertion point in the entry block.
618     BasicBlock &Entry = A->getParent()->getEntryBlock();
619     Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
620     return;
621   }
622   // Otherwise, this is a constant and we don't need to set a new
623   // insertion point.
624   assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
625 }
626 
627 /// Returns a re-written value of Start as an indexed GEP using Base as a
628 /// pointer.
629 static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
630                                  const DataLayout &DL,
631                                  SetVector<Value *> &Explored) {
632   // Perform all the substitutions. This is a bit tricky because we can
633   // have cycles in our use-def chains.
634   // 1. Create the PHI nodes without any incoming values.
635   // 2. Create all the other values.
636   // 3. Add the edges for the PHI nodes.
637   // 4. Emit GEPs to get the original pointers.
638   // 5. Remove the original instructions.
639   Type *IndexType = IntegerType::get(
640       Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));
641 
642   DenseMap<Value *, Value *> NewInsts;
643   NewInsts[Base] = ConstantInt::getNullValue(IndexType);
644 
645   // Create the new PHI nodes, without adding any incoming values.
646   for (Value *Val : Explored) {
647     if (Val == Base)
648       continue;
649     // Create empty phi nodes. This avoids cyclic dependencies when creating
650     // the remaining instructions.
651     if (auto *PHI = dyn_cast<PHINode>(Val))
652       NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
653                                       PHI->getName() + ".idx", PHI);
654   }
655   IRBuilder<> Builder(Base->getContext());
656 
657   // Create all the other instructions.
658   for (Value *Val : Explored) {
659 
660     if (NewInsts.find(Val) != NewInsts.end())
661       continue;
662 
663     if (auto *CI = dyn_cast<CastInst>(Val)) {
664       // Don't get rid of the intermediate variable here; the store can grow
665       // the map which will invalidate the reference to the input value.
666       Value *V = NewInsts[CI->getOperand(0)];
667       NewInsts[CI] = V;
668       continue;
669     }
670     if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
671       Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
672                                                   : GEP->getOperand(1);
673       setInsertionPoint(Builder, GEP);
674       // Indices might need to be sign extended. GEPs will magically do
675       // this, but we need to do it ourselves here.
676       if (Index->getType()->getScalarSizeInBits() !=
677           NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
678         Index = Builder.CreateSExtOrTrunc(
679             Index, NewInsts[GEP->getOperand(0)]->getType(),
680             GEP->getOperand(0)->getName() + ".sext");
681       }
682 
683       auto *Op = NewInsts[GEP->getOperand(0)];
684       if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
685         NewInsts[GEP] = Index;
686       else
687         NewInsts[GEP] = Builder.CreateNSWAdd(
688             Op, Index, GEP->getOperand(0)->getName() + ".add");
689       continue;
690     }
691     if (isa<PHINode>(Val))
692       continue;
693 
694     llvm_unreachable("Unexpected instruction type");
695   }
696 
697   // Add the incoming values to the PHI nodes.
698   for (Value *Val : Explored) {
699     if (Val == Base)
700       continue;
701     // All the instructions have been created, we can now add edges to the
702     // phi nodes.
703     if (auto *PHI = dyn_cast<PHINode>(Val)) {
704       PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
705       for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
706         Value *NewIncoming = PHI->getIncomingValue(I);
707 
708         if (NewInsts.find(NewIncoming) != NewInsts.end())
709           NewIncoming = NewInsts[NewIncoming];
710 
711         NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
712       }
713     }
714   }
715 
716   for (Value *Val : Explored) {
717     if (Val == Base)
718       continue;
719 
720     // Depending on the type, for external users we have to emit
721     // a GEP or a GEP + ptrtoint.
722     setInsertionPoint(Builder, Val, false);
723 
724     // If required, create an inttoptr instruction for Base.
725     Value *NewBase = Base;
726     if (!Base->getType()->isPointerTy())
727       NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
728                                                Start->getName() + "to.ptr");
729 
730     Value *GEP = Builder.CreateInBoundsGEP(
731         Start->getType()->getPointerElementType(), NewBase,
732         makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");
733 
734     if (!Val->getType()->isPointerTy()) {
735       Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
736                                               Val->getName() + ".conv");
737       GEP = Cast;
738     }
739     Val->replaceAllUsesWith(GEP);
740   }
741 
742   return NewInsts[Start];
743 }
744 
745 /// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
746 /// the input Value as a constant indexed GEP. Returns a pair containing
747 /// the GEPs Pointer and Index.
748 static std::pair<Value *, Value *>
749 getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
750   Type *IndexType = IntegerType::get(V->getContext(),
751                                      DL.getIndexTypeSizeInBits(V->getType()));
752 
753   Constant *Index = ConstantInt::getNullValue(IndexType);
754   while (true) {
755     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
756       // We accept only inbouds GEPs here to exclude the possibility of
757       // overflow.
758       if (!GEP->isInBounds())
759         break;
760       if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
761           GEP->getType() == V->getType()) {
762         V = GEP->getOperand(0);
763         Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
764         Index = ConstantExpr::getAdd(
765             Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
766         continue;
767       }
768       break;
769     }
770     if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
771       if (!CI->isNoopCast(DL))
772         break;
773       V = CI->getOperand(0);
774       continue;
775     }
776     if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
777       if (!CI->isNoopCast(DL))
778         break;
779       V = CI->getOperand(0);
780       continue;
781     }
782     break;
783   }
784   return {V, Index};
785 }
786 
787 /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
788 /// We can look through PHIs, GEPs and casts in order to determine a common base
789 /// between GEPLHS and RHS.
790 static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
791                                               ICmpInst::Predicate Cond,
792                                               const DataLayout &DL) {
793   // FIXME: Support vector of pointers.
794   if (GEPLHS->getType()->isVectorTy())
795     return nullptr;
796 
797   if (!GEPLHS->hasAllConstantIndices())
798     return nullptr;
799 
800   // Make sure the pointers have the same type.
801   if (GEPLHS->getType() != RHS->getType())
802     return nullptr;
803 
804   Value *PtrBase, *Index;
805   std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);
806 
807   // The set of nodes that will take part in this transformation.
808   SetVector<Value *> Nodes;
809 
810   if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
811     return nullptr;
812 
813   // We know we can re-write this as
814   //  ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
815   // Since we've only looked through inbouds GEPs we know that we
816   // can't have overflow on either side. We can therefore re-write
817   // this as:
818   //   OFFSET1 cmp OFFSET2
819   Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);
820 
821   // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
822   // GEP having PtrBase as the pointer base, and has returned in NewRHS the
823   // offset. Since Index is the offset of LHS to the base pointer, we will now
824   // compare the offsets instead of comparing the pointers.
825   return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
826 }
827 
828 /// Fold comparisons between a GEP instruction and something else. At this point
829 /// we know that the GEP is on the LHS of the comparison.
830 Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
831                                            ICmpInst::Predicate Cond,
832                                            Instruction &I) {
833   // Don't transform signed compares of GEPs into index compares. Even if the
834   // GEP is inbounds, the final add of the base pointer can have signed overflow
835   // and would change the result of the icmp.
836   // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
837   // the maximum signed value for the pointer type.
838   if (ICmpInst::isSigned(Cond))
839     return nullptr;
840 
841   // Look through bitcasts and addrspacecasts. We do not however want to remove
842   // 0 GEPs.
843   if (!isa<GetElementPtrInst>(RHS))
844     RHS = RHS->stripPointerCasts();
845 
846   Value *PtrBase = GEPLHS->getOperand(0);
847   // FIXME: Support vector pointer GEPs.
848   if (PtrBase == RHS && GEPLHS->isInBounds() &&
849       !GEPLHS->getType()->isVectorTy()) {
850     // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
851     // This transformation (ignoring the base and scales) is valid because we
852     // know pointers can't overflow since the gep is inbounds.  See if we can
853     // output an optimized form.
854     Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);
855 
856     // If not, synthesize the offset the hard way.
857     if (!Offset)
858       Offset = EmitGEPOffset(GEPLHS);
859     return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
860                         Constant::getNullValue(Offset->getType()));
861   }
862 
863   if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
864       isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
865       !NullPointerIsDefined(I.getFunction(),
866                             RHS->getType()->getPointerAddressSpace())) {
867     // For most address spaces, an allocation can't be placed at null, but null
868     // itself is treated as a 0 size allocation in the in bounds rules.  Thus,
869     // the only valid inbounds address derived from null, is null itself.
870     // Thus, we have four cases to consider:
871     // 1) Base == nullptr, Offset == 0 -> inbounds, null
872     // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
873     // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
874     // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
875     //
876     // (Note if we're indexing a type of size 0, that simply collapses into one
877     //  of the buckets above.)
878     //
879     // In general, we're allowed to make values less poison (i.e. remove
880     //   sources of full UB), so in this case, we just select between the two
881     //   non-poison cases (1 and 4 above).
882     //
883     // For vectors, we apply the same reasoning on a per-lane basis.
884     auto *Base = GEPLHS->getPointerOperand();
885     if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
886       auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
887       Base = Builder.CreateVectorSplat(EC, Base);
888     }
889     return new ICmpInst(Cond, Base,
890                         ConstantExpr::getPointerBitCastOrAddrSpaceCast(
891                             cast<Constant>(RHS), Base->getType()));
892   } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
893     // If the base pointers are different, but the indices are the same, just
894     // compare the base pointer.
895     if (PtrBase != GEPRHS->getOperand(0)) {
896       bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
897       IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
898                         GEPRHS->getOperand(0)->getType();
899       if (IndicesTheSame)
900         for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
901           if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
902             IndicesTheSame = false;
903             break;
904           }
905 
906       // If all indices are the same, just compare the base pointers.
907       Type *BaseType = GEPLHS->getOperand(0)->getType();
908       if (IndicesTheSame && CmpInst::makeCmpResultType(BaseType) == I.getType())
909         return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
910 
911       // If we're comparing GEPs with two base pointers that only differ in type
912       // and both GEPs have only constant indices or just one use, then fold
913       // the compare with the adjusted indices.
914       // FIXME: Support vector of pointers.
915       if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
916           (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
917           (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
918           PtrBase->stripPointerCasts() ==
919               GEPRHS->getOperand(0)->stripPointerCasts() &&
920           !GEPLHS->getType()->isVectorTy()) {
921         Value *LOffset = EmitGEPOffset(GEPLHS);
922         Value *ROffset = EmitGEPOffset(GEPRHS);
923 
924         // If we looked through an addrspacecast between different sized address
925         // spaces, the LHS and RHS pointers are different sized
926         // integers. Truncate to the smaller one.
927         Type *LHSIndexTy = LOffset->getType();
928         Type *RHSIndexTy = ROffset->getType();
929         if (LHSIndexTy != RHSIndexTy) {
930           if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() <
931               RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) {
932             ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
933           } else
934             LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
935         }
936 
937         Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
938                                         LOffset, ROffset);
939         return replaceInstUsesWith(I, Cmp);
940       }
941 
942       // Otherwise, the base pointers are different and the indices are
943       // different. Try convert this to an indexed compare by looking through
944       // PHIs/casts.
945       return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
946     }
947 
948     // If one of the GEPs has all zero indices, recurse.
949     // FIXME: Handle vector of pointers.
950     if (!GEPLHS->getType()->isVectorTy() && GEPLHS->hasAllZeroIndices())
951       return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
952                          ICmpInst::getSwappedPredicate(Cond), I);
953 
954     // If the other GEP has all zero indices, recurse.
955     // FIXME: Handle vector of pointers.
956     if (!GEPRHS->getType()->isVectorTy() && GEPRHS->hasAllZeroIndices())
957       return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
958 
959     bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
960     if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
961       // If the GEPs only differ by one index, compare it.
962       unsigned NumDifferences = 0;  // Keep track of # differences.
963       unsigned DiffOperand = 0;     // The operand that differs.
964       for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
965         if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
966           Type *LHSType = GEPLHS->getOperand(i)->getType();
967           Type *RHSType = GEPRHS->getOperand(i)->getType();
968           // FIXME: Better support for vector of pointers.
969           if (LHSType->getPrimitiveSizeInBits() !=
970                    RHSType->getPrimitiveSizeInBits() ||
971               (GEPLHS->getType()->isVectorTy() &&
972                (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
973             // Irreconcilable differences.
974             NumDifferences = 2;
975             break;
976           }
977 
978           if (NumDifferences++) break;
979           DiffOperand = i;
980         }
981 
982       if (NumDifferences == 0)   // SAME GEP?
983         return replaceInstUsesWith(I, // No comparison is needed here.
984           ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
985 
986       else if (NumDifferences == 1 && GEPsInBounds) {
987         Value *LHSV = GEPLHS->getOperand(DiffOperand);
988         Value *RHSV = GEPRHS->getOperand(DiffOperand);
989         // Make sure we do a signed comparison here.
990         return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
991       }
992     }
993 
994     // Only lower this if the icmp is the only user of the GEP or if we expect
995     // the result to fold to a constant!
996     if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
997         (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
998       // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)  --->  (OFFSET1 cmp OFFSET2)
999       Value *L = EmitGEPOffset(GEPLHS);
1000       Value *R = EmitGEPOffset(GEPRHS);
1001       return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
1002     }
1003   }
1004 
1005   // Try convert this to an indexed compare by looking through PHIs/casts as a
1006   // last resort.
1007   return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
1008 }
1009 
1010 Instruction *InstCombinerImpl::foldAllocaCmp(ICmpInst &ICI,
1011                                              const AllocaInst *Alloca,
1012                                              const Value *Other) {
1013   assert(ICI.isEquality() && "Cannot fold non-equality comparison.");
1014 
1015   // It would be tempting to fold away comparisons between allocas and any
1016   // pointer not based on that alloca (e.g. an argument). However, even
1017   // though such pointers cannot alias, they can still compare equal.
1018   //
1019   // But LLVM doesn't specify where allocas get their memory, so if the alloca
1020   // doesn't escape we can argue that it's impossible to guess its value, and we
1021   // can therefore act as if any such guesses are wrong.
1022   //
1023   // The code below checks that the alloca doesn't escape, and that it's only
1024   // used in a comparison once (the current instruction). The
1025   // single-comparison-use condition ensures that we're trivially folding all
1026   // comparisons against the alloca consistently, and avoids the risk of
1027   // erroneously folding a comparison of the pointer with itself.
1028 
1029   unsigned MaxIter = 32; // Break cycles and bound to constant-time.
1030 
1031   SmallVector<const Use *, 32> Worklist;
1032   for (const Use &U : Alloca->uses()) {
1033     if (Worklist.size() >= MaxIter)
1034       return nullptr;
1035     Worklist.push_back(&U);
1036   }
1037 
1038   unsigned NumCmps = 0;
1039   while (!Worklist.empty()) {
1040     assert(Worklist.size() <= MaxIter);
1041     const Use *U = Worklist.pop_back_val();
1042     const Value *V = U->getUser();
1043     --MaxIter;
1044 
1045     if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
1046         isa<SelectInst>(V)) {
1047       // Track the uses.
1048     } else if (isa<LoadInst>(V)) {
1049       // Loading from the pointer doesn't escape it.
1050       continue;
1051     } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
1052       // Storing *to* the pointer is fine, but storing the pointer escapes it.
1053       if (SI->getValueOperand() == U->get())
1054         return nullptr;
1055       continue;
1056     } else if (isa<ICmpInst>(V)) {
1057       if (NumCmps++)
1058         return nullptr; // Found more than one cmp.
1059       continue;
1060     } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
1061       switch (Intrin->getIntrinsicID()) {
1062         // These intrinsics don't escape or compare the pointer. Memset is safe
1063         // because we don't allow ptrtoint. Memcpy and memmove are safe because
1064         // we don't allow stores, so src cannot point to V.
1065         case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
1066         case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
1067           continue;
1068         default:
1069           return nullptr;
1070       }
1071     } else {
1072       return nullptr;
1073     }
1074     for (const Use &U : V->uses()) {
1075       if (Worklist.size() >= MaxIter)
1076         return nullptr;
1077       Worklist.push_back(&U);
1078     }
1079   }
1080 
1081   Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
1082   return replaceInstUsesWith(
1083       ICI,
1084       ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
1085 }
1086 
1087 /// Fold "icmp pred (X+C), X".
1088 Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
1089                                                   ICmpInst::Predicate Pred) {
1090   // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
1091   // so the values can never be equal.  Similarly for all other "or equals"
1092   // operators.
1093   assert(!!C && "C should not be zero!");
1094 
1095   // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
1096   // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
1097   // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
1098   if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
1099     Constant *R = ConstantInt::get(X->getType(),
1100                                    APInt::getMaxValue(C.getBitWidth()) - C);
1101     return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
1102   }
1103 
1104   // (X+1) >u X        --> X <u (0-1)        --> X != 255
1105   // (X+2) >u X        --> X <u (0-2)        --> X <u 254
1106   // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
1107   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
1108     return new ICmpInst(ICmpInst::ICMP_ULT, X,
1109                         ConstantInt::get(X->getType(), -C));
1110 
1111   APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());
1112 
1113   // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
1114   // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
1115   // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
1116   // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
1117   // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
1118   // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
1119   if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1120     return new ICmpInst(ICmpInst::ICMP_SGT, X,
1121                         ConstantInt::get(X->getType(), SMax - C));
1122 
1123   // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
1124   // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
1125   // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
1126   // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
1127   // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
1128   // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128
1129 
1130   assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
1131   return new ICmpInst(ICmpInst::ICMP_SLT, X,
1132                       ConstantInt::get(X->getType(), SMax - (C - 1)));
1133 }
1134 
1135 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
1136 /// (icmp eq/ne A, Log2(AP2/AP1)) ->
1137 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
1138 Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
1139                                                      const APInt &AP1,
1140                                                      const APInt &AP2) {
1141   assert(I.isEquality() && "Cannot fold icmp gt/lt");
1142 
1143   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1144     if (I.getPredicate() == I.ICMP_NE)
1145       Pred = CmpInst::getInversePredicate(Pred);
1146     return new ICmpInst(Pred, LHS, RHS);
1147   };
1148 
1149   // Don't bother doing any work for cases which InstSimplify handles.
1150   if (AP2.isNullValue())
1151     return nullptr;
1152 
1153   bool IsAShr = isa<AShrOperator>(I.getOperand(0));
1154   if (IsAShr) {
1155     if (AP2.isAllOnesValue())
1156       return nullptr;
1157     if (AP2.isNegative() != AP1.isNegative())
1158       return nullptr;
1159     if (AP2.sgt(AP1))
1160       return nullptr;
1161   }
1162 
1163   if (!AP1)
1164     // 'A' must be large enough to shift out the highest set bit.
1165     return getICmp(I.ICMP_UGT, A,
1166                    ConstantInt::get(A->getType(), AP2.logBase2()));
1167 
1168   if (AP1 == AP2)
1169     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1170 
1171   int Shift;
1172   if (IsAShr && AP1.isNegative())
1173     Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
1174   else
1175     Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();
1176 
1177   if (Shift > 0) {
1178     if (IsAShr && AP1 == AP2.ashr(Shift)) {
1179       // There are multiple solutions if we are comparing against -1 and the LHS
1180       // of the ashr is not a power of two.
1181       if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
1182         return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
1183       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1184     } else if (AP1 == AP2.lshr(Shift)) {
1185       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1186     }
1187   }
1188 
1189   // Shifting const2 will never be equal to const1.
1190   // FIXME: This should always be handled by InstSimplify?
1191   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1192   return replaceInstUsesWith(I, TorF);
1193 }
1194 
1195 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
1196 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
1197 Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
1198                                                      const APInt &AP1,
1199                                                      const APInt &AP2) {
1200   assert(I.isEquality() && "Cannot fold icmp gt/lt");
1201 
1202   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1203     if (I.getPredicate() == I.ICMP_NE)
1204       Pred = CmpInst::getInversePredicate(Pred);
1205     return new ICmpInst(Pred, LHS, RHS);
1206   };
1207 
1208   // Don't bother doing any work for cases which InstSimplify handles.
1209   if (AP2.isNullValue())
1210     return nullptr;
1211 
1212   unsigned AP2TrailingZeros = AP2.countTrailingZeros();
1213 
1214   if (!AP1 && AP2TrailingZeros != 0)
1215     return getICmp(
1216         I.ICMP_UGE, A,
1217         ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1218 
1219   if (AP1 == AP2)
1220     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1221 
1222   // Get the distance between the lowest bits that are set.
1223   int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
1224 
1225   if (Shift > 0 && AP2.shl(Shift) == AP1)
1226     return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1227 
1228   // Shifting const2 will never be equal to const1.
1229   // FIXME: This should always be handled by InstSimplify?
1230   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1231   return replaceInstUsesWith(I, TorF);
1232 }
1233 
1234 /// The caller has matched a pattern of the form:
1235 ///   I = icmp ugt (add (add A, B), CI2), CI1
1236 /// If this is of the form:
1237 ///   sum = a + b
1238 ///   if (sum+128 >u 255)
1239 /// Then replace it with llvm.sadd.with.overflow.i8.
1240 ///
1241 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1242                                           ConstantInt *CI2, ConstantInt *CI1,
1243                                           InstCombinerImpl &IC) {
1244   // The transformation we're trying to do here is to transform this into an
1245   // llvm.sadd.with.overflow.  To do this, we have to replace the original add
1246   // with a narrower add, and discard the add-with-constant that is part of the
1247   // range check (if we can't eliminate it, this isn't profitable).
1248 
1249   // In order to eliminate the add-with-constant, the compare can be its only
1250   // use.
1251   Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1252   if (!AddWithCst->hasOneUse())
1253     return nullptr;
1254 
1255   // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1256   if (!CI2->getValue().isPowerOf2())
1257     return nullptr;
1258   unsigned NewWidth = CI2->getValue().countTrailingZeros();
1259   if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1260     return nullptr;
1261 
1262   // The width of the new add formed is 1 more than the bias.
1263   ++NewWidth;
1264 
1265   // Check to see that CI1 is an all-ones value with NewWidth bits.
1266   if (CI1->getBitWidth() == NewWidth ||
1267       CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1268     return nullptr;
1269 
1270   // This is only really a signed overflow check if the inputs have been
1271   // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1272   // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1273   unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
1274   if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
1275       IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
1276     return nullptr;
1277 
1278   // In order to replace the original add with a narrower
1279   // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1280   // and truncates that discard the high bits of the add.  Verify that this is
1281   // the case.
1282   Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1283   for (User *U : OrigAdd->users()) {
1284     if (U == AddWithCst)
1285       continue;
1286 
1287     // Only accept truncates for now.  We would really like a nice recursive
1288     // predicate like SimplifyDemandedBits, but which goes downwards the use-def
1289     // chain to see which bits of a value are actually demanded.  If the
1290     // original add had another add which was then immediately truncated, we
1291     // could still do the transformation.
1292     TruncInst *TI = dyn_cast<TruncInst>(U);
1293     if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1294       return nullptr;
1295   }
1296 
1297   // If the pattern matches, truncate the inputs to the narrower type and
1298   // use the sadd_with_overflow intrinsic to efficiently compute both the
1299   // result and the overflow bit.
1300   Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1301   Function *F = Intrinsic::getDeclaration(
1302       I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1303 
1304   InstCombiner::BuilderTy &Builder = IC.Builder;
1305 
1306   // Put the new code above the original add, in case there are any uses of the
1307   // add between the add and the compare.
1308   Builder.SetInsertPoint(OrigAdd);
1309 
1310   Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1311   Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1312   CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1313   Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1314   Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1315 
1316   // The inner add was the result of the narrow add, zero extended to the
1317   // wider type.  Replace it with the result computed by the intrinsic.
1318   IC.replaceInstUsesWith(*OrigAdd, ZExt);
1319   IC.eraseInstFromFunction(*OrigAdd);
1320 
1321   // The original icmp gets replaced with the overflow value.
1322   return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1323 }
1324 
1325 /// If we have:
1326 ///   icmp eq/ne (urem/srem %x, %y), 0
1327 /// iff %y is a power-of-two, we can replace this with a bit test:
1328 ///   icmp eq/ne (and %x, (add %y, -1)), 0
1329 Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1330   // This fold is only valid for equality predicates.
1331   if (!I.isEquality())
1332     return nullptr;
1333   ICmpInst::Predicate Pred;
1334   Value *X, *Y, *Zero;
1335   if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1336                         m_CombineAnd(m_Zero(), m_Value(Zero)))))
1337     return nullptr;
1338   if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I))
1339     return nullptr;
1340   // This may increase instruction count, we don't enforce that Y is a constant.
1341   Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1342   Value *Masked = Builder.CreateAnd(X, Mask);
1343   return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1344 }
1345 
1346 /// Fold equality-comparison between zero and any (maybe truncated) right-shift
1347 /// by one-less-than-bitwidth into a sign test on the original value.
1348 Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
1349   Instruction *Val;
1350   ICmpInst::Predicate Pred;
1351   if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1352     return nullptr;
1353 
1354   Value *X;
1355   Type *XTy;
1356 
1357   Constant *C;
1358   if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1359     XTy = X->getType();
1360     unsigned XBitWidth = XTy->getScalarSizeInBits();
1361     if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1362                                      APInt(XBitWidth, XBitWidth - 1))))
1363       return nullptr;
1364   } else if (isa<BinaryOperator>(Val) &&
1365              (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1366                   cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1367                   /*AnalyzeForSignBitExtraction=*/true))) {
1368     XTy = X->getType();
1369   } else
1370     return nullptr;
1371 
1372   return ICmpInst::Create(Instruction::ICmp,
1373                           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1374                                                     : ICmpInst::ICMP_SLT,
1375                           X, ConstantInt::getNullValue(XTy));
1376 }
1377 
1378 // Handle  icmp pred X, 0
1379 Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
1380   CmpInst::Predicate Pred = Cmp.getPredicate();
1381   if (!match(Cmp.getOperand(1), m_Zero()))
1382     return nullptr;
1383 
1384   // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
1385   if (Pred == ICmpInst::ICMP_SGT) {
1386     Value *A, *B;
1387     SelectPatternResult SPR = matchSelectPattern(Cmp.getOperand(0), A, B);
1388     if (SPR.Flavor == SPF_SMIN) {
1389       if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
1390         return new ICmpInst(Pred, B, Cmp.getOperand(1));
1391       if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
1392         return new ICmpInst(Pred, A, Cmp.getOperand(1));
1393     }
1394   }
1395 
1396   if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1397     return New;
1398 
1399   // Given:
1400   //   icmp eq/ne (urem %x, %y), 0
1401   // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1402   //   icmp eq/ne %x, 0
1403   Value *X, *Y;
1404   if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1405       ICmpInst::isEquality(Pred)) {
1406     KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
1407     KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
1408     if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1409       return new ICmpInst(Pred, X, Cmp.getOperand(1));
1410   }
1411 
1412   return nullptr;
1413 }
1414 
1415 /// Fold icmp Pred X, C.
1416 /// TODO: This code structure does not make sense. The saturating add fold
1417 /// should be moved to some other helper and extended as noted below (it is also
1418 /// possible that code has been made unnecessary - do we canonicalize IR to
1419 /// overflow/saturating intrinsics or not?).
1420 Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
1421   // Match the following pattern, which is a common idiom when writing
1422   // overflow-safe integer arithmetic functions. The source performs an addition
1423   // in wider type and explicitly checks for overflow using comparisons against
1424   // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1425   //
1426   // TODO: This could probably be generalized to handle other overflow-safe
1427   // operations if we worked out the formulas to compute the appropriate magic
1428   // constants.
1429   //
1430   // sum = a + b
1431   // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
1432   CmpInst::Predicate Pred = Cmp.getPredicate();
1433   Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1434   Value *A, *B;
1435   ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1436   if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1437       match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1438     if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1439       return Res;
1440 
1441   // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
1442   Constant *C = dyn_cast<Constant>(Op1);
1443   if (!C || C->canTrap())
1444     return nullptr;
1445 
1446   if (auto *Phi = dyn_cast<PHINode>(Op0))
1447     if (all_of(Phi->operands(), [](Value *V) { return isa<Constant>(V); })) {
1448       Type *Ty = Cmp.getType();
1449       Builder.SetInsertPoint(Phi);
1450       PHINode *NewPhi =
1451           Builder.CreatePHI(Ty, Phi->getNumOperands());
1452       for (BasicBlock *Predecessor : predecessors(Phi->getParent())) {
1453         auto *Input =
1454             cast<Constant>(Phi->getIncomingValueForBlock(Predecessor));
1455         auto *BoolInput = ConstantExpr::getCompare(Pred, Input, C);
1456         NewPhi->addIncoming(BoolInput, Predecessor);
1457       }
1458       NewPhi->takeName(&Cmp);
1459       return replaceInstUsesWith(Cmp, NewPhi);
1460     }
1461 
1462   return nullptr;
1463 }
1464 
1465 /// Canonicalize icmp instructions based on dominating conditions.
1466 Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1467   // This is a cheap/incomplete check for dominance - just match a single
1468   // predecessor with a conditional branch.
1469   BasicBlock *CmpBB = Cmp.getParent();
1470   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1471   if (!DomBB)
1472     return nullptr;
1473 
1474   Value *DomCond;
1475   BasicBlock *TrueBB, *FalseBB;
1476   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1477     return nullptr;
1478 
1479   assert((TrueBB == CmpBB || FalseBB == CmpBB) &&
1480          "Predecessor block does not point to successor?");
1481 
1482   // The branch should get simplified. Don't bother simplifying this condition.
1483   if (TrueBB == FalseBB)
1484     return nullptr;
1485 
1486   // Try to simplify this compare to T/F based on the dominating condition.
1487   Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
1488   if (Imp)
1489     return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp));
1490 
1491   CmpInst::Predicate Pred = Cmp.getPredicate();
1492   Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1493   ICmpInst::Predicate DomPred;
1494   const APInt *C, *DomC;
1495   if (match(DomCond, m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))) &&
1496       match(Y, m_APInt(C))) {
1497     // We have 2 compares of a variable with constants. Calculate the constant
1498     // ranges of those compares to see if we can transform the 2nd compare:
1499     // DomBB:
1500     //   DomCond = icmp DomPred X, DomC
1501     //   br DomCond, CmpBB, FalseBB
1502     // CmpBB:
1503     //   Cmp = icmp Pred X, C
1504     ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *C);
1505     ConstantRange DominatingCR =
1506         (CmpBB == TrueBB) ? ConstantRange::makeExactICmpRegion(DomPred, *DomC)
1507                           : ConstantRange::makeExactICmpRegion(
1508                                 CmpInst::getInversePredicate(DomPred), *DomC);
1509     ConstantRange Intersection = DominatingCR.intersectWith(CR);
1510     ConstantRange Difference = DominatingCR.difference(CR);
1511     if (Intersection.isEmptySet())
1512       return replaceInstUsesWith(Cmp, Builder.getFalse());
1513     if (Difference.isEmptySet())
1514       return replaceInstUsesWith(Cmp, Builder.getTrue());
1515 
1516     // Canonicalizing a sign bit comparison that gets used in a branch,
1517     // pessimizes codegen by generating branch on zero instruction instead
1518     // of a test and branch. So we avoid canonicalizing in such situations
1519     // because test and branch instruction has better branch displacement
1520     // than compare and branch instruction.
1521     bool UnusedBit;
1522     bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1523     if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1524       return nullptr;
1525 
1526     // Avoid an infinite loop with min/max canonicalization.
1527     // TODO: This will be unnecessary if we canonicalize to min/max intrinsics.
1528     if (Cmp.hasOneUse() &&
1529         match(Cmp.user_back(), m_MaxOrMin(m_Value(), m_Value())))
1530       return nullptr;
1531 
1532     if (const APInt *EqC = Intersection.getSingleElement())
1533       return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1534     if (const APInt *NeC = Difference.getSingleElement())
1535       return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1536   }
1537 
1538   return nullptr;
1539 }
1540 
1541 /// Fold icmp (trunc X, Y), C.
1542 Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
1543                                                      TruncInst *Trunc,
1544                                                      const APInt &C) {
1545   ICmpInst::Predicate Pred = Cmp.getPredicate();
1546   Value *X = Trunc->getOperand(0);
1547   if (C.isOneValue() && C.getBitWidth() > 1) {
1548     // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1549     Value *V = nullptr;
1550     if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1551       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1552                           ConstantInt::get(V->getType(), 1));
1553   }
1554 
1555   unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1556            SrcBits = X->getType()->getScalarSizeInBits();
1557   if (Cmp.isEquality() && Trunc->hasOneUse()) {
1558     // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1559     // of the high bits truncated out of x are known.
1560     KnownBits Known = computeKnownBits(X, 0, &Cmp);
1561 
1562     // If all the high bits are known, we can do this xform.
1563     if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
1564       // Pull in the high bits from known-ones set.
1565       APInt NewRHS = C.zext(SrcBits);
1566       NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1567       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
1568     }
1569   }
1570 
1571   // Look through truncated right-shift of the sign-bit for a sign-bit check:
1572   // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] < 0  --> ShOp <  0
1573   // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] > -1 --> ShOp > -1
1574   Value *ShOp;
1575   const APInt *ShAmtC;
1576   bool TrueIfSigned;
1577   if (isSignBitCheck(Pred, C, TrueIfSigned) &&
1578       match(X, m_Shr(m_Value(ShOp), m_APInt(ShAmtC))) &&
1579       DstBits == SrcBits - ShAmtC->getZExtValue()) {
1580     return TrueIfSigned
1581                ? new ICmpInst(ICmpInst::ICMP_SLT, ShOp,
1582                               ConstantInt::getNullValue(X->getType()))
1583                : new ICmpInst(ICmpInst::ICMP_SGT, ShOp,
1584                               ConstantInt::getAllOnesValue(X->getType()));
1585   }
1586 
1587   return nullptr;
1588 }
1589 
1590 /// Fold icmp (xor X, Y), C.
1591 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1592                                                    BinaryOperator *Xor,
1593                                                    const APInt &C) {
1594   Value *X = Xor->getOperand(0);
1595   Value *Y = Xor->getOperand(1);
1596   const APInt *XorC;
1597   if (!match(Y, m_APInt(XorC)))
1598     return nullptr;
1599 
1600   // If this is a comparison that tests the signbit (X < 0) or (x > -1),
1601   // fold the xor.
1602   ICmpInst::Predicate Pred = Cmp.getPredicate();
1603   bool TrueIfSigned = false;
1604   if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1605 
1606     // If the sign bit of the XorCst is not set, there is no change to
1607     // the operation, just stop using the Xor.
1608     if (!XorC->isNegative())
1609       return replaceOperand(Cmp, 0, X);
1610 
1611     // Emit the opposite comparison.
1612     if (TrueIfSigned)
1613       return new ICmpInst(ICmpInst::ICMP_SGT, X,
1614                           ConstantInt::getAllOnesValue(X->getType()));
1615     else
1616       return new ICmpInst(ICmpInst::ICMP_SLT, X,
1617                           ConstantInt::getNullValue(X->getType()));
1618   }
1619 
1620   if (Xor->hasOneUse()) {
1621     // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
1622     if (!Cmp.isEquality() && XorC->isSignMask()) {
1623       Pred = Cmp.getFlippedSignednessPredicate();
1624       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1625     }
1626 
1627     // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1628     if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1629       Pred = Cmp.getFlippedSignednessPredicate();
1630       Pred = Cmp.getSwappedPredicate(Pred);
1631       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1632     }
1633   }
1634 
1635   // Mask constant magic can eliminate an 'xor' with unsigned compares.
1636   if (Pred == ICmpInst::ICMP_UGT) {
1637     // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1638     if (*XorC == ~C && (C + 1).isPowerOf2())
1639       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1640     // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1641     if (*XorC == C && (C + 1).isPowerOf2())
1642       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1643   }
1644   if (Pred == ICmpInst::ICMP_ULT) {
1645     // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1646     if (*XorC == -C && C.isPowerOf2())
1647       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1648                           ConstantInt::get(X->getType(), ~C));
1649     // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1650     if (*XorC == C && (-C).isPowerOf2())
1651       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1652                           ConstantInt::get(X->getType(), ~C));
1653   }
1654   return nullptr;
1655 }
1656 
1657 /// Fold icmp (and (sh X, Y), C2), C1.
1658 Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1659                                                 BinaryOperator *And,
1660                                                 const APInt &C1,
1661                                                 const APInt &C2) {
1662   BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1663   if (!Shift || !Shift->isShift())
1664     return nullptr;
1665 
1666   // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1667   // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1668   // code produced by the clang front-end, for bitfield access.
1669   // This seemingly simple opportunity to fold away a shift turns out to be
1670   // rather complicated. See PR17827 for details.
1671   unsigned ShiftOpcode = Shift->getOpcode();
1672   bool IsShl = ShiftOpcode == Instruction::Shl;
1673   const APInt *C3;
1674   if (match(Shift->getOperand(1), m_APInt(C3))) {
1675     APInt NewAndCst, NewCmpCst;
1676     bool AnyCmpCstBitsShiftedOut;
1677     if (ShiftOpcode == Instruction::Shl) {
1678       // For a left shift, we can fold if the comparison is not signed. We can
1679       // also fold a signed comparison if the mask value and comparison value
1680       // are not negative. These constraints may not be obvious, but we can
1681       // prove that they are correct using an SMT solver.
1682       if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1683         return nullptr;
1684 
1685       NewCmpCst = C1.lshr(*C3);
1686       NewAndCst = C2.lshr(*C3);
1687       AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1688     } else if (ShiftOpcode == Instruction::LShr) {
1689       // For a logical right shift, we can fold if the comparison is not signed.
1690       // We can also fold a signed comparison if the shifted mask value and the
1691       // shifted comparison value are not negative. These constraints may not be
1692       // obvious, but we can prove that they are correct using an SMT solver.
1693       NewCmpCst = C1.shl(*C3);
1694       NewAndCst = C2.shl(*C3);
1695       AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1696       if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1697         return nullptr;
1698     } else {
1699       // For an arithmetic shift, check that both constants don't use (in a
1700       // signed sense) the top bits being shifted out.
1701       assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1702       NewCmpCst = C1.shl(*C3);
1703       NewAndCst = C2.shl(*C3);
1704       AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1705       if (NewAndCst.ashr(*C3) != C2)
1706         return nullptr;
1707     }
1708 
1709     if (AnyCmpCstBitsShiftedOut) {
1710       // If we shifted bits out, the fold is not going to work out. As a
1711       // special case, check to see if this means that the result is always
1712       // true or false now.
1713       if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1714         return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1715       if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1716         return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1717     } else {
1718       Value *NewAnd = Builder.CreateAnd(
1719           Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1720       return new ICmpInst(Cmp.getPredicate(),
1721           NewAnd, ConstantInt::get(And->getType(), NewCmpCst));
1722     }
1723   }
1724 
1725   // Turn ((X >> Y) & C2) == 0  into  (X & (C2 << Y)) == 0.  The latter is
1726   // preferable because it allows the C2 << Y expression to be hoisted out of a
1727   // loop if Y is invariant and X is not.
1728   if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() &&
1729       !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1730     // Compute C2 << Y.
1731     Value *NewShift =
1732         IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1733               : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1734 
1735     // Compute X & (C2 << Y).
1736     Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1737     return replaceOperand(Cmp, 0, NewAnd);
1738   }
1739 
1740   return nullptr;
1741 }
1742 
1743 /// Fold icmp (and X, C2), C1.
1744 Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1745                                                      BinaryOperator *And,
1746                                                      const APInt &C1) {
1747   bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1748 
1749   // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1750   // TODO: We canonicalize to the longer form for scalars because we have
1751   // better analysis/folds for icmp, and codegen may be better with icmp.
1752   if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isNullValue() &&
1753       match(And->getOperand(1), m_One()))
1754     return new TruncInst(And->getOperand(0), Cmp.getType());
1755 
1756   const APInt *C2;
1757   Value *X;
1758   if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1759     return nullptr;
1760 
1761   // Don't perform the following transforms if the AND has multiple uses
1762   if (!And->hasOneUse())
1763     return nullptr;
1764 
1765   if (Cmp.isEquality() && C1.isNullValue()) {
1766     // Restrict this fold to single-use 'and' (PR10267).
1767     // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1768     if (C2->isSignMask()) {
1769       Constant *Zero = Constant::getNullValue(X->getType());
1770       auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1771       return new ICmpInst(NewPred, X, Zero);
1772     }
1773 
1774     // Restrict this fold only for single-use 'and' (PR10267).
1775     // ((%x & C) == 0) --> %x u< (-C)  iff (-C) is power of two.
1776     if ((~(*C2) + 1).isPowerOf2()) {
1777       Constant *NegBOC =
1778           ConstantExpr::getNeg(cast<Constant>(And->getOperand(1)));
1779       auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1780       return new ICmpInst(NewPred, X, NegBOC);
1781     }
1782   }
1783 
1784   // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1785   // the input width without changing the value produced, eliminate the cast:
1786   //
1787   // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1788   //
1789   // We can do this transformation if the constants do not have their sign bits
1790   // set or if it is an equality comparison. Extending a relational comparison
1791   // when we're checking the sign bit would not work.
1792   Value *W;
1793   if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1794       (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1795     // TODO: Is this a good transform for vectors? Wider types may reduce
1796     // throughput. Should this transform be limited (even for scalars) by using
1797     // shouldChangeType()?
1798     if (!Cmp.getType()->isVectorTy()) {
1799       Type *WideType = W->getType();
1800       unsigned WideScalarBits = WideType->getScalarSizeInBits();
1801       Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1802       Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1803       Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1804       return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1805     }
1806   }
1807 
1808   if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1809     return I;
1810 
1811   // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1812   // (icmp pred (and A, (or (shl 1, B), 1), 0))
1813   //
1814   // iff pred isn't signed
1815   if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() &&
1816       match(And->getOperand(1), m_One())) {
1817     Constant *One = cast<Constant>(And->getOperand(1));
1818     Value *Or = And->getOperand(0);
1819     Value *A, *B, *LShr;
1820     if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1821         match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1822       unsigned UsesRemoved = 0;
1823       if (And->hasOneUse())
1824         ++UsesRemoved;
1825       if (Or->hasOneUse())
1826         ++UsesRemoved;
1827       if (LShr->hasOneUse())
1828         ++UsesRemoved;
1829 
1830       // Compute A & ((1 << B) | 1)
1831       Value *NewOr = nullptr;
1832       if (auto *C = dyn_cast<Constant>(B)) {
1833         if (UsesRemoved >= 1)
1834           NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1835       } else {
1836         if (UsesRemoved >= 3)
1837           NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1838                                                      /*HasNUW=*/true),
1839                                    One, Or->getName());
1840       }
1841       if (NewOr) {
1842         Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1843         return replaceOperand(Cmp, 0, NewAnd);
1844       }
1845     }
1846   }
1847 
1848   return nullptr;
1849 }
1850 
1851 /// Fold icmp (and X, Y), C.
1852 Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1853                                                    BinaryOperator *And,
1854                                                    const APInt &C) {
1855   if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1856     return I;
1857 
1858   // TODO: These all require that Y is constant too, so refactor with the above.
1859 
1860   // Try to optimize things like "A[i] & 42 == 0" to index computations.
1861   Value *X = And->getOperand(0);
1862   Value *Y = And->getOperand(1);
1863   if (auto *LI = dyn_cast<LoadInst>(X))
1864     if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1865       if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1866         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1867             !LI->isVolatile() && isa<ConstantInt>(Y)) {
1868           ConstantInt *C2 = cast<ConstantInt>(Y);
1869           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1870             return Res;
1871         }
1872 
1873   if (!Cmp.isEquality())
1874     return nullptr;
1875 
1876   // X & -C == -C -> X >  u ~C
1877   // X & -C != -C -> X <= u ~C
1878   //   iff C is a power of 2
1879   if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) {
1880     auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1881                                                           : CmpInst::ICMP_ULE;
1882     return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1883   }
1884 
1885   // (X & C2) == 0 -> (trunc X) >= 0
1886   // (X & C2) != 0 -> (trunc X) <  0
1887   //   iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
1888   const APInt *C2;
1889   if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) {
1890     int32_t ExactLogBase2 = C2->exactLogBase2();
1891     if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1892       Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1893       if (auto *AndVTy = dyn_cast<VectorType>(And->getType()))
1894         NTy = VectorType::get(NTy, AndVTy->getElementCount());
1895       Value *Trunc = Builder.CreateTrunc(X, NTy);
1896       auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1897                                                             : CmpInst::ICMP_SLT;
1898       return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
1899     }
1900   }
1901 
1902   return nullptr;
1903 }
1904 
1905 /// Fold icmp (or X, Y), C.
1906 Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
1907                                                   BinaryOperator *Or,
1908                                                   const APInt &C) {
1909   ICmpInst::Predicate Pred = Cmp.getPredicate();
1910   if (C.isOneValue()) {
1911     // icmp slt signum(V) 1 --> icmp slt V, 1
1912     Value *V = nullptr;
1913     if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1914       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1915                           ConstantInt::get(V->getType(), 1));
1916   }
1917 
1918   Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
1919   const APInt *MaskC;
1920   if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
1921     if (*MaskC == C && (C + 1).isPowerOf2()) {
1922       // X | C == C --> X <=u C
1923       // X | C != C --> X  >u C
1924       //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
1925       Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1926       return new ICmpInst(Pred, OrOp0, OrOp1);
1927     }
1928 
1929     // More general: canonicalize 'equality with set bits mask' to
1930     // 'equality with clear bits mask'.
1931     // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
1932     // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
1933     if (Or->hasOneUse()) {
1934       Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
1935       Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
1936       return new ICmpInst(Pred, And, NewC);
1937     }
1938   }
1939 
1940   if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse())
1941     return nullptr;
1942 
1943   Value *P, *Q;
1944   if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1945     // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1946     // -> and (icmp eq P, null), (icmp eq Q, null).
1947     Value *CmpP =
1948         Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1949     Value *CmpQ =
1950         Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1951     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1952     return BinaryOperator::Create(BOpc, CmpP, CmpQ);
1953   }
1954 
1955   // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
1956   // a shorter form that has more potential to be folded even further.
1957   Value *X1, *X2, *X3, *X4;
1958   if (match(OrOp0, m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
1959       match(OrOp1, m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
1960     // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
1961     // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
1962     Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
1963     Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
1964     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1965     return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
1966   }
1967 
1968   return nullptr;
1969 }
1970 
1971 /// Fold icmp (mul X, Y), C.
1972 Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
1973                                                    BinaryOperator *Mul,
1974                                                    const APInt &C) {
1975   const APInt *MulC;
1976   if (!match(Mul->getOperand(1), m_APInt(MulC)))
1977     return nullptr;
1978 
1979   // If this is a test of the sign bit and the multiply is sign-preserving with
1980   // a constant operand, use the multiply LHS operand instead.
1981   ICmpInst::Predicate Pred = Cmp.getPredicate();
1982   if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
1983     if (MulC->isNegative())
1984       Pred = ICmpInst::getSwappedPredicate(Pred);
1985     return new ICmpInst(Pred, Mul->getOperand(0),
1986                         Constant::getNullValue(Mul->getType()));
1987   }
1988 
1989   // If the multiply does not wrap, try to divide the compare constant by the
1990   // multiplication factor.
1991   if (Cmp.isEquality() && !MulC->isNullValue()) {
1992     // (mul nsw X, MulC) == C --> X == C /s MulC
1993     if (Mul->hasNoSignedWrap() && C.srem(*MulC).isNullValue()) {
1994       Constant *NewC = ConstantInt::get(Mul->getType(), C.sdiv(*MulC));
1995       return new ICmpInst(Pred, Mul->getOperand(0), NewC);
1996     }
1997     // (mul nuw X, MulC) == C --> X == C /u MulC
1998     if (Mul->hasNoUnsignedWrap() && C.urem(*MulC).isNullValue()) {
1999       Constant *NewC = ConstantInt::get(Mul->getType(), C.udiv(*MulC));
2000       return new ICmpInst(Pred, Mul->getOperand(0), NewC);
2001     }
2002   }
2003 
2004   return nullptr;
2005 }
2006 
2007 /// Fold icmp (shl 1, Y), C.
2008 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
2009                                    const APInt &C) {
2010   Value *Y;
2011   if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
2012     return nullptr;
2013 
2014   Type *ShiftType = Shl->getType();
2015   unsigned TypeBits = C.getBitWidth();
2016   bool CIsPowerOf2 = C.isPowerOf2();
2017   ICmpInst::Predicate Pred = Cmp.getPredicate();
2018   if (Cmp.isUnsigned()) {
2019     // (1 << Y) pred C -> Y pred Log2(C)
2020     if (!CIsPowerOf2) {
2021       // (1 << Y) <  30 -> Y <= 4
2022       // (1 << Y) <= 30 -> Y <= 4
2023       // (1 << Y) >= 30 -> Y >  4
2024       // (1 << Y) >  30 -> Y >  4
2025       if (Pred == ICmpInst::ICMP_ULT)
2026         Pred = ICmpInst::ICMP_ULE;
2027       else if (Pred == ICmpInst::ICMP_UGE)
2028         Pred = ICmpInst::ICMP_UGT;
2029     }
2030 
2031     // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
2032     // (1 << Y) <  2147483648 -> Y <  31 -> Y != 31
2033     unsigned CLog2 = C.logBase2();
2034     if (CLog2 == TypeBits - 1) {
2035       if (Pred == ICmpInst::ICMP_UGE)
2036         Pred = ICmpInst::ICMP_EQ;
2037       else if (Pred == ICmpInst::ICMP_ULT)
2038         Pred = ICmpInst::ICMP_NE;
2039     }
2040     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
2041   } else if (Cmp.isSigned()) {
2042     Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2043     if (C.isAllOnesValue()) {
2044       // (1 << Y) <= -1 -> Y == 31
2045       if (Pred == ICmpInst::ICMP_SLE)
2046         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2047 
2048       // (1 << Y) >  -1 -> Y != 31
2049       if (Pred == ICmpInst::ICMP_SGT)
2050         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2051     } else if (!C) {
2052       // (1 << Y) <  0 -> Y == 31
2053       // (1 << Y) <= 0 -> Y == 31
2054       if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
2055         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2056 
2057       // (1 << Y) >= 0 -> Y != 31
2058       // (1 << Y) >  0 -> Y != 31
2059       if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
2060         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2061     }
2062   } else if (Cmp.isEquality() && CIsPowerOf2) {
2063     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2()));
2064   }
2065 
2066   return nullptr;
2067 }
2068 
2069 /// Fold icmp (shl X, Y), C.
2070 Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2071                                                    BinaryOperator *Shl,
2072                                                    const APInt &C) {
2073   const APInt *ShiftVal;
2074   if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2075     return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2076 
2077   const APInt *ShiftAmt;
2078   if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2079     return foldICmpShlOne(Cmp, Shl, C);
2080 
2081   // Check that the shift amount is in range. If not, don't perform undefined
2082   // shifts. When the shift is visited, it will be simplified.
2083   unsigned TypeBits = C.getBitWidth();
2084   if (ShiftAmt->uge(TypeBits))
2085     return nullptr;
2086 
2087   ICmpInst::Predicate Pred = Cmp.getPredicate();
2088   Value *X = Shl->getOperand(0);
2089   Type *ShType = Shl->getType();
2090 
2091   // NSW guarantees that we are only shifting out sign bits from the high bits,
2092   // so we can ASHR the compare constant without needing a mask and eliminate
2093   // the shift.
2094   if (Shl->hasNoSignedWrap()) {
2095     if (Pred == ICmpInst::ICMP_SGT) {
2096       // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2097       APInt ShiftedC = C.ashr(*ShiftAmt);
2098       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2099     }
2100     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2101         C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2102       APInt ShiftedC = C.ashr(*ShiftAmt);
2103       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2104     }
2105     if (Pred == ICmpInst::ICMP_SLT) {
2106       // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2107       // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2108       // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2109       // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2110       assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2111       APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2112       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2113     }
2114     // If this is a signed comparison to 0 and the shift is sign preserving,
2115     // use the shift LHS operand instead; isSignTest may change 'Pred', so only
2116     // do that if we're sure to not continue on in this function.
2117     if (isSignTest(Pred, C))
2118       return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
2119   }
2120 
2121   // NUW guarantees that we are only shifting out zero bits from the high bits,
2122   // so we can LSHR the compare constant without needing a mask and eliminate
2123   // the shift.
2124   if (Shl->hasNoUnsignedWrap()) {
2125     if (Pred == ICmpInst::ICMP_UGT) {
2126       // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2127       APInt ShiftedC = C.lshr(*ShiftAmt);
2128       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2129     }
2130     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2131         C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2132       APInt ShiftedC = C.lshr(*ShiftAmt);
2133       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2134     }
2135     if (Pred == ICmpInst::ICMP_ULT) {
2136       // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2137       // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2138       // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2139       // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2140       assert(C.ugt(0) && "ult 0 should have been eliminated");
2141       APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2142       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2143     }
2144   }
2145 
2146   if (Cmp.isEquality() && Shl->hasOneUse()) {
2147     // Strength-reduce the shift into an 'and'.
2148     Constant *Mask = ConstantInt::get(
2149         ShType,
2150         APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2151     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2152     Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2153     return new ICmpInst(Pred, And, LShrC);
2154   }
2155 
2156   // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2157   bool TrueIfSigned = false;
2158   if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2159     // (X << 31) <s 0  --> (X & 1) != 0
2160     Constant *Mask = ConstantInt::get(
2161         ShType,
2162         APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2163     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2164     return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2165                         And, Constant::getNullValue(ShType));
2166   }
2167 
2168   // Simplify 'shl' inequality test into 'and' equality test.
2169   if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2170     // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2171     if ((C + 1).isPowerOf2() &&
2172         (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2173       Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2174       return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2175                                                      : ICmpInst::ICMP_NE,
2176                           And, Constant::getNullValue(ShType));
2177     }
2178     // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2179     if (C.isPowerOf2() &&
2180         (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2181       Value *And =
2182           Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2183       return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2184                                                      : ICmpInst::ICMP_NE,
2185                           And, Constant::getNullValue(ShType));
2186     }
2187   }
2188 
2189   // Transform (icmp pred iM (shl iM %v, N), C)
2190   // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N))
2191   // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N.
2192   // This enables us to get rid of the shift in favor of a trunc that may be
2193   // free on the target. It has the additional benefit of comparing to a
2194   // smaller constant that may be more target-friendly.
2195   unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2196   if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
2197       DL.isLegalInteger(TypeBits - Amt)) {
2198     Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2199     if (auto *ShVTy = dyn_cast<VectorType>(ShType))
2200       TruncTy = VectorType::get(TruncTy, ShVTy->getElementCount());
2201     Constant *NewC =
2202         ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2203     return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
2204   }
2205 
2206   return nullptr;
2207 }
2208 
2209 /// Fold icmp ({al}shr X, Y), C.
2210 Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2211                                                    BinaryOperator *Shr,
2212                                                    const APInt &C) {
2213   // An exact shr only shifts out zero bits, so:
2214   // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2215   Value *X = Shr->getOperand(0);
2216   CmpInst::Predicate Pred = Cmp.getPredicate();
2217   if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
2218       C.isNullValue())
2219     return new ICmpInst(Pred, X, Cmp.getOperand(1));
2220 
2221   const APInt *ShiftVal;
2222   if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2223     return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);
2224 
2225   const APInt *ShiftAmt;
2226   if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2227     return nullptr;
2228 
2229   // Check that the shift amount is in range. If not, don't perform undefined
2230   // shifts. When the shift is visited it will be simplified.
2231   unsigned TypeBits = C.getBitWidth();
2232   unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2233   if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2234     return nullptr;
2235 
2236   bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2237   bool IsExact = Shr->isExact();
2238   Type *ShrTy = Shr->getType();
2239   // TODO: If we could guarantee that InstSimplify would handle all of the
2240   // constant-value-based preconditions in the folds below, then we could assert
2241   // those conditions rather than checking them. This is difficult because of
2242   // undef/poison (PR34838).
2243   if (IsAShr) {
2244     if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) {
2245       // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC)
2246       // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC)
2247       APInt ShiftedC = C.shl(ShAmtVal);
2248       if (ShiftedC.ashr(ShAmtVal) == C)
2249         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2250     }
2251     if (Pred == CmpInst::ICMP_SGT) {
2252       // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2253       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2254       if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2255           (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2256         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2257     }
2258 
2259     // If the compare constant has significant bits above the lowest sign-bit,
2260     // then convert an unsigned cmp to a test of the sign-bit:
2261     // (ashr X, ShiftC) u> C --> X s< 0
2262     // (ashr X, ShiftC) u< C --> X s> -1
2263     if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) {
2264       if (Pred == CmpInst::ICMP_UGT) {
2265         return new ICmpInst(CmpInst::ICMP_SLT, X,
2266                             ConstantInt::getNullValue(ShrTy));
2267       }
2268       if (Pred == CmpInst::ICMP_ULT) {
2269         return new ICmpInst(CmpInst::ICMP_SGT, X,
2270                             ConstantInt::getAllOnesValue(ShrTy));
2271       }
2272     }
2273   } else {
2274     if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2275       // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2276       // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2277       APInt ShiftedC = C.shl(ShAmtVal);
2278       if (ShiftedC.lshr(ShAmtVal) == C)
2279         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2280     }
2281     if (Pred == CmpInst::ICMP_UGT) {
2282       // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2283       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2284       if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2285         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2286     }
2287   }
2288 
2289   if (!Cmp.isEquality())
2290     return nullptr;
2291 
2292   // Handle equality comparisons of shift-by-constant.
2293 
2294   // If the comparison constant changes with the shift, the comparison cannot
2295   // succeed (bits of the comparison constant cannot match the shifted value).
2296   // This should be known by InstSimplify and already be folded to true/false.
2297   assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2298           (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2299          "Expected icmp+shr simplify did not occur.");
2300 
2301   // If the bits shifted out are known zero, compare the unshifted value:
2302   //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
2303   if (Shr->isExact())
2304     return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2305 
2306   if (C.isNullValue()) {
2307     // == 0 is u< 1.
2308     if (Pred == CmpInst::ICMP_EQ)
2309       return new ICmpInst(CmpInst::ICMP_ULT, X,
2310                           ConstantInt::get(ShrTy, (C + 1).shl(ShAmtVal)));
2311     else
2312       return new ICmpInst(CmpInst::ICMP_UGT, X,
2313                           ConstantInt::get(ShrTy, (C + 1).shl(ShAmtVal) - 1));
2314   }
2315 
2316   if (Shr->hasOneUse()) {
2317     // Canonicalize the shift into an 'and':
2318     // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2319     APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2320     Constant *Mask = ConstantInt::get(ShrTy, Val);
2321     Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2322     return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2323   }
2324 
2325   return nullptr;
2326 }
2327 
2328 Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2329                                                     BinaryOperator *SRem,
2330                                                     const APInt &C) {
2331   // Match an 'is positive' or 'is negative' comparison of remainder by a
2332   // constant power-of-2 value:
2333   // (X % pow2C) sgt/slt 0
2334   const ICmpInst::Predicate Pred = Cmp.getPredicate();
2335   if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT)
2336     return nullptr;
2337 
2338   // TODO: The one-use check is standard because we do not typically want to
2339   //       create longer instruction sequences, but this might be a special-case
2340   //       because srem is not good for analysis or codegen.
2341   if (!SRem->hasOneUse())
2342     return nullptr;
2343 
2344   const APInt *DivisorC;
2345   if (!C.isNullValue() || !match(SRem->getOperand(1), m_Power2(DivisorC)))
2346     return nullptr;
2347 
2348   // Mask off the sign bit and the modulo bits (low-bits).
2349   Type *Ty = SRem->getType();
2350   APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2351   Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2352   Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2353 
2354   // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2355   // bit is set. Example:
2356   // (i8 X % 32) s> 0 --> (X & 159) s> 0
2357   if (Pred == ICmpInst::ICMP_SGT)
2358     return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2359 
2360   // For 'is negative?' check that the sign-bit is set and at least 1 masked
2361   // bit is set. Example:
2362   // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2363   return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2364 }
2365 
2366 /// Fold icmp (udiv X, Y), C.
2367 Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2368                                                     BinaryOperator *UDiv,
2369                                                     const APInt &C) {
2370   const APInt *C2;
2371   if (!match(UDiv->getOperand(0), m_APInt(C2)))
2372     return nullptr;
2373 
2374   assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2375 
2376   // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
2377   Value *Y = UDiv->getOperand(1);
2378   if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2379     assert(!C.isMaxValue() &&
2380            "icmp ugt X, UINT_MAX should have been simplified already.");
2381     return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2382                         ConstantInt::get(Y->getType(), C2->udiv(C + 1)));
2383   }
2384 
2385   // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2386   if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2387     assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2388     return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2389                         ConstantInt::get(Y->getType(), C2->udiv(C)));
2390   }
2391 
2392   return nullptr;
2393 }
2394 
2395 /// Fold icmp ({su}div X, Y), C.
2396 Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2397                                                    BinaryOperator *Div,
2398                                                    const APInt &C) {
2399   // Fold: icmp pred ([us]div X, C2), C -> range test
2400   // Fold this div into the comparison, producing a range check.
2401   // Determine, based on the divide type, what the range is being
2402   // checked.  If there is an overflow on the low or high side, remember
2403   // it, otherwise compute the range [low, hi) bounding the new value.
2404   // See: InsertRangeTest above for the kinds of replacements possible.
2405   const APInt *C2;
2406   if (!match(Div->getOperand(1), m_APInt(C2)))
2407     return nullptr;
2408 
2409   // FIXME: If the operand types don't match the type of the divide
2410   // then don't attempt this transform. The code below doesn't have the
2411   // logic to deal with a signed divide and an unsigned compare (and
2412   // vice versa). This is because (x /s C2) <s C  produces different
2413   // results than (x /s C2) <u C or (x /u C2) <s C or even
2414   // (x /u C2) <u C.  Simply casting the operands and result won't
2415   // work. :(  The if statement below tests that condition and bails
2416   // if it finds it.
2417   bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2418   if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2419     return nullptr;
2420 
2421   // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2422   // INT_MIN will also fail if the divisor is 1. Although folds of all these
2423   // division-by-constant cases should be present, we can not assert that they
2424   // have happened before we reach this icmp instruction.
2425   if (C2->isNullValue() || C2->isOneValue() ||
2426       (DivIsSigned && C2->isAllOnesValue()))
2427     return nullptr;
2428 
2429   // Compute Prod = C * C2. We are essentially solving an equation of
2430   // form X / C2 = C. We solve for X by multiplying C2 and C.
2431   // By solving for X, we can turn this into a range check instead of computing
2432   // a divide.
2433   APInt Prod = C * *C2;
2434 
2435   // Determine if the product overflows by seeing if the product is not equal to
2436   // the divide. Make sure we do the same kind of divide as in the LHS
2437   // instruction that we're folding.
2438   bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2439 
2440   ICmpInst::Predicate Pred = Cmp.getPredicate();
2441 
2442   // If the division is known to be exact, then there is no remainder from the
2443   // divide, so the covered range size is unit, otherwise it is the divisor.
2444   APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2445 
2446   // Figure out the interval that is being checked.  For example, a comparison
2447   // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2448   // Compute this interval based on the constants involved and the signedness of
2449   // the compare/divide.  This computes a half-open interval, keeping track of
2450   // whether either value in the interval overflows.  After analysis each
2451   // overflow variable is set to 0 if it's corresponding bound variable is valid
2452   // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2453   int LoOverflow = 0, HiOverflow = 0;
2454   APInt LoBound, HiBound;
2455 
2456   if (!DivIsSigned) {  // udiv
2457     // e.g. X/5 op 3  --> [15, 20)
2458     LoBound = Prod;
2459     HiOverflow = LoOverflow = ProdOV;
2460     if (!HiOverflow) {
2461       // If this is not an exact divide, then many values in the range collapse
2462       // to the same result value.
2463       HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2464     }
2465   } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2466     if (C.isNullValue()) {       // (X / pos) op 0
2467       // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
2468       LoBound = -(RangeSize - 1);
2469       HiBound = RangeSize;
2470     } else if (C.isStrictlyPositive()) {   // (X / pos) op pos
2471       LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
2472       HiOverflow = LoOverflow = ProdOV;
2473       if (!HiOverflow)
2474         HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2475     } else {                       // (X / pos) op neg
2476       // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
2477       HiBound = Prod + 1;
2478       LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2479       if (!LoOverflow) {
2480         APInt DivNeg = -RangeSize;
2481         LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2482       }
2483     }
2484   } else if (C2->isNegative()) { // Divisor is < 0.
2485     if (Div->isExact())
2486       RangeSize.negate();
2487     if (C.isNullValue()) { // (X / neg) op 0
2488       // e.g. X/-5 op 0  --> [-4, 5)
2489       LoBound = RangeSize + 1;
2490       HiBound = -RangeSize;
2491       if (HiBound == *C2) {        // -INTMIN = INTMIN
2492         HiOverflow = 1;            // [INTMIN+1, overflow)
2493         HiBound = APInt();         // e.g. X/INTMIN = 0 --> X > INTMIN
2494       }
2495     } else if (C.isStrictlyPositive()) {   // (X / neg) op pos
2496       // e.g. X/-5 op 3  --> [-19, -14)
2497       HiBound = Prod + 1;
2498       HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2499       if (!LoOverflow)
2500         LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
2501     } else {                       // (X / neg) op neg
2502       LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
2503       LoOverflow = HiOverflow = ProdOV;
2504       if (!HiOverflow)
2505         HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2506     }
2507 
2508     // Dividing by a negative swaps the condition.  LT <-> GT
2509     Pred = ICmpInst::getSwappedPredicate(Pred);
2510   }
2511 
2512   Value *X = Div->getOperand(0);
2513   switch (Pred) {
2514     default: llvm_unreachable("Unhandled icmp opcode!");
2515     case ICmpInst::ICMP_EQ:
2516       if (LoOverflow && HiOverflow)
2517         return replaceInstUsesWith(Cmp, Builder.getFalse());
2518       if (HiOverflow)
2519         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2520                             ICmpInst::ICMP_UGE, X,
2521                             ConstantInt::get(Div->getType(), LoBound));
2522       if (LoOverflow)
2523         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2524                             ICmpInst::ICMP_ULT, X,
2525                             ConstantInt::get(Div->getType(), HiBound));
2526       return replaceInstUsesWith(
2527           Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2528     case ICmpInst::ICMP_NE:
2529       if (LoOverflow && HiOverflow)
2530         return replaceInstUsesWith(Cmp, Builder.getTrue());
2531       if (HiOverflow)
2532         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2533                             ICmpInst::ICMP_ULT, X,
2534                             ConstantInt::get(Div->getType(), LoBound));
2535       if (LoOverflow)
2536         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2537                             ICmpInst::ICMP_UGE, X,
2538                             ConstantInt::get(Div->getType(), HiBound));
2539       return replaceInstUsesWith(Cmp,
2540                                  insertRangeTest(X, LoBound, HiBound,
2541                                                  DivIsSigned, false));
2542     case ICmpInst::ICMP_ULT:
2543     case ICmpInst::ICMP_SLT:
2544       if (LoOverflow == +1)   // Low bound is greater than input range.
2545         return replaceInstUsesWith(Cmp, Builder.getTrue());
2546       if (LoOverflow == -1)   // Low bound is less than input range.
2547         return replaceInstUsesWith(Cmp, Builder.getFalse());
2548       return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound));
2549     case ICmpInst::ICMP_UGT:
2550     case ICmpInst::ICMP_SGT:
2551       if (HiOverflow == +1)       // High bound greater than input range.
2552         return replaceInstUsesWith(Cmp, Builder.getFalse());
2553       if (HiOverflow == -1)       // High bound less than input range.
2554         return replaceInstUsesWith(Cmp, Builder.getTrue());
2555       if (Pred == ICmpInst::ICMP_UGT)
2556         return new ICmpInst(ICmpInst::ICMP_UGE, X,
2557                             ConstantInt::get(Div->getType(), HiBound));
2558       return new ICmpInst(ICmpInst::ICMP_SGE, X,
2559                           ConstantInt::get(Div->getType(), HiBound));
2560   }
2561 
2562   return nullptr;
2563 }
2564 
2565 /// Fold icmp (sub X, Y), C.
2566 Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2567                                                    BinaryOperator *Sub,
2568                                                    const APInt &C) {
2569   Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2570   ICmpInst::Predicate Pred = Cmp.getPredicate();
2571   const APInt *C2;
2572   APInt SubResult;
2573 
2574   // icmp eq/ne (sub C, Y), C -> icmp eq/ne Y, 0
2575   if (match(X, m_APInt(C2)) && *C2 == C && Cmp.isEquality())
2576     return new ICmpInst(Cmp.getPredicate(), Y,
2577                         ConstantInt::get(Y->getType(), 0));
2578 
2579   // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2580   if (match(X, m_APInt(C2)) &&
2581       ((Cmp.isUnsigned() && Sub->hasNoUnsignedWrap()) ||
2582        (Cmp.isSigned() && Sub->hasNoSignedWrap())) &&
2583       !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2584     return new ICmpInst(Cmp.getSwappedPredicate(), Y,
2585                         ConstantInt::get(Y->getType(), SubResult));
2586 
2587   // The following transforms are only worth it if the only user of the subtract
2588   // is the icmp.
2589   if (!Sub->hasOneUse())
2590     return nullptr;
2591 
2592   if (Sub->hasNoSignedWrap()) {
2593     // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2594     if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue())
2595       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2596 
2597     // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2598     if (Pred == ICmpInst::ICMP_SGT && C.isNullValue())
2599       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2600 
2601     // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2602     if (Pred == ICmpInst::ICMP_SLT && C.isNullValue())
2603       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2604 
2605     // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2606     if (Pred == ICmpInst::ICMP_SLT && C.isOneValue())
2607       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2608   }
2609 
2610   if (!match(X, m_APInt(C2)))
2611     return nullptr;
2612 
2613   // C2 - Y <u C -> (Y | (C - 1)) == C2
2614   //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
2615   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
2616       (*C2 & (C - 1)) == (C - 1))
2617     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
2618 
2619   // C2 - Y >u C -> (Y | C) != C2
2620   //   iff C2 & C == C and C + 1 is a power of 2
2621   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
2622     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
2623 
2624   return nullptr;
2625 }
2626 
2627 /// Fold icmp (add X, Y), C.
2628 Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
2629                                                    BinaryOperator *Add,
2630                                                    const APInt &C) {
2631   Value *Y = Add->getOperand(1);
2632   const APInt *C2;
2633   if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2634     return nullptr;
2635 
2636   // Fold icmp pred (add X, C2), C.
2637   Value *X = Add->getOperand(0);
2638   Type *Ty = Add->getType();
2639   const CmpInst::Predicate Pred = Cmp.getPredicate();
2640   const APInt SMax = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
2641   const APInt SMin = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
2642 
2643   // Fold compare with offset to opposite sign compare if it eliminates offset:
2644   // (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
2645   if (Pred == CmpInst::ICMP_UGT && C == *C2 + SMax)
2646     return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
2647 
2648   // (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
2649   if (Pred == CmpInst::ICMP_ULT && C == *C2 + SMin)
2650     return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
2651 
2652   // (X + C2) >s C --> X <u (SMAX - C) (if C == C2 - 1)
2653   if (Pred == CmpInst::ICMP_SGT && C == *C2 - 1)
2654     return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, SMax - C));
2655 
2656   // (X + C2) <s C --> X >u (C ^ SMAX) (if C == C2)
2657   if (Pred == CmpInst::ICMP_SLT && C == *C2)
2658     return new ICmpInst(ICmpInst::ICMP_UGT, X, ConstantInt::get(Ty, C ^ SMax));
2659 
2660   // If the add does not wrap, we can always adjust the compare by subtracting
2661   // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
2662   // are canonicalized to SGT/SLT/UGT/ULT.
2663   if ((Add->hasNoSignedWrap() &&
2664        (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
2665       (Add->hasNoUnsignedWrap() &&
2666        (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
2667     bool Overflow;
2668     APInt NewC =
2669         Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
2670     // If there is overflow, the result must be true or false.
2671     // TODO: Can we assert there is no overflow because InstSimplify always
2672     // handles those cases?
2673     if (!Overflow)
2674       // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2675       return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2676   }
2677 
2678   auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
2679   const APInt &Upper = CR.getUpper();
2680   const APInt &Lower = CR.getLower();
2681   if (Cmp.isSigned()) {
2682     if (Lower.isSignMask())
2683       return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2684     if (Upper.isSignMask())
2685       return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2686   } else {
2687     if (Lower.isMinValue())
2688       return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2689     if (Upper.isMinValue())
2690       return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2691   }
2692 
2693   if (!Add->hasOneUse())
2694     return nullptr;
2695 
2696   // X+C <u C2 -> (X & -C2) == C
2697   //   iff C & (C2-1) == 0
2698   //       C2 is a power of 2
2699   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
2700     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
2701                         ConstantExpr::getNeg(cast<Constant>(Y)));
2702 
2703   // X+C >u C2 -> (X & ~C2) != C
2704   //   iff C & C2 == 0
2705   //       C2+1 is a power of 2
2706   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
2707     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
2708                         ConstantExpr::getNeg(cast<Constant>(Y)));
2709 
2710   return nullptr;
2711 }
2712 
2713 bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2714                                                Value *&RHS, ConstantInt *&Less,
2715                                                ConstantInt *&Equal,
2716                                                ConstantInt *&Greater) {
2717   // TODO: Generalize this to work with other comparison idioms or ensure
2718   // they get canonicalized into this form.
2719 
2720   // select i1 (a == b),
2721   //        i32 Equal,
2722   //        i32 (select i1 (a < b), i32 Less, i32 Greater)
2723   // where Equal, Less and Greater are placeholders for any three constants.
2724   ICmpInst::Predicate PredA;
2725   if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
2726       !ICmpInst::isEquality(PredA))
2727     return false;
2728   Value *EqualVal = SI->getTrueValue();
2729   Value *UnequalVal = SI->getFalseValue();
2730   // We still can get non-canonical predicate here, so canonicalize.
2731   if (PredA == ICmpInst::ICMP_NE)
2732     std::swap(EqualVal, UnequalVal);
2733   if (!match(EqualVal, m_ConstantInt(Equal)))
2734     return false;
2735   ICmpInst::Predicate PredB;
2736   Value *LHS2, *RHS2;
2737   if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
2738                                   m_ConstantInt(Less), m_ConstantInt(Greater))))
2739     return false;
2740   // We can get predicate mismatch here, so canonicalize if possible:
2741   // First, ensure that 'LHS' match.
2742   if (LHS2 != LHS) {
2743     // x sgt y <--> y slt x
2744     std::swap(LHS2, RHS2);
2745     PredB = ICmpInst::getSwappedPredicate(PredB);
2746   }
2747   if (LHS2 != LHS)
2748     return false;
2749   // We also need to canonicalize 'RHS'.
2750   if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
2751     // x sgt C-1  <-->  x sge C  <-->  not(x slt C)
2752     auto FlippedStrictness =
2753         InstCombiner::getFlippedStrictnessPredicateAndConstant(
2754             PredB, cast<Constant>(RHS2));
2755     if (!FlippedStrictness)
2756       return false;
2757     assert(FlippedStrictness->first == ICmpInst::ICMP_SGE && "Sanity check");
2758     RHS2 = FlippedStrictness->second;
2759     // And kind-of perform the result swap.
2760     std::swap(Less, Greater);
2761     PredB = ICmpInst::ICMP_SLT;
2762   }
2763   return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
2764 }
2765 
2766 Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
2767                                                       SelectInst *Select,
2768                                                       ConstantInt *C) {
2769 
2770   assert(C && "Cmp RHS should be a constant int!");
2771   // If we're testing a constant value against the result of a three way
2772   // comparison, the result can be expressed directly in terms of the
2773   // original values being compared.  Note: We could possibly be more
2774   // aggressive here and remove the hasOneUse test. The original select is
2775   // really likely to simplify or sink when we remove a test of the result.
2776   Value *OrigLHS, *OrigRHS;
2777   ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
2778   if (Cmp.hasOneUse() &&
2779       matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
2780                               C3GreaterThan)) {
2781     assert(C1LessThan && C2Equal && C3GreaterThan);
2782 
2783     bool TrueWhenLessThan =
2784         ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
2785             ->isAllOnesValue();
2786     bool TrueWhenEqual =
2787         ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
2788             ->isAllOnesValue();
2789     bool TrueWhenGreaterThan =
2790         ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
2791             ->isAllOnesValue();
2792 
2793     // This generates the new instruction that will replace the original Cmp
2794     // Instruction. Instead of enumerating the various combinations when
2795     // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
2796     // false, we rely on chaining of ORs and future passes of InstCombine to
2797     // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
2798 
2799     // When none of the three constants satisfy the predicate for the RHS (C),
2800     // the entire original Cmp can be simplified to a false.
2801     Value *Cond = Builder.getFalse();
2802     if (TrueWhenLessThan)
2803       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
2804                                                        OrigLHS, OrigRHS));
2805     if (TrueWhenEqual)
2806       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
2807                                                        OrigLHS, OrigRHS));
2808     if (TrueWhenGreaterThan)
2809       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
2810                                                        OrigLHS, OrigRHS));
2811 
2812     return replaceInstUsesWith(Cmp, Cond);
2813   }
2814   return nullptr;
2815 }
2816 
2817 static Instruction *foldICmpBitCast(ICmpInst &Cmp,
2818                                     InstCombiner::BuilderTy &Builder) {
2819   auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
2820   if (!Bitcast)
2821     return nullptr;
2822 
2823   ICmpInst::Predicate Pred = Cmp.getPredicate();
2824   Value *Op1 = Cmp.getOperand(1);
2825   Value *BCSrcOp = Bitcast->getOperand(0);
2826 
2827   // Make sure the bitcast doesn't change the number of vector elements.
2828   if (Bitcast->getSrcTy()->getScalarSizeInBits() ==
2829           Bitcast->getDestTy()->getScalarSizeInBits()) {
2830     // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
2831     Value *X;
2832     if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
2833       // icmp  eq (bitcast (sitofp X)), 0 --> icmp  eq X, 0
2834       // icmp  ne (bitcast (sitofp X)), 0 --> icmp  ne X, 0
2835       // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
2836       // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
2837       if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
2838            Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
2839           match(Op1, m_Zero()))
2840         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2841 
2842       // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
2843       if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
2844         return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
2845 
2846       // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
2847       if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
2848         return new ICmpInst(Pred, X,
2849                             ConstantInt::getAllOnesValue(X->getType()));
2850     }
2851 
2852     // Zero-equality checks are preserved through unsigned floating-point casts:
2853     // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
2854     // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
2855     if (match(BCSrcOp, m_UIToFP(m_Value(X))))
2856       if (Cmp.isEquality() && match(Op1, m_Zero()))
2857         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2858 
2859     // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
2860     // the FP extend/truncate because that cast does not change the sign-bit.
2861     // This is true for all standard IEEE-754 types and the X86 80-bit type.
2862     // The sign-bit is always the most significant bit in those types.
2863     const APInt *C;
2864     bool TrueIfSigned;
2865     if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse() &&
2866         InstCombiner::isSignBitCheck(Pred, *C, TrueIfSigned)) {
2867       if (match(BCSrcOp, m_FPExt(m_Value(X))) ||
2868           match(BCSrcOp, m_FPTrunc(m_Value(X)))) {
2869         // (bitcast (fpext/fptrunc X)) to iX) < 0 --> (bitcast X to iY) < 0
2870         // (bitcast (fpext/fptrunc X)) to iX) > -1 --> (bitcast X to iY) > -1
2871         Type *XType = X->getType();
2872 
2873         // We can't currently handle Power style floating point operations here.
2874         if (!(XType->isPPC_FP128Ty() || BCSrcOp->getType()->isPPC_FP128Ty())) {
2875 
2876           Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
2877           if (auto *XVTy = dyn_cast<VectorType>(XType))
2878             NewType = VectorType::get(NewType, XVTy->getElementCount());
2879           Value *NewBitcast = Builder.CreateBitCast(X, NewType);
2880           if (TrueIfSigned)
2881             return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
2882                                 ConstantInt::getNullValue(NewType));
2883           else
2884             return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
2885                                 ConstantInt::getAllOnesValue(NewType));
2886         }
2887       }
2888     }
2889   }
2890 
2891   // Test to see if the operands of the icmp are casted versions of other
2892   // values. If the ptr->ptr cast can be stripped off both arguments, do so.
2893   if (Bitcast->getType()->isPointerTy() &&
2894       (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
2895     // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
2896     // so eliminate it as well.
2897     if (auto *BC2 = dyn_cast<BitCastInst>(Op1))
2898       Op1 = BC2->getOperand(0);
2899 
2900     Op1 = Builder.CreateBitCast(Op1, BCSrcOp->getType());
2901     return new ICmpInst(Pred, BCSrcOp, Op1);
2902   }
2903 
2904   // Folding: icmp <pred> iN X, C
2905   //  where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC)) to iN
2906   //    and C is a splat of a K-bit pattern
2907   //    and SC is a constant vector = <C', C', C', ..., C'>
2908   // Into:
2909   //   %E = extractelement <M x iK> %vec, i32 C'
2910   //   icmp <pred> iK %E, trunc(C)
2911   const APInt *C;
2912   if (!match(Cmp.getOperand(1), m_APInt(C)) ||
2913       !Bitcast->getType()->isIntegerTy() ||
2914       !Bitcast->getSrcTy()->isIntOrIntVectorTy())
2915     return nullptr;
2916 
2917   Value *Vec;
2918   ArrayRef<int> Mask;
2919   if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
2920     // Check whether every element of Mask is the same constant
2921     if (is_splat(Mask)) {
2922       auto *VecTy = cast<VectorType>(BCSrcOp->getType());
2923       auto *EltTy = cast<IntegerType>(VecTy->getElementType());
2924       if (C->isSplat(EltTy->getBitWidth())) {
2925         // Fold the icmp based on the value of C
2926         // If C is M copies of an iK sized bit pattern,
2927         // then:
2928         //   =>  %E = extractelement <N x iK> %vec, i32 Elem
2929         //       icmp <pred> iK %SplatVal, <pattern>
2930         Value *Elem = Builder.getInt32(Mask[0]);
2931         Value *Extract = Builder.CreateExtractElement(Vec, Elem);
2932         Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
2933         return new ICmpInst(Pred, Extract, NewC);
2934       }
2935     }
2936   }
2937   return nullptr;
2938 }
2939 
2940 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2941 /// where X is some kind of instruction.
2942 Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
2943   const APInt *C;
2944   if (!match(Cmp.getOperand(1), m_APInt(C)))
2945     return nullptr;
2946 
2947   if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) {
2948     switch (BO->getOpcode()) {
2949     case Instruction::Xor:
2950       if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C))
2951         return I;
2952       break;
2953     case Instruction::And:
2954       if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C))
2955         return I;
2956       break;
2957     case Instruction::Or:
2958       if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C))
2959         return I;
2960       break;
2961     case Instruction::Mul:
2962       if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C))
2963         return I;
2964       break;
2965     case Instruction::Shl:
2966       if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C))
2967         return I;
2968       break;
2969     case Instruction::LShr:
2970     case Instruction::AShr:
2971       if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C))
2972         return I;
2973       break;
2974     case Instruction::SRem:
2975       if (Instruction *I = foldICmpSRemConstant(Cmp, BO, *C))
2976         return I;
2977       break;
2978     case Instruction::UDiv:
2979       if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C))
2980         return I;
2981       LLVM_FALLTHROUGH;
2982     case Instruction::SDiv:
2983       if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C))
2984         return I;
2985       break;
2986     case Instruction::Sub:
2987       if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C))
2988         return I;
2989       break;
2990     case Instruction::Add:
2991       if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C))
2992         return I;
2993       break;
2994     default:
2995       break;
2996     }
2997     // TODO: These folds could be refactored to be part of the above calls.
2998     if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C))
2999       return I;
3000   }
3001 
3002   // Match against CmpInst LHS being instructions other than binary operators.
3003 
3004   if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) {
3005     // For now, we only support constant integers while folding the
3006     // ICMP(SELECT)) pattern. We can extend this to support vector of integers
3007     // similar to the cases handled by binary ops above.
3008     if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3009       if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
3010         return I;
3011   }
3012 
3013   if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) {
3014     if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
3015       return I;
3016   }
3017 
3018   if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3019     if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
3020       return I;
3021 
3022   return nullptr;
3023 }
3024 
3025 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
3026 /// icmp eq/ne BO, C.
3027 Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
3028     ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3029   // TODO: Some of these folds could work with arbitrary constants, but this
3030   // function is limited to scalar and vector splat constants.
3031   if (!Cmp.isEquality())
3032     return nullptr;
3033 
3034   ICmpInst::Predicate Pred = Cmp.getPredicate();
3035   bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
3036   Constant *RHS = cast<Constant>(Cmp.getOperand(1));
3037   Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
3038 
3039   switch (BO->getOpcode()) {
3040   case Instruction::SRem:
3041     // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
3042     if (C.isNullValue() && BO->hasOneUse()) {
3043       const APInt *BOC;
3044       if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
3045         Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
3046         return new ICmpInst(Pred, NewRem,
3047                             Constant::getNullValue(BO->getType()));
3048       }
3049     }
3050     break;
3051   case Instruction::Add: {
3052     // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
3053     if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3054       if (BO->hasOneUse())
3055         return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, BOC));
3056     } else if (C.isNullValue()) {
3057       // Replace ((add A, B) != 0) with (A != -B) if A or B is
3058       // efficiently invertible, or if the add has just this one use.
3059       if (Value *NegVal = dyn_castNegVal(BOp1))
3060         return new ICmpInst(Pred, BOp0, NegVal);
3061       if (Value *NegVal = dyn_castNegVal(BOp0))
3062         return new ICmpInst(Pred, NegVal, BOp1);
3063       if (BO->hasOneUse()) {
3064         Value *Neg = Builder.CreateNeg(BOp1);
3065         Neg->takeName(BO);
3066         return new ICmpInst(Pred, BOp0, Neg);
3067       }
3068     }
3069     break;
3070   }
3071   case Instruction::Xor:
3072     if (BO->hasOneUse()) {
3073       if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3074         // For the xor case, we can xor two constants together, eliminating
3075         // the explicit xor.
3076         return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3077       } else if (C.isNullValue()) {
3078         // Replace ((xor A, B) != 0) with (A != B)
3079         return new ICmpInst(Pred, BOp0, BOp1);
3080       }
3081     }
3082     break;
3083   case Instruction::Sub:
3084     if (BO->hasOneUse()) {
3085       // Only check for constant LHS here, as constant RHS will be canonicalized
3086       // to add and use the fold above.
3087       if (Constant *BOC = dyn_cast<Constant>(BOp0)) {
3088         // Replace ((sub BOC, B) != C) with (B != BOC-C).
3089         return new ICmpInst(Pred, BOp1, ConstantExpr::getSub(BOC, RHS));
3090       } else if (C.isNullValue()) {
3091         // Replace ((sub A, B) != 0) with (A != B).
3092         return new ICmpInst(Pred, BOp0, BOp1);
3093       }
3094     }
3095     break;
3096   case Instruction::Or: {
3097     const APInt *BOC;
3098     if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3099       // Comparing if all bits outside of a constant mask are set?
3100       // Replace (X | C) == -1 with (X & ~C) == ~C.
3101       // This removes the -1 constant.
3102       Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3103       Value *And = Builder.CreateAnd(BOp0, NotBOC);
3104       return new ICmpInst(Pred, And, NotBOC);
3105     }
3106     break;
3107   }
3108   case Instruction::And: {
3109     const APInt *BOC;
3110     if (match(BOp1, m_APInt(BOC))) {
3111       // If we have ((X & C) == C), turn it into ((X & C) != 0).
3112       if (C == *BOC && C.isPowerOf2())
3113         return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
3114                             BO, Constant::getNullValue(RHS->getType()));
3115     }
3116     break;
3117   }
3118   case Instruction::UDiv:
3119     if (C.isNullValue()) {
3120       // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
3121       auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3122       return new ICmpInst(NewPred, BOp1, BOp0);
3123     }
3124     break;
3125   default:
3126     break;
3127   }
3128   return nullptr;
3129 }
3130 
3131 /// Fold an equality icmp with LLVM intrinsic and constant operand.
3132 Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3133     ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3134   Type *Ty = II->getType();
3135   unsigned BitWidth = C.getBitWidth();
3136   switch (II->getIntrinsicID()) {
3137   case Intrinsic::abs:
3138     // abs(A) == 0  ->  A == 0
3139     // abs(A) == INT_MIN  ->  A == INT_MIN
3140     if (C.isNullValue() || C.isMinSignedValue())
3141       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3142                           ConstantInt::get(Ty, C));
3143     break;
3144 
3145   case Intrinsic::bswap:
3146     // bswap(A) == C  ->  A == bswap(C)
3147     return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3148                         ConstantInt::get(Ty, C.byteSwap()));
3149 
3150   case Intrinsic::ctlz:
3151   case Intrinsic::cttz: {
3152     // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
3153     if (C == BitWidth)
3154       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3155                           ConstantInt::getNullValue(Ty));
3156 
3157     // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3158     // and Mask1 has bits 0..C+1 set. Similar for ctl, but for high bits.
3159     // Limit to one use to ensure we don't increase instruction count.
3160     unsigned Num = C.getLimitedValue(BitWidth);
3161     if (Num != BitWidth && II->hasOneUse()) {
3162       bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3163       APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3164                                : APInt::getHighBitsSet(BitWidth, Num + 1);
3165       APInt Mask2 = IsTrailing
3166         ? APInt::getOneBitSet(BitWidth, Num)
3167         : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3168       return new ICmpInst(Cmp.getPredicate(),
3169           Builder.CreateAnd(II->getArgOperand(0), Mask1),
3170           ConstantInt::get(Ty, Mask2));
3171     }
3172     break;
3173   }
3174 
3175   case Intrinsic::ctpop: {
3176     // popcount(A) == 0  ->  A == 0 and likewise for !=
3177     // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
3178     bool IsZero = C.isNullValue();
3179     if (IsZero || C == BitWidth)
3180       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3181           IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty));
3182 
3183     break;
3184   }
3185 
3186   case Intrinsic::uadd_sat: {
3187     // uadd.sat(a, b) == 0  ->  (a | b) == 0
3188     if (C.isNullValue()) {
3189       Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3190       return new ICmpInst(Cmp.getPredicate(), Or, Constant::getNullValue(Ty));
3191     }
3192     break;
3193   }
3194 
3195   case Intrinsic::usub_sat: {
3196     // usub.sat(a, b) == 0  ->  a <= b
3197     if (C.isNullValue()) {
3198       ICmpInst::Predicate NewPred = Cmp.getPredicate() == ICmpInst::ICMP_EQ
3199           ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3200       return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3201     }
3202     break;
3203   }
3204   default:
3205     break;
3206   }
3207 
3208   return nullptr;
3209 }
3210 
3211 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
3212 Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
3213                                                              IntrinsicInst *II,
3214                                                              const APInt &C) {
3215   if (Cmp.isEquality())
3216     return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
3217 
3218   Type *Ty = II->getType();
3219   unsigned BitWidth = C.getBitWidth();
3220   ICmpInst::Predicate Pred = Cmp.getPredicate();
3221   switch (II->getIntrinsicID()) {
3222   case Intrinsic::ctpop: {
3223     // (ctpop X > BitWidth - 1) --> X == -1
3224     Value *X = II->getArgOperand(0);
3225     if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
3226       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
3227                              ConstantInt::getAllOnesValue(Ty));
3228     // (ctpop X < BitWidth) --> X != -1
3229     if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
3230       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
3231                              ConstantInt::getAllOnesValue(Ty));
3232     break;
3233   }
3234   case Intrinsic::ctlz: {
3235     // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
3236     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3237       unsigned Num = C.getLimitedValue();
3238       APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3239       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
3240                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3241     }
3242 
3243     // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
3244     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3245       unsigned Num = C.getLimitedValue();
3246       APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
3247       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
3248                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3249     }
3250     break;
3251   }
3252   case Intrinsic::cttz: {
3253     // Limit to one use to ensure we don't increase instruction count.
3254     if (!II->hasOneUse())
3255       return nullptr;
3256 
3257     // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
3258     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3259       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
3260       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
3261                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3262                              ConstantInt::getNullValue(Ty));
3263     }
3264 
3265     // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
3266     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3267       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
3268       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
3269                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3270                              ConstantInt::getNullValue(Ty));
3271     }
3272     break;
3273   }
3274   default:
3275     break;
3276   }
3277 
3278   return nullptr;
3279 }
3280 
3281 /// Handle icmp with constant (but not simple integer constant) RHS.
3282 Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
3283   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3284   Constant *RHSC = dyn_cast<Constant>(Op1);
3285   Instruction *LHSI = dyn_cast<Instruction>(Op0);
3286   if (!RHSC || !LHSI)
3287     return nullptr;
3288 
3289   switch (LHSI->getOpcode()) {
3290   case Instruction::GetElementPtr:
3291     // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
3292     if (RHSC->isNullValue() &&
3293         cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
3294       return new ICmpInst(
3295           I.getPredicate(), LHSI->getOperand(0),
3296           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3297     break;
3298   case Instruction::PHI:
3299     // Only fold icmp into the PHI if the phi and icmp are in the same
3300     // block.  If in the same block, we're encouraging jump threading.  If
3301     // not, we are just pessimizing the code by making an i1 phi.
3302     if (LHSI->getParent() == I.getParent())
3303       if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
3304         return NV;
3305     break;
3306   case Instruction::Select: {
3307     // If either operand of the select is a constant, we can fold the
3308     // comparison into the select arms, which will cause one to be
3309     // constant folded and the select turned into a bitwise or.
3310     Value *Op1 = nullptr, *Op2 = nullptr;
3311     ConstantInt *CI = nullptr;
3312 
3313     auto SimplifyOp = [&](Value *V) {
3314       Value *Op = nullptr;
3315       if (Constant *C = dyn_cast<Constant>(V)) {
3316         Op = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3317       } else if (RHSC->isNullValue()) {
3318         // If null is being compared, check if it can be further simplified.
3319         Op = SimplifyICmpInst(I.getPredicate(), V, RHSC, SQ);
3320       }
3321       return Op;
3322     };
3323     Op1 = SimplifyOp(LHSI->getOperand(1));
3324     if (Op1)
3325       CI = dyn_cast<ConstantInt>(Op1);
3326 
3327     Op2 = SimplifyOp(LHSI->getOperand(2));
3328     if (Op2)
3329       CI = dyn_cast<ConstantInt>(Op2);
3330 
3331     // We only want to perform this transformation if it will not lead to
3332     // additional code. This is true if either both sides of the select
3333     // fold to a constant (in which case the icmp is replaced with a select
3334     // which will usually simplify) or this is the only user of the
3335     // select (in which case we are trading a select+icmp for a simpler
3336     // select+icmp) or all uses of the select can be replaced based on
3337     // dominance information ("Global cases").
3338     bool Transform = false;
3339     if (Op1 && Op2)
3340       Transform = true;
3341     else if (Op1 || Op2) {
3342       // Local case
3343       if (LHSI->hasOneUse())
3344         Transform = true;
3345       // Global cases
3346       else if (CI && !CI->isZero())
3347         // When Op1 is constant try replacing select with second operand.
3348         // Otherwise Op2 is constant and try replacing select with first
3349         // operand.
3350         Transform =
3351             replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
3352     }
3353     if (Transform) {
3354       if (!Op1)
3355         Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
3356                                  I.getName());
3357       if (!Op2)
3358         Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
3359                                  I.getName());
3360       return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
3361     }
3362     break;
3363   }
3364   case Instruction::IntToPtr:
3365     // icmp pred inttoptr(X), null -> icmp pred X, 0
3366     if (RHSC->isNullValue() &&
3367         DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
3368       return new ICmpInst(
3369           I.getPredicate(), LHSI->getOperand(0),
3370           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3371     break;
3372 
3373   case Instruction::Load:
3374     // Try to optimize things like "A[i] > 4" to index computations.
3375     if (GetElementPtrInst *GEP =
3376             dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
3377       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
3378         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
3379             !cast<LoadInst>(LHSI)->isVolatile())
3380           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
3381             return Res;
3382     }
3383     break;
3384   }
3385 
3386   return nullptr;
3387 }
3388 
3389 /// Some comparisons can be simplified.
3390 /// In this case, we are looking for comparisons that look like
3391 /// a check for a lossy truncation.
3392 /// Folds:
3393 ///   icmp SrcPred (x & Mask), x    to    icmp DstPred x, Mask
3394 /// Where Mask is some pattern that produces all-ones in low bits:
3395 ///    (-1 >> y)
3396 ///    ((-1 << y) >> y)     <- non-canonical, has extra uses
3397 ///   ~(-1 << y)
3398 ///    ((1 << y) + (-1))    <- non-canonical, has extra uses
3399 /// The Mask can be a constant, too.
3400 /// For some predicates, the operands are commutative.
3401 /// For others, x can only be on a specific side.
3402 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
3403                                           InstCombiner::BuilderTy &Builder) {
3404   ICmpInst::Predicate SrcPred;
3405   Value *X, *M, *Y;
3406   auto m_VariableMask = m_CombineOr(
3407       m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
3408                   m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
3409       m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
3410                   m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
3411   auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
3412   if (!match(&I, m_c_ICmp(SrcPred,
3413                           m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)),
3414                           m_Deferred(X))))
3415     return nullptr;
3416 
3417   ICmpInst::Predicate DstPred;
3418   switch (SrcPred) {
3419   case ICmpInst::Predicate::ICMP_EQ:
3420     //  x & (-1 >> y) == x    ->    x u<= (-1 >> y)
3421     DstPred = ICmpInst::Predicate::ICMP_ULE;
3422     break;
3423   case ICmpInst::Predicate::ICMP_NE:
3424     //  x & (-1 >> y) != x    ->    x u> (-1 >> y)
3425     DstPred = ICmpInst::Predicate::ICMP_UGT;
3426     break;
3427   case ICmpInst::Predicate::ICMP_ULT:
3428     //  x & (-1 >> y) u< x    ->    x u> (-1 >> y)
3429     //  x u> x & (-1 >> y)    ->    x u> (-1 >> y)
3430     DstPred = ICmpInst::Predicate::ICMP_UGT;
3431     break;
3432   case ICmpInst::Predicate::ICMP_UGE:
3433     //  x & (-1 >> y) u>= x    ->    x u<= (-1 >> y)
3434     //  x u<= x & (-1 >> y)    ->    x u<= (-1 >> y)
3435     DstPred = ICmpInst::Predicate::ICMP_ULE;
3436     break;
3437   case ICmpInst::Predicate::ICMP_SLT:
3438     //  x & (-1 >> y) s< x    ->    x s> (-1 >> y)
3439     //  x s> x & (-1 >> y)    ->    x s> (-1 >> y)
3440     if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3441       return nullptr;
3442     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3443       return nullptr;
3444     DstPred = ICmpInst::Predicate::ICMP_SGT;
3445     break;
3446   case ICmpInst::Predicate::ICMP_SGE:
3447     //  x & (-1 >> y) s>= x    ->    x s<= (-1 >> y)
3448     //  x s<= x & (-1 >> y)    ->    x s<= (-1 >> y)
3449     if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3450       return nullptr;
3451     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3452       return nullptr;
3453     DstPred = ICmpInst::Predicate::ICMP_SLE;
3454     break;
3455   case ICmpInst::Predicate::ICMP_SGT:
3456   case ICmpInst::Predicate::ICMP_SLE:
3457     return nullptr;
3458   case ICmpInst::Predicate::ICMP_UGT:
3459   case ICmpInst::Predicate::ICMP_ULE:
3460     llvm_unreachable("Instsimplify took care of commut. variant");
3461     break;
3462   default:
3463     llvm_unreachable("All possible folds are handled.");
3464   }
3465 
3466   // The mask value may be a vector constant that has undefined elements. But it
3467   // may not be safe to propagate those undefs into the new compare, so replace
3468   // those elements by copying an existing, defined, and safe scalar constant.
3469   Type *OpTy = M->getType();
3470   auto *VecC = dyn_cast<Constant>(M);
3471   auto *OpVTy = dyn_cast<FixedVectorType>(OpTy);
3472   if (OpVTy && VecC && VecC->containsUndefOrPoisonElement()) {
3473     Constant *SafeReplacementConstant = nullptr;
3474     for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) {
3475       if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
3476         SafeReplacementConstant = VecC->getAggregateElement(i);
3477         break;
3478       }
3479     }
3480     assert(SafeReplacementConstant && "Failed to find undef replacement");
3481     M = Constant::replaceUndefsWith(VecC, SafeReplacementConstant);
3482   }
3483 
3484   return Builder.CreateICmp(DstPred, X, M);
3485 }
3486 
3487 /// Some comparisons can be simplified.
3488 /// In this case, we are looking for comparisons that look like
3489 /// a check for a lossy signed truncation.
3490 /// Folds:   (MaskedBits is a constant.)
3491 ///   ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
3492 /// Into:
3493 ///   (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
3494 /// Where  KeptBits = bitwidth(%x) - MaskedBits
3495 static Value *
3496 foldICmpWithTruncSignExtendedVal(ICmpInst &I,
3497                                  InstCombiner::BuilderTy &Builder) {
3498   ICmpInst::Predicate SrcPred;
3499   Value *X;
3500   const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
3501   // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
3502   if (!match(&I, m_c_ICmp(SrcPred,
3503                           m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
3504                                           m_APInt(C1))),
3505                           m_Deferred(X))))
3506     return nullptr;
3507 
3508   // Potential handling of non-splats: for each element:
3509   //  * if both are undef, replace with constant 0.
3510   //    Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
3511   //  * if both are not undef, and are different, bailout.
3512   //  * else, only one is undef, then pick the non-undef one.
3513 
3514   // The shift amount must be equal.
3515   if (*C0 != *C1)
3516     return nullptr;
3517   const APInt &MaskedBits = *C0;
3518   assert(MaskedBits != 0 && "shift by zero should be folded away already.");
3519 
3520   ICmpInst::Predicate DstPred;
3521   switch (SrcPred) {
3522   case ICmpInst::Predicate::ICMP_EQ:
3523     // ((%x << MaskedBits) a>> MaskedBits) == %x
3524     //   =>
3525     // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
3526     DstPred = ICmpInst::Predicate::ICMP_ULT;
3527     break;
3528   case ICmpInst::Predicate::ICMP_NE:
3529     // ((%x << MaskedBits) a>> MaskedBits) != %x
3530     //   =>
3531     // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
3532     DstPred = ICmpInst::Predicate::ICMP_UGE;
3533     break;
3534   // FIXME: are more folds possible?
3535   default:
3536     return nullptr;
3537   }
3538 
3539   auto *XType = X->getType();
3540   const unsigned XBitWidth = XType->getScalarSizeInBits();
3541   const APInt BitWidth = APInt(XBitWidth, XBitWidth);
3542   assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
3543 
3544   // KeptBits = bitwidth(%x) - MaskedBits
3545   const APInt KeptBits = BitWidth - MaskedBits;
3546   assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
3547   // ICmpCst = (1 << KeptBits)
3548   const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
3549   assert(ICmpCst.isPowerOf2());
3550   // AddCst = (1 << (KeptBits-1))
3551   const APInt AddCst = ICmpCst.lshr(1);
3552   assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
3553 
3554   // T0 = add %x, AddCst
3555   Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
3556   // T1 = T0 DstPred ICmpCst
3557   Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
3558 
3559   return T1;
3560 }
3561 
3562 // Given pattern:
3563 //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3564 // we should move shifts to the same hand of 'and', i.e. rewrite as
3565 //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3566 // We are only interested in opposite logical shifts here.
3567 // One of the shifts can be truncated.
3568 // If we can, we want to end up creating 'lshr' shift.
3569 static Value *
3570 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
3571                                            InstCombiner::BuilderTy &Builder) {
3572   if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
3573       !I.getOperand(0)->hasOneUse())
3574     return nullptr;
3575 
3576   auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
3577 
3578   // Look for an 'and' of two logical shifts, one of which may be truncated.
3579   // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
3580   Instruction *XShift, *MaybeTruncation, *YShift;
3581   if (!match(
3582           I.getOperand(0),
3583           m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
3584                   m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
3585                                    m_AnyLogicalShift, m_Instruction(YShift))),
3586                                m_Instruction(MaybeTruncation)))))
3587     return nullptr;
3588 
3589   // We potentially looked past 'trunc', but only when matching YShift,
3590   // therefore YShift must have the widest type.
3591   Instruction *WidestShift = YShift;
3592   // Therefore XShift must have the shallowest type.
3593   // Or they both have identical types if there was no truncation.
3594   Instruction *NarrowestShift = XShift;
3595 
3596   Type *WidestTy = WidestShift->getType();
3597   Type *NarrowestTy = NarrowestShift->getType();
3598   assert(NarrowestTy == I.getOperand(0)->getType() &&
3599          "We did not look past any shifts while matching XShift though.");
3600   bool HadTrunc = WidestTy != I.getOperand(0)->getType();
3601 
3602   // If YShift is a 'lshr', swap the shifts around.
3603   if (match(YShift, m_LShr(m_Value(), m_Value())))
3604     std::swap(XShift, YShift);
3605 
3606   // The shifts must be in opposite directions.
3607   auto XShiftOpcode = XShift->getOpcode();
3608   if (XShiftOpcode == YShift->getOpcode())
3609     return nullptr; // Do not care about same-direction shifts here.
3610 
3611   Value *X, *XShAmt, *Y, *YShAmt;
3612   match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
3613   match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
3614 
3615   // If one of the values being shifted is a constant, then we will end with
3616   // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
3617   // however, we will need to ensure that we won't increase instruction count.
3618   if (!isa<Constant>(X) && !isa<Constant>(Y)) {
3619     // At least one of the hands of the 'and' should be one-use shift.
3620     if (!match(I.getOperand(0),
3621                m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
3622       return nullptr;
3623     if (HadTrunc) {
3624       // Due to the 'trunc', we will need to widen X. For that either the old
3625       // 'trunc' or the shift amt in the non-truncated shift should be one-use.
3626       if (!MaybeTruncation->hasOneUse() &&
3627           !NarrowestShift->getOperand(1)->hasOneUse())
3628         return nullptr;
3629     }
3630   }
3631 
3632   // We have two shift amounts from two different shifts. The types of those
3633   // shift amounts may not match. If that's the case let's bailout now.
3634   if (XShAmt->getType() != YShAmt->getType())
3635     return nullptr;
3636 
3637   // As input, we have the following pattern:
3638   //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3639   // We want to rewrite that as:
3640   //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3641   // While we know that originally (Q+K) would not overflow
3642   // (because  2 * (N-1) u<= iN -1), we have looked past extensions of
3643   // shift amounts. so it may now overflow in smaller bitwidth.
3644   // To ensure that does not happen, we need to ensure that the total maximal
3645   // shift amount is still representable in that smaller bit width.
3646   unsigned MaximalPossibleTotalShiftAmount =
3647       (WidestTy->getScalarSizeInBits() - 1) +
3648       (NarrowestTy->getScalarSizeInBits() - 1);
3649   APInt MaximalRepresentableShiftAmount =
3650       APInt::getAllOnesValue(XShAmt->getType()->getScalarSizeInBits());
3651   if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
3652     return nullptr;
3653 
3654   // Can we fold (XShAmt+YShAmt) ?
3655   auto *NewShAmt = dyn_cast_or_null<Constant>(
3656       SimplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
3657                       /*isNUW=*/false, SQ.getWithInstruction(&I)));
3658   if (!NewShAmt)
3659     return nullptr;
3660   NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
3661   unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
3662 
3663   // Is the new shift amount smaller than the bit width?
3664   // FIXME: could also rely on ConstantRange.
3665   if (!match(NewShAmt,
3666              m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
3667                                 APInt(WidestBitWidth, WidestBitWidth))))
3668     return nullptr;
3669 
3670   // An extra legality check is needed if we had trunc-of-lshr.
3671   if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
3672     auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
3673                     WidestShift]() {
3674       // It isn't obvious whether it's worth it to analyze non-constants here.
3675       // Also, let's basically give up on non-splat cases, pessimizing vectors.
3676       // If *any* of these preconditions matches we can perform the fold.
3677       Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
3678                                     ? NewShAmt->getSplatValue()
3679                                     : NewShAmt;
3680       // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
3681       if (NewShAmtSplat &&
3682           (NewShAmtSplat->isNullValue() ||
3683            NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
3684         return true;
3685       // We consider *min* leading zeros so a single outlier
3686       // blocks the transform as opposed to allowing it.
3687       if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
3688         KnownBits Known = computeKnownBits(C, SQ.DL);
3689         unsigned MinLeadZero = Known.countMinLeadingZeros();
3690         // If the value being shifted has at most lowest bit set we can fold.
3691         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3692         if (MaxActiveBits <= 1)
3693           return true;
3694         // Precondition:  NewShAmt u<= countLeadingZeros(C)
3695         if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
3696           return true;
3697       }
3698       if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
3699         KnownBits Known = computeKnownBits(C, SQ.DL);
3700         unsigned MinLeadZero = Known.countMinLeadingZeros();
3701         // If the value being shifted has at most lowest bit set we can fold.
3702         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3703         if (MaxActiveBits <= 1)
3704           return true;
3705         // Precondition:  ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
3706         if (NewShAmtSplat) {
3707           APInt AdjNewShAmt =
3708               (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
3709           if (AdjNewShAmt.ule(MinLeadZero))
3710             return true;
3711         }
3712       }
3713       return false; // Can't tell if it's ok.
3714     };
3715     if (!CanFold())
3716       return nullptr;
3717   }
3718 
3719   // All good, we can do this fold.
3720   X = Builder.CreateZExt(X, WidestTy);
3721   Y = Builder.CreateZExt(Y, WidestTy);
3722   // The shift is the same that was for X.
3723   Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
3724                   ? Builder.CreateLShr(X, NewShAmt)
3725                   : Builder.CreateShl(X, NewShAmt);
3726   Value *T1 = Builder.CreateAnd(T0, Y);
3727   return Builder.CreateICmp(I.getPredicate(), T1,
3728                             Constant::getNullValue(WidestTy));
3729 }
3730 
3731 /// Fold
3732 ///   (-1 u/ x) u< y
3733 ///   ((x * y) u/ x) != y
3734 /// to
3735 ///   @llvm.umul.with.overflow(x, y) plus extraction of overflow bit
3736 /// Note that the comparison is commutative, while inverted (u>=, ==) predicate
3737 /// will mean that we are looking for the opposite answer.
3738 Value *InstCombinerImpl::foldUnsignedMultiplicationOverflowCheck(ICmpInst &I) {
3739   ICmpInst::Predicate Pred;
3740   Value *X, *Y;
3741   Instruction *Mul;
3742   bool NeedNegation;
3743   // Look for: (-1 u/ x) u</u>= y
3744   if (!I.isEquality() &&
3745       match(&I, m_c_ICmp(Pred, m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
3746                          m_Value(Y)))) {
3747     Mul = nullptr;
3748 
3749     // Are we checking that overflow does not happen, or does happen?
3750     switch (Pred) {
3751     case ICmpInst::Predicate::ICMP_ULT:
3752       NeedNegation = false;
3753       break; // OK
3754     case ICmpInst::Predicate::ICMP_UGE:
3755       NeedNegation = true;
3756       break; // OK
3757     default:
3758       return nullptr; // Wrong predicate.
3759     }
3760   } else // Look for: ((x * y) u/ x) !=/== y
3761       if (I.isEquality() &&
3762           match(&I, m_c_ICmp(Pred, m_Value(Y),
3763                              m_OneUse(m_UDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
3764                                                                   m_Value(X)),
3765                                                           m_Instruction(Mul)),
3766                                              m_Deferred(X)))))) {
3767     NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
3768   } else
3769     return nullptr;
3770 
3771   BuilderTy::InsertPointGuard Guard(Builder);
3772   // If the pattern included (x * y), we'll want to insert new instructions
3773   // right before that original multiplication so that we can replace it.
3774   bool MulHadOtherUses = Mul && !Mul->hasOneUse();
3775   if (MulHadOtherUses)
3776     Builder.SetInsertPoint(Mul);
3777 
3778   Function *F = Intrinsic::getDeclaration(
3779       I.getModule(), Intrinsic::umul_with_overflow, X->getType());
3780   CallInst *Call = Builder.CreateCall(F, {X, Y}, "umul");
3781 
3782   // If the multiplication was used elsewhere, to ensure that we don't leave
3783   // "duplicate" instructions, replace uses of that original multiplication
3784   // with the multiplication result from the with.overflow intrinsic.
3785   if (MulHadOtherUses)
3786     replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "umul.val"));
3787 
3788   Value *Res = Builder.CreateExtractValue(Call, 1, "umul.ov");
3789   if (NeedNegation) // This technically increases instruction count.
3790     Res = Builder.CreateNot(Res, "umul.not.ov");
3791 
3792   // If we replaced the mul, erase it. Do this after all uses of Builder,
3793   // as the mul is used as insertion point.
3794   if (MulHadOtherUses)
3795     eraseInstFromFunction(*Mul);
3796 
3797   return Res;
3798 }
3799 
3800 static Instruction *foldICmpXNegX(ICmpInst &I) {
3801   CmpInst::Predicate Pred;
3802   Value *X;
3803   if (!match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X))))
3804     return nullptr;
3805 
3806   if (ICmpInst::isSigned(Pred))
3807     Pred = ICmpInst::getSwappedPredicate(Pred);
3808   else if (ICmpInst::isUnsigned(Pred))
3809     Pred = ICmpInst::getSignedPredicate(Pred);
3810   // else for equality-comparisons just keep the predicate.
3811 
3812   return ICmpInst::Create(Instruction::ICmp, Pred, X,
3813                           Constant::getNullValue(X->getType()), I.getName());
3814 }
3815 
3816 /// Try to fold icmp (binop), X or icmp X, (binop).
3817 /// TODO: A large part of this logic is duplicated in InstSimplify's
3818 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
3819 /// duplication.
3820 Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
3821                                              const SimplifyQuery &SQ) {
3822   const SimplifyQuery Q = SQ.getWithInstruction(&I);
3823   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3824 
3825   // Special logic for binary operators.
3826   BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
3827   BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
3828   if (!BO0 && !BO1)
3829     return nullptr;
3830 
3831   if (Instruction *NewICmp = foldICmpXNegX(I))
3832     return NewICmp;
3833 
3834   const CmpInst::Predicate Pred = I.getPredicate();
3835   Value *X;
3836 
3837   // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
3838   // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
3839   if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
3840       (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3841     return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
3842   // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
3843   if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
3844       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3845     return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
3846 
3847   bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
3848   if (BO0 && isa<OverflowingBinaryOperator>(BO0))
3849     NoOp0WrapProblem =
3850         ICmpInst::isEquality(Pred) ||
3851         (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
3852         (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
3853   if (BO1 && isa<OverflowingBinaryOperator>(BO1))
3854     NoOp1WrapProblem =
3855         ICmpInst::isEquality(Pred) ||
3856         (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
3857         (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
3858 
3859   // Analyze the case when either Op0 or Op1 is an add instruction.
3860   // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
3861   Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3862   if (BO0 && BO0->getOpcode() == Instruction::Add) {
3863     A = BO0->getOperand(0);
3864     B = BO0->getOperand(1);
3865   }
3866   if (BO1 && BO1->getOpcode() == Instruction::Add) {
3867     C = BO1->getOperand(0);
3868     D = BO1->getOperand(1);
3869   }
3870 
3871   // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
3872   // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
3873   if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
3874     return new ICmpInst(Pred, A == Op1 ? B : A,
3875                         Constant::getNullValue(Op1->getType()));
3876 
3877   // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
3878   // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
3879   if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
3880     return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
3881                         C == Op0 ? D : C);
3882 
3883   // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
3884   if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
3885       NoOp1WrapProblem) {
3886     // Determine Y and Z in the form icmp (X+Y), (X+Z).
3887     Value *Y, *Z;
3888     if (A == C) {
3889       // C + B == C + D  ->  B == D
3890       Y = B;
3891       Z = D;
3892     } else if (A == D) {
3893       // D + B == C + D  ->  B == C
3894       Y = B;
3895       Z = C;
3896     } else if (B == C) {
3897       // A + C == C + D  ->  A == D
3898       Y = A;
3899       Z = D;
3900     } else {
3901       assert(B == D);
3902       // A + D == C + D  ->  A == C
3903       Y = A;
3904       Z = C;
3905     }
3906     return new ICmpInst(Pred, Y, Z);
3907   }
3908 
3909   // icmp slt (A + -1), Op1 -> icmp sle A, Op1
3910   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
3911       match(B, m_AllOnes()))
3912     return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
3913 
3914   // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
3915   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
3916       match(B, m_AllOnes()))
3917     return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
3918 
3919   // icmp sle (A + 1), Op1 -> icmp slt A, Op1
3920   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
3921     return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
3922 
3923   // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
3924   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
3925     return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
3926 
3927   // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
3928   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
3929       match(D, m_AllOnes()))
3930     return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
3931 
3932   // icmp sle Op0, (C + -1) -> icmp slt Op0, C
3933   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
3934       match(D, m_AllOnes()))
3935     return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
3936 
3937   // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
3938   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
3939     return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
3940 
3941   // icmp slt Op0, (C + 1) -> icmp sle Op0, C
3942   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
3943     return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
3944 
3945   // TODO: The subtraction-related identities shown below also hold, but
3946   // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
3947   // wouldn't happen even if they were implemented.
3948   //
3949   // icmp ult (A - 1), Op1 -> icmp ule A, Op1
3950   // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
3951   // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
3952   // icmp ule Op0, (C - 1) -> icmp ult Op0, C
3953 
3954   // icmp ule (A + 1), Op0 -> icmp ult A, Op1
3955   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
3956     return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
3957 
3958   // icmp ugt (A + 1), Op0 -> icmp uge A, Op1
3959   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
3960     return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
3961 
3962   // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
3963   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
3964     return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
3965 
3966   // icmp ult Op0, (C + 1) -> icmp ule Op0, C
3967   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
3968     return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
3969 
3970   // if C1 has greater magnitude than C2:
3971   //  icmp (A + C1), (C + C2) -> icmp (A + C3), C
3972   //  s.t. C3 = C1 - C2
3973   //
3974   // if C2 has greater magnitude than C1:
3975   //  icmp (A + C1), (C + C2) -> icmp A, (C + C3)
3976   //  s.t. C3 = C2 - C1
3977   if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
3978       (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
3979     if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
3980       if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
3981         const APInt &AP1 = C1->getValue();
3982         const APInt &AP2 = C2->getValue();
3983         if (AP1.isNegative() == AP2.isNegative()) {
3984           APInt AP1Abs = C1->getValue().abs();
3985           APInt AP2Abs = C2->getValue().abs();
3986           if (AP1Abs.uge(AP2Abs)) {
3987             ConstantInt *C3 = Builder.getInt(AP1 - AP2);
3988             bool HasNUW = BO0->hasNoUnsignedWrap() && C3->getValue().ule(AP1);
3989             bool HasNSW = BO0->hasNoSignedWrap();
3990             Value *NewAdd = Builder.CreateAdd(A, C3, "", HasNUW, HasNSW);
3991             return new ICmpInst(Pred, NewAdd, C);
3992           } else {
3993             ConstantInt *C3 = Builder.getInt(AP2 - AP1);
3994             bool HasNUW = BO1->hasNoUnsignedWrap() && C3->getValue().ule(AP2);
3995             bool HasNSW = BO1->hasNoSignedWrap();
3996             Value *NewAdd = Builder.CreateAdd(C, C3, "", HasNUW, HasNSW);
3997             return new ICmpInst(Pred, A, NewAdd);
3998           }
3999         }
4000       }
4001 
4002   // Analyze the case when either Op0 or Op1 is a sub instruction.
4003   // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
4004   A = nullptr;
4005   B = nullptr;
4006   C = nullptr;
4007   D = nullptr;
4008   if (BO0 && BO0->getOpcode() == Instruction::Sub) {
4009     A = BO0->getOperand(0);
4010     B = BO0->getOperand(1);
4011   }
4012   if (BO1 && BO1->getOpcode() == Instruction::Sub) {
4013     C = BO1->getOperand(0);
4014     D = BO1->getOperand(1);
4015   }
4016 
4017   // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
4018   if (A == Op1 && NoOp0WrapProblem)
4019     return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
4020   // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
4021   if (C == Op0 && NoOp1WrapProblem)
4022     return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
4023 
4024   // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
4025   // (A - B) u>/u<= A --> B u>/u<= A
4026   if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
4027     return new ICmpInst(Pred, B, A);
4028   // C u</u>= (C - D) --> C u</u>= D
4029   if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
4030     return new ICmpInst(Pred, C, D);
4031   // (A - B) u>=/u< A --> B u>/u<= A  iff B != 0
4032   if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
4033       isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
4034     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
4035   // C u<=/u> (C - D) --> C u</u>= D  iff B != 0
4036   if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
4037       isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
4038     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
4039 
4040   // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
4041   if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
4042     return new ICmpInst(Pred, A, C);
4043 
4044   // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
4045   if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
4046     return new ICmpInst(Pred, D, B);
4047 
4048   // icmp (0-X) < cst --> x > -cst
4049   if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
4050     Value *X;
4051     if (match(BO0, m_Neg(m_Value(X))))
4052       if (Constant *RHSC = dyn_cast<Constant>(Op1))
4053         if (RHSC->isNotMinSignedValue())
4054           return new ICmpInst(I.getSwappedPredicate(), X,
4055                               ConstantExpr::getNeg(RHSC));
4056   }
4057 
4058   {
4059     // Try to remove shared constant multiplier from equality comparison:
4060     // X * C == Y * C (with no overflowing/aliasing) --> X == Y
4061     Value *X, *Y;
4062     const APInt *C;
4063     if (match(Op0, m_Mul(m_Value(X), m_APInt(C))) && *C != 0 &&
4064         match(Op1, m_Mul(m_Value(Y), m_SpecificInt(*C))) && I.isEquality())
4065       if (!C->countTrailingZeros() ||
4066           (BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap()) ||
4067           (BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap()))
4068       return new ICmpInst(Pred, X, Y);
4069   }
4070 
4071   BinaryOperator *SRem = nullptr;
4072   // icmp (srem X, Y), Y
4073   if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
4074     SRem = BO0;
4075   // icmp Y, (srem X, Y)
4076   else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
4077            Op0 == BO1->getOperand(1))
4078     SRem = BO1;
4079   if (SRem) {
4080     // We don't check hasOneUse to avoid increasing register pressure because
4081     // the value we use is the same value this instruction was already using.
4082     switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
4083     default:
4084       break;
4085     case ICmpInst::ICMP_EQ:
4086       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4087     case ICmpInst::ICMP_NE:
4088       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4089     case ICmpInst::ICMP_SGT:
4090     case ICmpInst::ICMP_SGE:
4091       return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
4092                           Constant::getAllOnesValue(SRem->getType()));
4093     case ICmpInst::ICMP_SLT:
4094     case ICmpInst::ICMP_SLE:
4095       return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
4096                           Constant::getNullValue(SRem->getType()));
4097     }
4098   }
4099 
4100   if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
4101       BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
4102     switch (BO0->getOpcode()) {
4103     default:
4104       break;
4105     case Instruction::Add:
4106     case Instruction::Sub:
4107     case Instruction::Xor: {
4108       if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
4109         return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4110 
4111       const APInt *C;
4112       if (match(BO0->getOperand(1), m_APInt(C))) {
4113         // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
4114         if (C->isSignMask()) {
4115           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4116           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4117         }
4118 
4119         // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
4120         if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
4121           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4122           NewPred = I.getSwappedPredicate(NewPred);
4123           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4124         }
4125       }
4126       break;
4127     }
4128     case Instruction::Mul: {
4129       if (!I.isEquality())
4130         break;
4131 
4132       const APInt *C;
4133       if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() &&
4134           !C->isOneValue()) {
4135         // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
4136         // Mask = -1 >> count-trailing-zeros(C).
4137         if (unsigned TZs = C->countTrailingZeros()) {
4138           Constant *Mask = ConstantInt::get(
4139               BO0->getType(),
4140               APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
4141           Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
4142           Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
4143           return new ICmpInst(Pred, And1, And2);
4144         }
4145       }
4146       break;
4147     }
4148     case Instruction::UDiv:
4149     case Instruction::LShr:
4150       if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
4151         break;
4152       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4153 
4154     case Instruction::SDiv:
4155       if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
4156         break;
4157       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4158 
4159     case Instruction::AShr:
4160       if (!BO0->isExact() || !BO1->isExact())
4161         break;
4162       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4163 
4164     case Instruction::Shl: {
4165       bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
4166       bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
4167       if (!NUW && !NSW)
4168         break;
4169       if (!NSW && I.isSigned())
4170         break;
4171       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4172     }
4173     }
4174   }
4175 
4176   if (BO0) {
4177     // Transform  A & (L - 1) `ult` L --> L != 0
4178     auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
4179     auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
4180 
4181     if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
4182       auto *Zero = Constant::getNullValue(BO0->getType());
4183       return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
4184     }
4185   }
4186 
4187   if (Value *V = foldUnsignedMultiplicationOverflowCheck(I))
4188     return replaceInstUsesWith(I, V);
4189 
4190   if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder))
4191     return replaceInstUsesWith(I, V);
4192 
4193   if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
4194     return replaceInstUsesWith(I, V);
4195 
4196   if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
4197     return replaceInstUsesWith(I, V);
4198 
4199   return nullptr;
4200 }
4201 
4202 /// Fold icmp Pred min|max(X, Y), X.
4203 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
4204   ICmpInst::Predicate Pred = Cmp.getPredicate();
4205   Value *Op0 = Cmp.getOperand(0);
4206   Value *X = Cmp.getOperand(1);
4207 
4208   // Canonicalize minimum or maximum operand to LHS of the icmp.
4209   if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
4210       match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
4211       match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
4212       match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
4213     std::swap(Op0, X);
4214     Pred = Cmp.getSwappedPredicate();
4215   }
4216 
4217   Value *Y;
4218   if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
4219     // smin(X, Y)  == X --> X s<= Y
4220     // smin(X, Y) s>= X --> X s<= Y
4221     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
4222       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
4223 
4224     // smin(X, Y) != X --> X s> Y
4225     // smin(X, Y) s< X --> X s> Y
4226     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
4227       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
4228 
4229     // These cases should be handled in InstSimplify:
4230     // smin(X, Y) s<= X --> true
4231     // smin(X, Y) s> X --> false
4232     return nullptr;
4233   }
4234 
4235   if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
4236     // smax(X, Y)  == X --> X s>= Y
4237     // smax(X, Y) s<= X --> X s>= Y
4238     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
4239       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
4240 
4241     // smax(X, Y) != X --> X s< Y
4242     // smax(X, Y) s> X --> X s< Y
4243     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
4244       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
4245 
4246     // These cases should be handled in InstSimplify:
4247     // smax(X, Y) s>= X --> true
4248     // smax(X, Y) s< X --> false
4249     return nullptr;
4250   }
4251 
4252   if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
4253     // umin(X, Y)  == X --> X u<= Y
4254     // umin(X, Y) u>= X --> X u<= Y
4255     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
4256       return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
4257 
4258     // umin(X, Y) != X --> X u> Y
4259     // umin(X, Y) u< X --> X u> Y
4260     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
4261       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
4262 
4263     // These cases should be handled in InstSimplify:
4264     // umin(X, Y) u<= X --> true
4265     // umin(X, Y) u> X --> false
4266     return nullptr;
4267   }
4268 
4269   if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
4270     // umax(X, Y)  == X --> X u>= Y
4271     // umax(X, Y) u<= X --> X u>= Y
4272     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
4273       return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
4274 
4275     // umax(X, Y) != X --> X u< Y
4276     // umax(X, Y) u> X --> X u< Y
4277     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
4278       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
4279 
4280     // These cases should be handled in InstSimplify:
4281     // umax(X, Y) u>= X --> true
4282     // umax(X, Y) u< X --> false
4283     return nullptr;
4284   }
4285 
4286   return nullptr;
4287 }
4288 
4289 Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
4290   if (!I.isEquality())
4291     return nullptr;
4292 
4293   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4294   const CmpInst::Predicate Pred = I.getPredicate();
4295   Value *A, *B, *C, *D;
4296   if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4297     if (A == Op1 || B == Op1) { // (A^B) == A  ->  B == 0
4298       Value *OtherVal = A == Op1 ? B : A;
4299       return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4300     }
4301 
4302     if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
4303       // A^c1 == C^c2 --> A == C^(c1^c2)
4304       ConstantInt *C1, *C2;
4305       if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
4306           Op1->hasOneUse()) {
4307         Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
4308         Value *Xor = Builder.CreateXor(C, NC);
4309         return new ICmpInst(Pred, A, Xor);
4310       }
4311 
4312       // A^B == A^D -> B == D
4313       if (A == C)
4314         return new ICmpInst(Pred, B, D);
4315       if (A == D)
4316         return new ICmpInst(Pred, B, C);
4317       if (B == C)
4318         return new ICmpInst(Pred, A, D);
4319       if (B == D)
4320         return new ICmpInst(Pred, A, C);
4321     }
4322   }
4323 
4324   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
4325     // A == (A^B)  ->  B == 0
4326     Value *OtherVal = A == Op0 ? B : A;
4327     return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4328   }
4329 
4330   // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
4331   if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
4332       match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
4333     Value *X = nullptr, *Y = nullptr, *Z = nullptr;
4334 
4335     if (A == C) {
4336       X = B;
4337       Y = D;
4338       Z = A;
4339     } else if (A == D) {
4340       X = B;
4341       Y = C;
4342       Z = A;
4343     } else if (B == C) {
4344       X = A;
4345       Y = D;
4346       Z = B;
4347     } else if (B == D) {
4348       X = A;
4349       Y = C;
4350       Z = B;
4351     }
4352 
4353     if (X) { // Build (X^Y) & Z
4354       Op1 = Builder.CreateXor(X, Y);
4355       Op1 = Builder.CreateAnd(Op1, Z);
4356       return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
4357     }
4358   }
4359 
4360   // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
4361   // and       (B & (1<<X)-1) == (zext A) --> A == (trunc B)
4362   ConstantInt *Cst1;
4363   if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
4364        match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
4365       (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
4366        match(Op1, m_ZExt(m_Value(A))))) {
4367     APInt Pow2 = Cst1->getValue() + 1;
4368     if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
4369         Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
4370       return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
4371   }
4372 
4373   // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
4374   // For lshr and ashr pairs.
4375   if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4376        match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
4377       (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4378        match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
4379     unsigned TypeBits = Cst1->getBitWidth();
4380     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4381     if (ShAmt < TypeBits && ShAmt != 0) {
4382       ICmpInst::Predicate NewPred =
4383           Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
4384       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4385       APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
4386       return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
4387     }
4388   }
4389 
4390   // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
4391   if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
4392       match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
4393     unsigned TypeBits = Cst1->getBitWidth();
4394     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4395     if (ShAmt < TypeBits && ShAmt != 0) {
4396       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4397       APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
4398       Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
4399                                       I.getName() + ".mask");
4400       return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
4401     }
4402   }
4403 
4404   // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
4405   // "icmp (and X, mask), cst"
4406   uint64_t ShAmt = 0;
4407   if (Op0->hasOneUse() &&
4408       match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
4409       match(Op1, m_ConstantInt(Cst1)) &&
4410       // Only do this when A has multiple uses.  This is most important to do
4411       // when it exposes other optimizations.
4412       !A->hasOneUse()) {
4413     unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
4414 
4415     if (ShAmt < ASize) {
4416       APInt MaskV =
4417           APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
4418       MaskV <<= ShAmt;
4419 
4420       APInt CmpV = Cst1->getValue().zext(ASize);
4421       CmpV <<= ShAmt;
4422 
4423       Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
4424       return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
4425     }
4426   }
4427 
4428   // If both operands are byte-swapped or bit-reversed, just compare the
4429   // original values.
4430   // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
4431   // and handle more intrinsics.
4432   if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
4433       (match(Op0, m_BitReverse(m_Value(A))) &&
4434        match(Op1, m_BitReverse(m_Value(B)))))
4435     return new ICmpInst(Pred, A, B);
4436 
4437   // Canonicalize checking for a power-of-2-or-zero value:
4438   // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
4439   // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
4440   if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
4441                                    m_Deferred(A)))) ||
4442       !match(Op1, m_ZeroInt()))
4443     A = nullptr;
4444 
4445   // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
4446   // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
4447   if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
4448     A = Op1;
4449   else if (match(Op1,
4450                  m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
4451     A = Op0;
4452 
4453   if (A) {
4454     Type *Ty = A->getType();
4455     CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
4456     return Pred == ICmpInst::ICMP_EQ
4457         ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop, ConstantInt::get(Ty, 2))
4458         : new ICmpInst(ICmpInst::ICMP_UGT, CtPop, ConstantInt::get(Ty, 1));
4459   }
4460 
4461   return nullptr;
4462 }
4463 
4464 static Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp,
4465                                            InstCombiner::BuilderTy &Builder) {
4466   assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
4467   auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
4468   Value *X;
4469   if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
4470     return nullptr;
4471 
4472   bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
4473   bool IsSignedCmp = ICmp.isSigned();
4474   if (auto *CastOp1 = dyn_cast<CastInst>(ICmp.getOperand(1))) {
4475     // If the signedness of the two casts doesn't agree (i.e. one is a sext
4476     // and the other is a zext), then we can't handle this.
4477     // TODO: This is too strict. We can handle some predicates (equality?).
4478     if (CastOp0->getOpcode() != CastOp1->getOpcode())
4479       return nullptr;
4480 
4481     // Not an extension from the same type?
4482     Value *Y = CastOp1->getOperand(0);
4483     Type *XTy = X->getType(), *YTy = Y->getType();
4484     if (XTy != YTy) {
4485       // One of the casts must have one use because we are creating a new cast.
4486       if (!CastOp0->hasOneUse() && !CastOp1->hasOneUse())
4487         return nullptr;
4488       // Extend the narrower operand to the type of the wider operand.
4489       if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
4490         X = Builder.CreateCast(CastOp0->getOpcode(), X, YTy);
4491       else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
4492         Y = Builder.CreateCast(CastOp0->getOpcode(), Y, XTy);
4493       else
4494         return nullptr;
4495     }
4496 
4497     // (zext X) == (zext Y) --> X == Y
4498     // (sext X) == (sext Y) --> X == Y
4499     if (ICmp.isEquality())
4500       return new ICmpInst(ICmp.getPredicate(), X, Y);
4501 
4502     // A signed comparison of sign extended values simplifies into a
4503     // signed comparison.
4504     if (IsSignedCmp && IsSignedExt)
4505       return new ICmpInst(ICmp.getPredicate(), X, Y);
4506 
4507     // The other three cases all fold into an unsigned comparison.
4508     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
4509   }
4510 
4511   // Below here, we are only folding a compare with constant.
4512   auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
4513   if (!C)
4514     return nullptr;
4515 
4516   // Compute the constant that would happen if we truncated to SrcTy then
4517   // re-extended to DestTy.
4518   Type *SrcTy = CastOp0->getSrcTy();
4519   Type *DestTy = CastOp0->getDestTy();
4520   Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
4521   Constant *Res2 = ConstantExpr::getCast(CastOp0->getOpcode(), Res1, DestTy);
4522 
4523   // If the re-extended constant didn't change...
4524   if (Res2 == C) {
4525     if (ICmp.isEquality())
4526       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4527 
4528     // A signed comparison of sign extended values simplifies into a
4529     // signed comparison.
4530     if (IsSignedExt && IsSignedCmp)
4531       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4532 
4533     // The other three cases all fold into an unsigned comparison.
4534     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res1);
4535   }
4536 
4537   // The re-extended constant changed, partly changed (in the case of a vector),
4538   // or could not be determined to be equal (in the case of a constant
4539   // expression), so the constant cannot be represented in the shorter type.
4540   // All the cases that fold to true or false will have already been handled
4541   // by SimplifyICmpInst, so only deal with the tricky case.
4542   if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
4543     return nullptr;
4544 
4545   // Is source op positive?
4546   // icmp ult (sext X), C --> icmp sgt X, -1
4547   if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
4548     return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy));
4549 
4550   // Is source op negative?
4551   // icmp ugt (sext X), C --> icmp slt X, 0
4552   assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
4553   return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy));
4554 }
4555 
4556 /// Handle icmp (cast x), (cast or constant).
4557 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
4558   auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
4559   if (!CastOp0)
4560     return nullptr;
4561   if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
4562     return nullptr;
4563 
4564   Value *Op0Src = CastOp0->getOperand(0);
4565   Type *SrcTy = CastOp0->getSrcTy();
4566   Type *DestTy = CastOp0->getDestTy();
4567 
4568   // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
4569   // integer type is the same size as the pointer type.
4570   auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) {
4571     if (isa<VectorType>(SrcTy)) {
4572       SrcTy = cast<VectorType>(SrcTy)->getElementType();
4573       DestTy = cast<VectorType>(DestTy)->getElementType();
4574     }
4575     return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth();
4576   };
4577   if (CastOp0->getOpcode() == Instruction::PtrToInt &&
4578       CompatibleSizes(SrcTy, DestTy)) {
4579     Value *NewOp1 = nullptr;
4580     if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
4581       Value *PtrSrc = PtrToIntOp1->getOperand(0);
4582       if (PtrSrc->getType()->getPointerAddressSpace() ==
4583           Op0Src->getType()->getPointerAddressSpace()) {
4584         NewOp1 = PtrToIntOp1->getOperand(0);
4585         // If the pointer types don't match, insert a bitcast.
4586         if (Op0Src->getType() != NewOp1->getType())
4587           NewOp1 = Builder.CreateBitCast(NewOp1, Op0Src->getType());
4588       }
4589     } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
4590       NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
4591     }
4592 
4593     if (NewOp1)
4594       return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
4595   }
4596 
4597   return foldICmpWithZextOrSext(ICmp, Builder);
4598 }
4599 
4600 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS) {
4601   switch (BinaryOp) {
4602     default:
4603       llvm_unreachable("Unsupported binary op");
4604     case Instruction::Add:
4605     case Instruction::Sub:
4606       return match(RHS, m_Zero());
4607     case Instruction::Mul:
4608       return match(RHS, m_One());
4609   }
4610 }
4611 
4612 OverflowResult
4613 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
4614                                   bool IsSigned, Value *LHS, Value *RHS,
4615                                   Instruction *CxtI) const {
4616   switch (BinaryOp) {
4617     default:
4618       llvm_unreachable("Unsupported binary op");
4619     case Instruction::Add:
4620       if (IsSigned)
4621         return computeOverflowForSignedAdd(LHS, RHS, CxtI);
4622       else
4623         return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
4624     case Instruction::Sub:
4625       if (IsSigned)
4626         return computeOverflowForSignedSub(LHS, RHS, CxtI);
4627       else
4628         return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
4629     case Instruction::Mul:
4630       if (IsSigned)
4631         return computeOverflowForSignedMul(LHS, RHS, CxtI);
4632       else
4633         return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
4634   }
4635 }
4636 
4637 bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
4638                                              bool IsSigned, Value *LHS,
4639                                              Value *RHS, Instruction &OrigI,
4640                                              Value *&Result,
4641                                              Constant *&Overflow) {
4642   if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
4643     std::swap(LHS, RHS);
4644 
4645   // If the overflow check was an add followed by a compare, the insertion point
4646   // may be pointing to the compare.  We want to insert the new instructions
4647   // before the add in case there are uses of the add between the add and the
4648   // compare.
4649   Builder.SetInsertPoint(&OrigI);
4650 
4651   Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
4652   if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
4653     OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
4654 
4655   if (isNeutralValue(BinaryOp, RHS)) {
4656     Result = LHS;
4657     Overflow = ConstantInt::getFalse(OverflowTy);
4658     return true;
4659   }
4660 
4661   switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
4662     case OverflowResult::MayOverflow:
4663       return false;
4664     case OverflowResult::AlwaysOverflowsLow:
4665     case OverflowResult::AlwaysOverflowsHigh:
4666       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4667       Result->takeName(&OrigI);
4668       Overflow = ConstantInt::getTrue(OverflowTy);
4669       return true;
4670     case OverflowResult::NeverOverflows:
4671       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4672       Result->takeName(&OrigI);
4673       Overflow = ConstantInt::getFalse(OverflowTy);
4674       if (auto *Inst = dyn_cast<Instruction>(Result)) {
4675         if (IsSigned)
4676           Inst->setHasNoSignedWrap();
4677         else
4678           Inst->setHasNoUnsignedWrap();
4679       }
4680       return true;
4681   }
4682 
4683   llvm_unreachable("Unexpected overflow result");
4684 }
4685 
4686 /// Recognize and process idiom involving test for multiplication
4687 /// overflow.
4688 ///
4689 /// The caller has matched a pattern of the form:
4690 ///   I = cmp u (mul(zext A, zext B), V
4691 /// The function checks if this is a test for overflow and if so replaces
4692 /// multiplication with call to 'mul.with.overflow' intrinsic.
4693 ///
4694 /// \param I Compare instruction.
4695 /// \param MulVal Result of 'mult' instruction.  It is one of the arguments of
4696 ///               the compare instruction.  Must be of integer type.
4697 /// \param OtherVal The other argument of compare instruction.
4698 /// \returns Instruction which must replace the compare instruction, NULL if no
4699 ///          replacement required.
4700 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
4701                                          Value *OtherVal,
4702                                          InstCombinerImpl &IC) {
4703   // Don't bother doing this transformation for pointers, don't do it for
4704   // vectors.
4705   if (!isa<IntegerType>(MulVal->getType()))
4706     return nullptr;
4707 
4708   assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
4709   assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
4710   auto *MulInstr = dyn_cast<Instruction>(MulVal);
4711   if (!MulInstr)
4712     return nullptr;
4713   assert(MulInstr->getOpcode() == Instruction::Mul);
4714 
4715   auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
4716        *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
4717   assert(LHS->getOpcode() == Instruction::ZExt);
4718   assert(RHS->getOpcode() == Instruction::ZExt);
4719   Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
4720 
4721   // Calculate type and width of the result produced by mul.with.overflow.
4722   Type *TyA = A->getType(), *TyB = B->getType();
4723   unsigned WidthA = TyA->getPrimitiveSizeInBits(),
4724            WidthB = TyB->getPrimitiveSizeInBits();
4725   unsigned MulWidth;
4726   Type *MulType;
4727   if (WidthB > WidthA) {
4728     MulWidth = WidthB;
4729     MulType = TyB;
4730   } else {
4731     MulWidth = WidthA;
4732     MulType = TyA;
4733   }
4734 
4735   // In order to replace the original mul with a narrower mul.with.overflow,
4736   // all uses must ignore upper bits of the product.  The number of used low
4737   // bits must be not greater than the width of mul.with.overflow.
4738   if (MulVal->hasNUsesOrMore(2))
4739     for (User *U : MulVal->users()) {
4740       if (U == &I)
4741         continue;
4742       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4743         // Check if truncation ignores bits above MulWidth.
4744         unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
4745         if (TruncWidth > MulWidth)
4746           return nullptr;
4747       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4748         // Check if AND ignores bits above MulWidth.
4749         if (BO->getOpcode() != Instruction::And)
4750           return nullptr;
4751         if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
4752           const APInt &CVal = CI->getValue();
4753           if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
4754             return nullptr;
4755         } else {
4756           // In this case we could have the operand of the binary operation
4757           // being defined in another block, and performing the replacement
4758           // could break the dominance relation.
4759           return nullptr;
4760         }
4761       } else {
4762         // Other uses prohibit this transformation.
4763         return nullptr;
4764       }
4765     }
4766 
4767   // Recognize patterns
4768   switch (I.getPredicate()) {
4769   case ICmpInst::ICMP_EQ:
4770   case ICmpInst::ICMP_NE:
4771     // Recognize pattern:
4772     //   mulval = mul(zext A, zext B)
4773     //   cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits.
4774     ConstantInt *CI;
4775     Value *ValToMask;
4776     if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
4777       if (ValToMask != MulVal)
4778         return nullptr;
4779       const APInt &CVal = CI->getValue() + 1;
4780       if (CVal.isPowerOf2()) {
4781         unsigned MaskWidth = CVal.logBase2();
4782         if (MaskWidth == MulWidth)
4783           break; // Recognized
4784       }
4785     }
4786     return nullptr;
4787 
4788   case ICmpInst::ICMP_UGT:
4789     // Recognize pattern:
4790     //   mulval = mul(zext A, zext B)
4791     //   cmp ugt mulval, max
4792     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4793       APInt MaxVal = APInt::getMaxValue(MulWidth);
4794       MaxVal = MaxVal.zext(CI->getBitWidth());
4795       if (MaxVal.eq(CI->getValue()))
4796         break; // Recognized
4797     }
4798     return nullptr;
4799 
4800   case ICmpInst::ICMP_UGE:
4801     // Recognize pattern:
4802     //   mulval = mul(zext A, zext B)
4803     //   cmp uge mulval, max+1
4804     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4805       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4806       if (MaxVal.eq(CI->getValue()))
4807         break; // Recognized
4808     }
4809     return nullptr;
4810 
4811   case ICmpInst::ICMP_ULE:
4812     // Recognize pattern:
4813     //   mulval = mul(zext A, zext B)
4814     //   cmp ule mulval, max
4815     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4816       APInt MaxVal = APInt::getMaxValue(MulWidth);
4817       MaxVal = MaxVal.zext(CI->getBitWidth());
4818       if (MaxVal.eq(CI->getValue()))
4819         break; // Recognized
4820     }
4821     return nullptr;
4822 
4823   case ICmpInst::ICMP_ULT:
4824     // Recognize pattern:
4825     //   mulval = mul(zext A, zext B)
4826     //   cmp ule mulval, max + 1
4827     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4828       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4829       if (MaxVal.eq(CI->getValue()))
4830         break; // Recognized
4831     }
4832     return nullptr;
4833 
4834   default:
4835     return nullptr;
4836   }
4837 
4838   InstCombiner::BuilderTy &Builder = IC.Builder;
4839   Builder.SetInsertPoint(MulInstr);
4840 
4841   // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
4842   Value *MulA = A, *MulB = B;
4843   if (WidthA < MulWidth)
4844     MulA = Builder.CreateZExt(A, MulType);
4845   if (WidthB < MulWidth)
4846     MulB = Builder.CreateZExt(B, MulType);
4847   Function *F = Intrinsic::getDeclaration(
4848       I.getModule(), Intrinsic::umul_with_overflow, MulType);
4849   CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
4850   IC.addToWorklist(MulInstr);
4851 
4852   // If there are uses of mul result other than the comparison, we know that
4853   // they are truncation or binary AND. Change them to use result of
4854   // mul.with.overflow and adjust properly mask/size.
4855   if (MulVal->hasNUsesOrMore(2)) {
4856     Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
4857     for (User *U : make_early_inc_range(MulVal->users())) {
4858       if (U == &I || U == OtherVal)
4859         continue;
4860       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4861         if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
4862           IC.replaceInstUsesWith(*TI, Mul);
4863         else
4864           TI->setOperand(0, Mul);
4865       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4866         assert(BO->getOpcode() == Instruction::And);
4867         // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
4868         ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
4869         APInt ShortMask = CI->getValue().trunc(MulWidth);
4870         Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
4871         Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
4872         IC.replaceInstUsesWith(*BO, Zext);
4873       } else {
4874         llvm_unreachable("Unexpected Binary operation");
4875       }
4876       IC.addToWorklist(cast<Instruction>(U));
4877     }
4878   }
4879   if (isa<Instruction>(OtherVal))
4880     IC.addToWorklist(cast<Instruction>(OtherVal));
4881 
4882   // The original icmp gets replaced with the overflow value, maybe inverted
4883   // depending on predicate.
4884   bool Inverse = false;
4885   switch (I.getPredicate()) {
4886   case ICmpInst::ICMP_NE:
4887     break;
4888   case ICmpInst::ICMP_EQ:
4889     Inverse = true;
4890     break;
4891   case ICmpInst::ICMP_UGT:
4892   case ICmpInst::ICMP_UGE:
4893     if (I.getOperand(0) == MulVal)
4894       break;
4895     Inverse = true;
4896     break;
4897   case ICmpInst::ICMP_ULT:
4898   case ICmpInst::ICMP_ULE:
4899     if (I.getOperand(1) == MulVal)
4900       break;
4901     Inverse = true;
4902     break;
4903   default:
4904     llvm_unreachable("Unexpected predicate");
4905   }
4906   if (Inverse) {
4907     Value *Res = Builder.CreateExtractValue(Call, 1);
4908     return BinaryOperator::CreateNot(Res);
4909   }
4910 
4911   return ExtractValueInst::Create(Call, 1);
4912 }
4913 
4914 /// When performing a comparison against a constant, it is possible that not all
4915 /// the bits in the LHS are demanded. This helper method computes the mask that
4916 /// IS demanded.
4917 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
4918   const APInt *RHS;
4919   if (!match(I.getOperand(1), m_APInt(RHS)))
4920     return APInt::getAllOnesValue(BitWidth);
4921 
4922   // If this is a normal comparison, it demands all bits. If it is a sign bit
4923   // comparison, it only demands the sign bit.
4924   bool UnusedBit;
4925   if (InstCombiner::isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
4926     return APInt::getSignMask(BitWidth);
4927 
4928   switch (I.getPredicate()) {
4929   // For a UGT comparison, we don't care about any bits that
4930   // correspond to the trailing ones of the comparand.  The value of these
4931   // bits doesn't impact the outcome of the comparison, because any value
4932   // greater than the RHS must differ in a bit higher than these due to carry.
4933   case ICmpInst::ICMP_UGT:
4934     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes());
4935 
4936   // Similarly, for a ULT comparison, we don't care about the trailing zeros.
4937   // Any value less than the RHS must differ in a higher bit because of carries.
4938   case ICmpInst::ICMP_ULT:
4939     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
4940 
4941   default:
4942     return APInt::getAllOnesValue(BitWidth);
4943   }
4944 }
4945 
4946 /// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst
4947 /// should be swapped.
4948 /// The decision is based on how many times these two operands are reused
4949 /// as subtract operands and their positions in those instructions.
4950 /// The rationale is that several architectures use the same instruction for
4951 /// both subtract and cmp. Thus, it is better if the order of those operands
4952 /// match.
4953 /// \return true if Op0 and Op1 should be swapped.
4954 static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) {
4955   // Filter out pointer values as those cannot appear directly in subtract.
4956   // FIXME: we may want to go through inttoptrs or bitcasts.
4957   if (Op0->getType()->isPointerTy())
4958     return false;
4959   // If a subtract already has the same operands as a compare, swapping would be
4960   // bad. If a subtract has the same operands as a compare but in reverse order,
4961   // then swapping is good.
4962   int GoodToSwap = 0;
4963   for (const User *U : Op0->users()) {
4964     if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
4965       GoodToSwap++;
4966     else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
4967       GoodToSwap--;
4968   }
4969   return GoodToSwap > 0;
4970 }
4971 
4972 /// Check that one use is in the same block as the definition and all
4973 /// other uses are in blocks dominated by a given block.
4974 ///
4975 /// \param DI Definition
4976 /// \param UI Use
4977 /// \param DB Block that must dominate all uses of \p DI outside
4978 ///           the parent block
4979 /// \return true when \p UI is the only use of \p DI in the parent block
4980 /// and all other uses of \p DI are in blocks dominated by \p DB.
4981 ///
4982 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
4983                                         const Instruction *UI,
4984                                         const BasicBlock *DB) const {
4985   assert(DI && UI && "Instruction not defined\n");
4986   // Ignore incomplete definitions.
4987   if (!DI->getParent())
4988     return false;
4989   // DI and UI must be in the same block.
4990   if (DI->getParent() != UI->getParent())
4991     return false;
4992   // Protect from self-referencing blocks.
4993   if (DI->getParent() == DB)
4994     return false;
4995   for (const User *U : DI->users()) {
4996     auto *Usr = cast<Instruction>(U);
4997     if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
4998       return false;
4999   }
5000   return true;
5001 }
5002 
5003 /// Return true when the instruction sequence within a block is select-cmp-br.
5004 static bool isChainSelectCmpBranch(const SelectInst *SI) {
5005   const BasicBlock *BB = SI->getParent();
5006   if (!BB)
5007     return false;
5008   auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
5009   if (!BI || BI->getNumSuccessors() != 2)
5010     return false;
5011   auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
5012   if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
5013     return false;
5014   return true;
5015 }
5016 
5017 /// True when a select result is replaced by one of its operands
5018 /// in select-icmp sequence. This will eventually result in the elimination
5019 /// of the select.
5020 ///
5021 /// \param SI    Select instruction
5022 /// \param Icmp  Compare instruction
5023 /// \param SIOpd Operand that replaces the select
5024 ///
5025 /// Notes:
5026 /// - The replacement is global and requires dominator information
5027 /// - The caller is responsible for the actual replacement
5028 ///
5029 /// Example:
5030 ///
5031 /// entry:
5032 ///  %4 = select i1 %3, %C* %0, %C* null
5033 ///  %5 = icmp eq %C* %4, null
5034 ///  br i1 %5, label %9, label %7
5035 ///  ...
5036 ///  ; <label>:7                                       ; preds = %entry
5037 ///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
5038 ///  ...
5039 ///
5040 /// can be transformed to
5041 ///
5042 ///  %5 = icmp eq %C* %0, null
5043 ///  %6 = select i1 %3, i1 %5, i1 true
5044 ///  br i1 %6, label %9, label %7
5045 ///  ...
5046 ///  ; <label>:7                                       ; preds = %entry
5047 ///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replace by %0!
5048 ///
5049 /// Similar when the first operand of the select is a constant or/and
5050 /// the compare is for not equal rather than equal.
5051 ///
5052 /// NOTE: The function is only called when the select and compare constants
5053 /// are equal, the optimization can work only for EQ predicates. This is not a
5054 /// major restriction since a NE compare should be 'normalized' to an equal
5055 /// compare, which usually happens in the combiner and test case
5056 /// select-cmp-br.ll checks for it.
5057 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
5058                                                  const ICmpInst *Icmp,
5059                                                  const unsigned SIOpd) {
5060   assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
5061   if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
5062     BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
5063     // The check for the single predecessor is not the best that can be
5064     // done. But it protects efficiently against cases like when SI's
5065     // home block has two successors, Succ and Succ1, and Succ1 predecessor
5066     // of Succ. Then SI can't be replaced by SIOpd because the use that gets
5067     // replaced can be reached on either path. So the uniqueness check
5068     // guarantees that the path all uses of SI (outside SI's parent) are on
5069     // is disjoint from all other paths out of SI. But that information
5070     // is more expensive to compute, and the trade-off here is in favor
5071     // of compile-time. It should also be noticed that we check for a single
5072     // predecessor and not only uniqueness. This to handle the situation when
5073     // Succ and Succ1 points to the same basic block.
5074     if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
5075       NumSel++;
5076       SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
5077       return true;
5078     }
5079   }
5080   return false;
5081 }
5082 
5083 /// Try to fold the comparison based on range information we can get by checking
5084 /// whether bits are known to be zero or one in the inputs.
5085 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
5086   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5087   Type *Ty = Op0->getType();
5088   ICmpInst::Predicate Pred = I.getPredicate();
5089 
5090   // Get scalar or pointer size.
5091   unsigned BitWidth = Ty->isIntOrIntVectorTy()
5092                           ? Ty->getScalarSizeInBits()
5093                           : DL.getPointerTypeSizeInBits(Ty->getScalarType());
5094 
5095   if (!BitWidth)
5096     return nullptr;
5097 
5098   KnownBits Op0Known(BitWidth);
5099   KnownBits Op1Known(BitWidth);
5100 
5101   if (SimplifyDemandedBits(&I, 0,
5102                            getDemandedBitsLHSMask(I, BitWidth),
5103                            Op0Known, 0))
5104     return &I;
5105 
5106   if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
5107                            Op1Known, 0))
5108     return &I;
5109 
5110   // Given the known and unknown bits, compute a range that the LHS could be
5111   // in.  Compute the Min, Max and RHS values based on the known bits. For the
5112   // EQ and NE we use unsigned values.
5113   APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
5114   APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
5115   if (I.isSigned()) {
5116     Op0Min = Op0Known.getSignedMinValue();
5117     Op0Max = Op0Known.getSignedMaxValue();
5118     Op1Min = Op1Known.getSignedMinValue();
5119     Op1Max = Op1Known.getSignedMaxValue();
5120   } else {
5121     Op0Min = Op0Known.getMinValue();
5122     Op0Max = Op0Known.getMaxValue();
5123     Op1Min = Op1Known.getMinValue();
5124     Op1Max = Op1Known.getMaxValue();
5125   }
5126 
5127   // If Min and Max are known to be the same, then SimplifyDemandedBits figured
5128   // out that the LHS or RHS is a constant. Constant fold this now, so that
5129   // code below can assume that Min != Max.
5130   if (!isa<Constant>(Op0) && Op0Min == Op0Max)
5131     return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1);
5132   if (!isa<Constant>(Op1) && Op1Min == Op1Max)
5133     return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min));
5134 
5135   // Based on the range information we know about the LHS, see if we can
5136   // simplify this comparison.  For example, (x&4) < 8 is always true.
5137   switch (Pred) {
5138   default:
5139     llvm_unreachable("Unknown icmp opcode!");
5140   case ICmpInst::ICMP_EQ:
5141   case ICmpInst::ICMP_NE: {
5142     if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
5143       return replaceInstUsesWith(
5144           I, ConstantInt::getBool(I.getType(), Pred == CmpInst::ICMP_NE));
5145 
5146     // If all bits are known zero except for one, then we know at most one bit
5147     // is set. If the comparison is against zero, then this is a check to see if
5148     // *that* bit is set.
5149     APInt Op0KnownZeroInverted = ~Op0Known.Zero;
5150     if (Op1Known.isZero()) {
5151       // If the LHS is an AND with the same constant, look through it.
5152       Value *LHS = nullptr;
5153       const APInt *LHSC;
5154       if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
5155           *LHSC != Op0KnownZeroInverted)
5156         LHS = Op0;
5157 
5158       Value *X;
5159       if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
5160         APInt ValToCheck = Op0KnownZeroInverted;
5161         Type *XTy = X->getType();
5162         if (ValToCheck.isPowerOf2()) {
5163           // ((1 << X) & 8) == 0 -> X != 3
5164           // ((1 << X) & 8) != 0 -> X == 3
5165           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5166           auto NewPred = ICmpInst::getInversePredicate(Pred);
5167           return new ICmpInst(NewPred, X, CmpC);
5168         } else if ((++ValToCheck).isPowerOf2()) {
5169           // ((1 << X) & 7) == 0 -> X >= 3
5170           // ((1 << X) & 7) != 0 -> X  < 3
5171           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5172           auto NewPred =
5173               Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
5174           return new ICmpInst(NewPred, X, CmpC);
5175         }
5176       }
5177 
5178       // Check if the LHS is 8 >>u x and the result is a power of 2 like 1.
5179       const APInt *CI;
5180       if (Op0KnownZeroInverted.isOneValue() &&
5181           match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
5182         // ((8 >>u X) & 1) == 0 -> X != 3
5183         // ((8 >>u X) & 1) != 0 -> X == 3
5184         unsigned CmpVal = CI->countTrailingZeros();
5185         auto NewPred = ICmpInst::getInversePredicate(Pred);
5186         return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
5187       }
5188     }
5189     break;
5190   }
5191   case ICmpInst::ICMP_ULT: {
5192     if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
5193       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5194     if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
5195       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5196     if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
5197       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5198 
5199     const APInt *CmpC;
5200     if (match(Op1, m_APInt(CmpC))) {
5201       // A <u C -> A == C-1 if min(A)+1 == C
5202       if (*CmpC == Op0Min + 1)
5203         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5204                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5205       // X <u C --> X == 0, if the number of zero bits in the bottom of X
5206       // exceeds the log2 of C.
5207       if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
5208         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5209                             Constant::getNullValue(Op1->getType()));
5210     }
5211     break;
5212   }
5213   case ICmpInst::ICMP_UGT: {
5214     if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
5215       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5216     if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= max(B)
5217       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5218     if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
5219       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5220 
5221     const APInt *CmpC;
5222     if (match(Op1, m_APInt(CmpC))) {
5223       // A >u C -> A == C+1 if max(a)-1 == C
5224       if (*CmpC == Op0Max - 1)
5225         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5226                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5227       // X >u C --> X != 0, if the number of zero bits in the bottom of X
5228       // exceeds the log2 of C.
5229       if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
5230         return new ICmpInst(ICmpInst::ICMP_NE, Op0,
5231                             Constant::getNullValue(Op1->getType()));
5232     }
5233     break;
5234   }
5235   case ICmpInst::ICMP_SLT: {
5236     if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(C)
5237       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5238     if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(C)
5239       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5240     if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
5241       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5242     const APInt *CmpC;
5243     if (match(Op1, m_APInt(CmpC))) {
5244       if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
5245         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5246                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5247     }
5248     break;
5249   }
5250   case ICmpInst::ICMP_SGT: {
5251     if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
5252       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5253     if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
5254       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5255     if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
5256       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5257     const APInt *CmpC;
5258     if (match(Op1, m_APInt(CmpC))) {
5259       if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
5260         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5261                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5262     }
5263     break;
5264   }
5265   case ICmpInst::ICMP_SGE:
5266     assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
5267     if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
5268       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5269     if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
5270       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5271     if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
5272       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5273     break;
5274   case ICmpInst::ICMP_SLE:
5275     assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
5276     if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
5277       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5278     if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
5279       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5280     if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
5281       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5282     break;
5283   case ICmpInst::ICMP_UGE:
5284     assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
5285     if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
5286       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5287     if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
5288       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5289     if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
5290       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5291     break;
5292   case ICmpInst::ICMP_ULE:
5293     assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
5294     if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
5295       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5296     if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
5297       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5298     if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
5299       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5300     break;
5301   }
5302 
5303   // Turn a signed comparison into an unsigned one if both operands are known to
5304   // have the same sign.
5305   if (I.isSigned() &&
5306       ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
5307        (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
5308     return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
5309 
5310   return nullptr;
5311 }
5312 
5313 llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
5314 InstCombiner::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
5315                                                        Constant *C) {
5316   assert(ICmpInst::isRelational(Pred) && ICmpInst::isIntPredicate(Pred) &&
5317          "Only for relational integer predicates.");
5318 
5319   Type *Type = C->getType();
5320   bool IsSigned = ICmpInst::isSigned(Pred);
5321 
5322   CmpInst::Predicate UnsignedPred = ICmpInst::getUnsignedPredicate(Pred);
5323   bool WillIncrement =
5324       UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT;
5325 
5326   // Check if the constant operand can be safely incremented/decremented
5327   // without overflowing/underflowing.
5328   auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
5329     return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
5330   };
5331 
5332   Constant *SafeReplacementConstant = nullptr;
5333   if (auto *CI = dyn_cast<ConstantInt>(C)) {
5334     // Bail out if the constant can't be safely incremented/decremented.
5335     if (!ConstantIsOk(CI))
5336       return llvm::None;
5337   } else if (auto *FVTy = dyn_cast<FixedVectorType>(Type)) {
5338     unsigned NumElts = FVTy->getNumElements();
5339     for (unsigned i = 0; i != NumElts; ++i) {
5340       Constant *Elt = C->getAggregateElement(i);
5341       if (!Elt)
5342         return llvm::None;
5343 
5344       if (isa<UndefValue>(Elt))
5345         continue;
5346 
5347       // Bail out if we can't determine if this constant is min/max or if we
5348       // know that this constant is min/max.
5349       auto *CI = dyn_cast<ConstantInt>(Elt);
5350       if (!CI || !ConstantIsOk(CI))
5351         return llvm::None;
5352 
5353       if (!SafeReplacementConstant)
5354         SafeReplacementConstant = CI;
5355     }
5356   } else {
5357     // ConstantExpr?
5358     return llvm::None;
5359   }
5360 
5361   // It may not be safe to change a compare predicate in the presence of
5362   // undefined elements, so replace those elements with the first safe constant
5363   // that we found.
5364   // TODO: in case of poison, it is safe; let's replace undefs only.
5365   if (C->containsUndefOrPoisonElement()) {
5366     assert(SafeReplacementConstant && "Replacement constant not set");
5367     C = Constant::replaceUndefsWith(C, SafeReplacementConstant);
5368   }
5369 
5370   CmpInst::Predicate NewPred = CmpInst::getFlippedStrictnessPredicate(Pred);
5371 
5372   // Increment or decrement the constant.
5373   Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
5374   Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne);
5375 
5376   return std::make_pair(NewPred, NewC);
5377 }
5378 
5379 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
5380 /// it into the appropriate icmp lt or icmp gt instruction. This transform
5381 /// allows them to be folded in visitICmpInst.
5382 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
5383   ICmpInst::Predicate Pred = I.getPredicate();
5384   if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
5385       InstCombiner::isCanonicalPredicate(Pred))
5386     return nullptr;
5387 
5388   Value *Op0 = I.getOperand(0);
5389   Value *Op1 = I.getOperand(1);
5390   auto *Op1C = dyn_cast<Constant>(Op1);
5391   if (!Op1C)
5392     return nullptr;
5393 
5394   auto FlippedStrictness =
5395       InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
5396   if (!FlippedStrictness)
5397     return nullptr;
5398 
5399   return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
5400 }
5401 
5402 /// If we have a comparison with a non-canonical predicate, if we can update
5403 /// all the users, invert the predicate and adjust all the users.
5404 CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
5405   // Is the predicate already canonical?
5406   CmpInst::Predicate Pred = I.getPredicate();
5407   if (InstCombiner::isCanonicalPredicate(Pred))
5408     return nullptr;
5409 
5410   // Can all users be adjusted to predicate inversion?
5411   if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
5412     return nullptr;
5413 
5414   // Ok, we can canonicalize comparison!
5415   // Let's first invert the comparison's predicate.
5416   I.setPredicate(CmpInst::getInversePredicate(Pred));
5417   I.setName(I.getName() + ".not");
5418 
5419   // And, adapt users.
5420   freelyInvertAllUsersOf(&I);
5421 
5422   return &I;
5423 }
5424 
5425 /// Integer compare with boolean values can always be turned into bitwise ops.
5426 static Instruction *canonicalizeICmpBool(ICmpInst &I,
5427                                          InstCombiner::BuilderTy &Builder) {
5428   Value *A = I.getOperand(0), *B = I.getOperand(1);
5429   assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
5430 
5431   // A boolean compared to true/false can be simplified to Op0/true/false in
5432   // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
5433   // Cases not handled by InstSimplify are always 'not' of Op0.
5434   if (match(B, m_Zero())) {
5435     switch (I.getPredicate()) {
5436       case CmpInst::ICMP_EQ:  // A ==   0 -> !A
5437       case CmpInst::ICMP_ULE: // A <=u  0 -> !A
5438       case CmpInst::ICMP_SGE: // A >=s  0 -> !A
5439         return BinaryOperator::CreateNot(A);
5440       default:
5441         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5442     }
5443   } else if (match(B, m_One())) {
5444     switch (I.getPredicate()) {
5445       case CmpInst::ICMP_NE:  // A !=  1 -> !A
5446       case CmpInst::ICMP_ULT: // A <u  1 -> !A
5447       case CmpInst::ICMP_SGT: // A >s -1 -> !A
5448         return BinaryOperator::CreateNot(A);
5449       default:
5450         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5451     }
5452   }
5453 
5454   switch (I.getPredicate()) {
5455   default:
5456     llvm_unreachable("Invalid icmp instruction!");
5457   case ICmpInst::ICMP_EQ:
5458     // icmp eq i1 A, B -> ~(A ^ B)
5459     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
5460 
5461   case ICmpInst::ICMP_NE:
5462     // icmp ne i1 A, B -> A ^ B
5463     return BinaryOperator::CreateXor(A, B);
5464 
5465   case ICmpInst::ICMP_UGT:
5466     // icmp ugt -> icmp ult
5467     std::swap(A, B);
5468     LLVM_FALLTHROUGH;
5469   case ICmpInst::ICMP_ULT:
5470     // icmp ult i1 A, B -> ~A & B
5471     return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
5472 
5473   case ICmpInst::ICMP_SGT:
5474     // icmp sgt -> icmp slt
5475     std::swap(A, B);
5476     LLVM_FALLTHROUGH;
5477   case ICmpInst::ICMP_SLT:
5478     // icmp slt i1 A, B -> A & ~B
5479     return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
5480 
5481   case ICmpInst::ICMP_UGE:
5482     // icmp uge -> icmp ule
5483     std::swap(A, B);
5484     LLVM_FALLTHROUGH;
5485   case ICmpInst::ICMP_ULE:
5486     // icmp ule i1 A, B -> ~A | B
5487     return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
5488 
5489   case ICmpInst::ICMP_SGE:
5490     // icmp sge -> icmp sle
5491     std::swap(A, B);
5492     LLVM_FALLTHROUGH;
5493   case ICmpInst::ICMP_SLE:
5494     // icmp sle i1 A, B -> A | ~B
5495     return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
5496   }
5497 }
5498 
5499 // Transform pattern like:
5500 //   (1 << Y) u<= X  or  ~(-1 << Y) u<  X  or  ((1 << Y)+(-1)) u<  X
5501 //   (1 << Y) u>  X  or  ~(-1 << Y) u>= X  or  ((1 << Y)+(-1)) u>= X
5502 // Into:
5503 //   (X l>> Y) != 0
5504 //   (X l>> Y) == 0
5505 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
5506                                             InstCombiner::BuilderTy &Builder) {
5507   ICmpInst::Predicate Pred, NewPred;
5508   Value *X, *Y;
5509   if (match(&Cmp,
5510             m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
5511     switch (Pred) {
5512     case ICmpInst::ICMP_ULE:
5513       NewPred = ICmpInst::ICMP_NE;
5514       break;
5515     case ICmpInst::ICMP_UGT:
5516       NewPred = ICmpInst::ICMP_EQ;
5517       break;
5518     default:
5519       return nullptr;
5520     }
5521   } else if (match(&Cmp, m_c_ICmp(Pred,
5522                                   m_OneUse(m_CombineOr(
5523                                       m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
5524                                       m_Add(m_Shl(m_One(), m_Value(Y)),
5525                                             m_AllOnes()))),
5526                                   m_Value(X)))) {
5527     // The variant with 'add' is not canonical, (the variant with 'not' is)
5528     // we only get it because it has extra uses, and can't be canonicalized,
5529 
5530     switch (Pred) {
5531     case ICmpInst::ICMP_ULT:
5532       NewPred = ICmpInst::ICMP_NE;
5533       break;
5534     case ICmpInst::ICMP_UGE:
5535       NewPred = ICmpInst::ICMP_EQ;
5536       break;
5537     default:
5538       return nullptr;
5539     }
5540   } else
5541     return nullptr;
5542 
5543   Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
5544   Constant *Zero = Constant::getNullValue(NewX->getType());
5545   return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
5546 }
5547 
5548 static Instruction *foldVectorCmp(CmpInst &Cmp,
5549                                   InstCombiner::BuilderTy &Builder) {
5550   const CmpInst::Predicate Pred = Cmp.getPredicate();
5551   Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
5552   Value *V1, *V2;
5553   ArrayRef<int> M;
5554   if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
5555     return nullptr;
5556 
5557   // If both arguments of the cmp are shuffles that use the same mask and
5558   // shuffle within a single vector, move the shuffle after the cmp:
5559   // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
5560   Type *V1Ty = V1->getType();
5561   if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
5562       V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
5563     Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
5564     return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), M);
5565   }
5566 
5567   // Try to canonicalize compare with splatted operand and splat constant.
5568   // TODO: We could generalize this for more than splats. See/use the code in
5569   //       InstCombiner::foldVectorBinop().
5570   Constant *C;
5571   if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
5572     return nullptr;
5573 
5574   // Length-changing splats are ok, so adjust the constants as needed:
5575   // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
5576   Constant *ScalarC = C->getSplatValue(/* AllowUndefs */ true);
5577   int MaskSplatIndex;
5578   if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) {
5579     // We allow undefs in matching, but this transform removes those for safety.
5580     // Demanded elements analysis should be able to recover some/all of that.
5581     C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
5582                                  ScalarC);
5583     SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
5584     Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
5585     return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()),
5586                                  NewM);
5587   }
5588 
5589   return nullptr;
5590 }
5591 
5592 // extract(uadd.with.overflow(A, B), 0) ult A
5593 //  -> extract(uadd.with.overflow(A, B), 1)
5594 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
5595   CmpInst::Predicate Pred = I.getPredicate();
5596   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5597 
5598   Value *UAddOv;
5599   Value *A, *B;
5600   auto UAddOvResultPat = m_ExtractValue<0>(
5601       m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
5602   if (match(Op0, UAddOvResultPat) &&
5603       ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
5604        (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
5605         (match(A, m_One()) || match(B, m_One()))) ||
5606        (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
5607         (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
5608     // extract(uadd.with.overflow(A, B), 0) < A
5609     // extract(uadd.with.overflow(A, 1), 0) == 0
5610     // extract(uadd.with.overflow(A, -1), 0) != -1
5611     UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
5612   else if (match(Op1, UAddOvResultPat) &&
5613            Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B))
5614     // A > extract(uadd.with.overflow(A, B), 0)
5615     UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
5616   else
5617     return nullptr;
5618 
5619   return ExtractValueInst::Create(UAddOv, 1);
5620 }
5621 
5622 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
5623   bool Changed = false;
5624   const SimplifyQuery Q = SQ.getWithInstruction(&I);
5625   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5626   unsigned Op0Cplxity = getComplexity(Op0);
5627   unsigned Op1Cplxity = getComplexity(Op1);
5628 
5629   /// Orders the operands of the compare so that they are listed from most
5630   /// complex to least complex.  This puts constants before unary operators,
5631   /// before binary operators.
5632   if (Op0Cplxity < Op1Cplxity ||
5633       (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
5634     I.swapOperands();
5635     std::swap(Op0, Op1);
5636     Changed = true;
5637   }
5638 
5639   if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
5640     return replaceInstUsesWith(I, V);
5641 
5642   // Comparing -val or val with non-zero is the same as just comparing val
5643   // ie, abs(val) != 0 -> val != 0
5644   if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
5645     Value *Cond, *SelectTrue, *SelectFalse;
5646     if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
5647                             m_Value(SelectFalse)))) {
5648       if (Value *V = dyn_castNegVal(SelectTrue)) {
5649         if (V == SelectFalse)
5650           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5651       }
5652       else if (Value *V = dyn_castNegVal(SelectFalse)) {
5653         if (V == SelectTrue)
5654           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5655       }
5656     }
5657   }
5658 
5659   if (Op0->getType()->isIntOrIntVectorTy(1))
5660     if (Instruction *Res = canonicalizeICmpBool(I, Builder))
5661       return Res;
5662 
5663   if (Instruction *Res = canonicalizeCmpWithConstant(I))
5664     return Res;
5665 
5666   if (Instruction *Res = canonicalizeICmpPredicate(I))
5667     return Res;
5668 
5669   if (Instruction *Res = foldICmpWithConstant(I))
5670     return Res;
5671 
5672   if (Instruction *Res = foldICmpWithDominatingICmp(I))
5673     return Res;
5674 
5675   if (Instruction *Res = foldICmpBinOp(I, Q))
5676     return Res;
5677 
5678   if (Instruction *Res = foldICmpUsingKnownBits(I))
5679     return Res;
5680 
5681   // Test if the ICmpInst instruction is used exclusively by a select as
5682   // part of a minimum or maximum operation. If so, refrain from doing
5683   // any other folding. This helps out other analyses which understand
5684   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
5685   // and CodeGen. And in this case, at least one of the comparison
5686   // operands has at least one user besides the compare (the select),
5687   // which would often largely negate the benefit of folding anyway.
5688   //
5689   // Do the same for the other patterns recognized by matchSelectPattern.
5690   if (I.hasOneUse())
5691     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
5692       Value *A, *B;
5693       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
5694       if (SPR.Flavor != SPF_UNKNOWN)
5695         return nullptr;
5696     }
5697 
5698   // Do this after checking for min/max to prevent infinite looping.
5699   if (Instruction *Res = foldICmpWithZero(I))
5700     return Res;
5701 
5702   // FIXME: We only do this after checking for min/max to prevent infinite
5703   // looping caused by a reverse canonicalization of these patterns for min/max.
5704   // FIXME: The organization of folds is a mess. These would naturally go into
5705   // canonicalizeCmpWithConstant(), but we can't move all of the above folds
5706   // down here after the min/max restriction.
5707   ICmpInst::Predicate Pred = I.getPredicate();
5708   const APInt *C;
5709   if (match(Op1, m_APInt(C))) {
5710     // For i32: x >u 2147483647 -> x <s 0  -> true if sign bit set
5711     if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
5712       Constant *Zero = Constant::getNullValue(Op0->getType());
5713       return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
5714     }
5715 
5716     // For i32: x <u 2147483648 -> x >s -1  -> true if sign bit clear
5717     if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
5718       Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
5719       return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
5720     }
5721   }
5722 
5723   if (Instruction *Res = foldICmpInstWithConstant(I))
5724     return Res;
5725 
5726   // Try to match comparison as a sign bit test. Intentionally do this after
5727   // foldICmpInstWithConstant() to potentially let other folds to happen first.
5728   if (Instruction *New = foldSignBitTest(I))
5729     return New;
5730 
5731   if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
5732     return Res;
5733 
5734   // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
5735   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
5736     if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
5737       return NI;
5738   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
5739     if (Instruction *NI = foldGEPICmp(GEP, Op0,
5740                            ICmpInst::getSwappedPredicate(I.getPredicate()), I))
5741       return NI;
5742 
5743   // Try to optimize equality comparisons against alloca-based pointers.
5744   if (Op0->getType()->isPointerTy() && I.isEquality()) {
5745     assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
5746     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
5747       if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
5748         return New;
5749     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
5750       if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
5751         return New;
5752   }
5753 
5754   if (Instruction *Res = foldICmpBitCast(I, Builder))
5755     return Res;
5756 
5757   // TODO: Hoist this above the min/max bailout.
5758   if (Instruction *R = foldICmpWithCastOp(I))
5759     return R;
5760 
5761   if (Instruction *Res = foldICmpWithMinMax(I))
5762     return Res;
5763 
5764   {
5765     Value *A, *B;
5766     // Transform (A & ~B) == 0 --> (A & B) != 0
5767     // and       (A & ~B) != 0 --> (A & B) == 0
5768     // if A is a power of 2.
5769     if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
5770         match(Op1, m_Zero()) &&
5771         isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
5772       return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
5773                           Op1);
5774 
5775     // ~X < ~Y --> Y < X
5776     // ~X < C -->  X > ~C
5777     if (match(Op0, m_Not(m_Value(A)))) {
5778       if (match(Op1, m_Not(m_Value(B))))
5779         return new ICmpInst(I.getPredicate(), B, A);
5780 
5781       const APInt *C;
5782       if (match(Op1, m_APInt(C)))
5783         return new ICmpInst(I.getSwappedPredicate(), A,
5784                             ConstantInt::get(Op1->getType(), ~(*C)));
5785     }
5786 
5787     Instruction *AddI = nullptr;
5788     if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
5789                                      m_Instruction(AddI))) &&
5790         isa<IntegerType>(A->getType())) {
5791       Value *Result;
5792       Constant *Overflow;
5793       // m_UAddWithOverflow can match patterns that do not include  an explicit
5794       // "add" instruction, so check the opcode of the matched op.
5795       if (AddI->getOpcode() == Instruction::Add &&
5796           OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, A, B, *AddI,
5797                                 Result, Overflow)) {
5798         replaceInstUsesWith(*AddI, Result);
5799         eraseInstFromFunction(*AddI);
5800         return replaceInstUsesWith(I, Overflow);
5801       }
5802     }
5803 
5804     // (zext a) * (zext b)  --> llvm.umul.with.overflow.
5805     if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5806       if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
5807         return R;
5808     }
5809     if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5810       if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
5811         return R;
5812     }
5813   }
5814 
5815   if (Instruction *Res = foldICmpEquality(I))
5816     return Res;
5817 
5818   if (Instruction *Res = foldICmpOfUAddOv(I))
5819     return Res;
5820 
5821   // The 'cmpxchg' instruction returns an aggregate containing the old value and
5822   // an i1 which indicates whether or not we successfully did the swap.
5823   //
5824   // Replace comparisons between the old value and the expected value with the
5825   // indicator that 'cmpxchg' returns.
5826   //
5827   // N.B.  This transform is only valid when the 'cmpxchg' is not permitted to
5828   // spuriously fail.  In those cases, the old value may equal the expected
5829   // value but it is possible for the swap to not occur.
5830   if (I.getPredicate() == ICmpInst::ICMP_EQ)
5831     if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
5832       if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
5833         if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
5834             !ACXI->isWeak())
5835           return ExtractValueInst::Create(ACXI, 1);
5836 
5837   {
5838     Value *X;
5839     const APInt *C;
5840     // icmp X+Cst, X
5841     if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
5842       return foldICmpAddOpConst(X, *C, I.getPredicate());
5843 
5844     // icmp X, X+Cst
5845     if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X)
5846       return foldICmpAddOpConst(X, *C, I.getSwappedPredicate());
5847   }
5848 
5849   if (Instruction *Res = foldICmpWithHighBitMask(I, Builder))
5850     return Res;
5851 
5852   if (I.getType()->isVectorTy())
5853     if (Instruction *Res = foldVectorCmp(I, Builder))
5854       return Res;
5855 
5856   return Changed ? &I : nullptr;
5857 }
5858 
5859 /// Fold fcmp ([us]itofp x, cst) if possible.
5860 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
5861                                                     Instruction *LHSI,
5862                                                     Constant *RHSC) {
5863   if (!isa<ConstantFP>(RHSC)) return nullptr;
5864   const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5865 
5866   // Get the width of the mantissa.  We don't want to hack on conversions that
5867   // might lose information from the integer, e.g. "i64 -> float"
5868   int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5869   if (MantissaWidth == -1) return nullptr;  // Unknown.
5870 
5871   IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5872 
5873   bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5874 
5875   if (I.isEquality()) {
5876     FCmpInst::Predicate P = I.getPredicate();
5877     bool IsExact = false;
5878     APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
5879     RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
5880 
5881     // If the floating point constant isn't an integer value, we know if we will
5882     // ever compare equal / not equal to it.
5883     if (!IsExact) {
5884       // TODO: Can never be -0.0 and other non-representable values
5885       APFloat RHSRoundInt(RHS);
5886       RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
5887       if (RHS != RHSRoundInt) {
5888         if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
5889           return replaceInstUsesWith(I, Builder.getFalse());
5890 
5891         assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
5892         return replaceInstUsesWith(I, Builder.getTrue());
5893       }
5894     }
5895 
5896     // TODO: If the constant is exactly representable, is it always OK to do
5897     // equality compares as integer?
5898   }
5899 
5900   // Check to see that the input is converted from an integer type that is small
5901   // enough that preserves all bits.  TODO: check here for "known" sign bits.
5902   // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
5903   unsigned InputSize = IntTy->getScalarSizeInBits();
5904 
5905   // Following test does NOT adjust InputSize downwards for signed inputs,
5906   // because the most negative value still requires all the mantissa bits
5907   // to distinguish it from one less than that value.
5908   if ((int)InputSize > MantissaWidth) {
5909     // Conversion would lose accuracy. Check if loss can impact comparison.
5910     int Exp = ilogb(RHS);
5911     if (Exp == APFloat::IEK_Inf) {
5912       int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
5913       if (MaxExponent < (int)InputSize - !LHSUnsigned)
5914         // Conversion could create infinity.
5915         return nullptr;
5916     } else {
5917       // Note that if RHS is zero or NaN, then Exp is negative
5918       // and first condition is trivially false.
5919       if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
5920         // Conversion could affect comparison.
5921         return nullptr;
5922     }
5923   }
5924 
5925   // Otherwise, we can potentially simplify the comparison.  We know that it
5926   // will always come through as an integer value and we know the constant is
5927   // not a NAN (it would have been previously simplified).
5928   assert(!RHS.isNaN() && "NaN comparison not already folded!");
5929 
5930   ICmpInst::Predicate Pred;
5931   switch (I.getPredicate()) {
5932   default: llvm_unreachable("Unexpected predicate!");
5933   case FCmpInst::FCMP_UEQ:
5934   case FCmpInst::FCMP_OEQ:
5935     Pred = ICmpInst::ICMP_EQ;
5936     break;
5937   case FCmpInst::FCMP_UGT:
5938   case FCmpInst::FCMP_OGT:
5939     Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5940     break;
5941   case FCmpInst::FCMP_UGE:
5942   case FCmpInst::FCMP_OGE:
5943     Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5944     break;
5945   case FCmpInst::FCMP_ULT:
5946   case FCmpInst::FCMP_OLT:
5947     Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5948     break;
5949   case FCmpInst::FCMP_ULE:
5950   case FCmpInst::FCMP_OLE:
5951     Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5952     break;
5953   case FCmpInst::FCMP_UNE:
5954   case FCmpInst::FCMP_ONE:
5955     Pred = ICmpInst::ICMP_NE;
5956     break;
5957   case FCmpInst::FCMP_ORD:
5958     return replaceInstUsesWith(I, Builder.getTrue());
5959   case FCmpInst::FCMP_UNO:
5960     return replaceInstUsesWith(I, Builder.getFalse());
5961   }
5962 
5963   // Now we know that the APFloat is a normal number, zero or inf.
5964 
5965   // See if the FP constant is too large for the integer.  For example,
5966   // comparing an i8 to 300.0.
5967   unsigned IntWidth = IntTy->getScalarSizeInBits();
5968 
5969   if (!LHSUnsigned) {
5970     // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
5971     // and large values.
5972     APFloat SMax(RHS.getSemantics());
5973     SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5974                           APFloat::rmNearestTiesToEven);
5975     if (SMax < RHS) { // smax < 13123.0
5976       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
5977           Pred == ICmpInst::ICMP_SLE)
5978         return replaceInstUsesWith(I, Builder.getTrue());
5979       return replaceInstUsesWith(I, Builder.getFalse());
5980     }
5981   } else {
5982     // If the RHS value is > UnsignedMax, fold the comparison. This handles
5983     // +INF and large values.
5984     APFloat UMax(RHS.getSemantics());
5985     UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5986                           APFloat::rmNearestTiesToEven);
5987     if (UMax < RHS) { // umax < 13123.0
5988       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
5989           Pred == ICmpInst::ICMP_ULE)
5990         return replaceInstUsesWith(I, Builder.getTrue());
5991       return replaceInstUsesWith(I, Builder.getFalse());
5992     }
5993   }
5994 
5995   if (!LHSUnsigned) {
5996     // See if the RHS value is < SignedMin.
5997     APFloat SMin(RHS.getSemantics());
5998     SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5999                           APFloat::rmNearestTiesToEven);
6000     if (SMin > RHS) { // smin > 12312.0
6001       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
6002           Pred == ICmpInst::ICMP_SGE)
6003         return replaceInstUsesWith(I, Builder.getTrue());
6004       return replaceInstUsesWith(I, Builder.getFalse());
6005     }
6006   } else {
6007     // See if the RHS value is < UnsignedMin.
6008     APFloat UMin(RHS.getSemantics());
6009     UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
6010                           APFloat::rmNearestTiesToEven);
6011     if (UMin > RHS) { // umin > 12312.0
6012       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
6013           Pred == ICmpInst::ICMP_UGE)
6014         return replaceInstUsesWith(I, Builder.getTrue());
6015       return replaceInstUsesWith(I, Builder.getFalse());
6016     }
6017   }
6018 
6019   // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
6020   // [0, UMAX], but it may still be fractional.  See if it is fractional by
6021   // casting the FP value to the integer value and back, checking for equality.
6022   // Don't do this for zero, because -0.0 is not fractional.
6023   Constant *RHSInt = LHSUnsigned
6024     ? ConstantExpr::getFPToUI(RHSC, IntTy)
6025     : ConstantExpr::getFPToSI(RHSC, IntTy);
6026   if (!RHS.isZero()) {
6027     bool Equal = LHSUnsigned
6028       ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
6029       : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
6030     if (!Equal) {
6031       // If we had a comparison against a fractional value, we have to adjust
6032       // the compare predicate and sometimes the value.  RHSC is rounded towards
6033       // zero at this point.
6034       switch (Pred) {
6035       default: llvm_unreachable("Unexpected integer comparison!");
6036       case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
6037         return replaceInstUsesWith(I, Builder.getTrue());
6038       case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
6039         return replaceInstUsesWith(I, Builder.getFalse());
6040       case ICmpInst::ICMP_ULE:
6041         // (float)int <= 4.4   --> int <= 4
6042         // (float)int <= -4.4  --> false
6043         if (RHS.isNegative())
6044           return replaceInstUsesWith(I, Builder.getFalse());
6045         break;
6046       case ICmpInst::ICMP_SLE:
6047         // (float)int <= 4.4   --> int <= 4
6048         // (float)int <= -4.4  --> int < -4
6049         if (RHS.isNegative())
6050           Pred = ICmpInst::ICMP_SLT;
6051         break;
6052       case ICmpInst::ICMP_ULT:
6053         // (float)int < -4.4   --> false
6054         // (float)int < 4.4    --> int <= 4
6055         if (RHS.isNegative())
6056           return replaceInstUsesWith(I, Builder.getFalse());
6057         Pred = ICmpInst::ICMP_ULE;
6058         break;
6059       case ICmpInst::ICMP_SLT:
6060         // (float)int < -4.4   --> int < -4
6061         // (float)int < 4.4    --> int <= 4
6062         if (!RHS.isNegative())
6063           Pred = ICmpInst::ICMP_SLE;
6064         break;
6065       case ICmpInst::ICMP_UGT:
6066         // (float)int > 4.4    --> int > 4
6067         // (float)int > -4.4   --> true
6068         if (RHS.isNegative())
6069           return replaceInstUsesWith(I, Builder.getTrue());
6070         break;
6071       case ICmpInst::ICMP_SGT:
6072         // (float)int > 4.4    --> int > 4
6073         // (float)int > -4.4   --> int >= -4
6074         if (RHS.isNegative())
6075           Pred = ICmpInst::ICMP_SGE;
6076         break;
6077       case ICmpInst::ICMP_UGE:
6078         // (float)int >= -4.4   --> true
6079         // (float)int >= 4.4    --> int > 4
6080         if (RHS.isNegative())
6081           return replaceInstUsesWith(I, Builder.getTrue());
6082         Pred = ICmpInst::ICMP_UGT;
6083         break;
6084       case ICmpInst::ICMP_SGE:
6085         // (float)int >= -4.4   --> int >= -4
6086         // (float)int >= 4.4    --> int > 4
6087         if (!RHS.isNegative())
6088           Pred = ICmpInst::ICMP_SGT;
6089         break;
6090       }
6091     }
6092   }
6093 
6094   // Lower this FP comparison into an appropriate integer version of the
6095   // comparison.
6096   return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
6097 }
6098 
6099 /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
6100 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
6101                                               Constant *RHSC) {
6102   // When C is not 0.0 and infinities are not allowed:
6103   // (C / X) < 0.0 is a sign-bit test of X
6104   // (C / X) < 0.0 --> X < 0.0 (if C is positive)
6105   // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
6106   //
6107   // Proof:
6108   // Multiply (C / X) < 0.0 by X * X / C.
6109   // - X is non zero, if it is the flag 'ninf' is violated.
6110   // - C defines the sign of X * X * C. Thus it also defines whether to swap
6111   //   the predicate. C is also non zero by definition.
6112   //
6113   // Thus X * X / C is non zero and the transformation is valid. [qed]
6114 
6115   FCmpInst::Predicate Pred = I.getPredicate();
6116 
6117   // Check that predicates are valid.
6118   if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
6119       (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
6120     return nullptr;
6121 
6122   // Check that RHS operand is zero.
6123   if (!match(RHSC, m_AnyZeroFP()))
6124     return nullptr;
6125 
6126   // Check fastmath flags ('ninf').
6127   if (!LHSI->hasNoInfs() || !I.hasNoInfs())
6128     return nullptr;
6129 
6130   // Check the properties of the dividend. It must not be zero to avoid a
6131   // division by zero (see Proof).
6132   const APFloat *C;
6133   if (!match(LHSI->getOperand(0), m_APFloat(C)))
6134     return nullptr;
6135 
6136   if (C->isZero())
6137     return nullptr;
6138 
6139   // Get swapped predicate if necessary.
6140   if (C->isNegative())
6141     Pred = I.getSwappedPredicate();
6142 
6143   return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
6144 }
6145 
6146 /// Optimize fabs(X) compared with zero.
6147 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
6148   Value *X;
6149   if (!match(I.getOperand(0), m_FAbs(m_Value(X))) ||
6150       !match(I.getOperand(1), m_PosZeroFP()))
6151     return nullptr;
6152 
6153   auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
6154     I->setPredicate(P);
6155     return IC.replaceOperand(*I, 0, X);
6156   };
6157 
6158   switch (I.getPredicate()) {
6159   case FCmpInst::FCMP_UGE:
6160   case FCmpInst::FCMP_OLT:
6161     // fabs(X) >= 0.0 --> true
6162     // fabs(X) <  0.0 --> false
6163     llvm_unreachable("fcmp should have simplified");
6164 
6165   case FCmpInst::FCMP_OGT:
6166     // fabs(X) > 0.0 --> X != 0.0
6167     return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
6168 
6169   case FCmpInst::FCMP_UGT:
6170     // fabs(X) u> 0.0 --> X u!= 0.0
6171     return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
6172 
6173   case FCmpInst::FCMP_OLE:
6174     // fabs(X) <= 0.0 --> X == 0.0
6175     return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
6176 
6177   case FCmpInst::FCMP_ULE:
6178     // fabs(X) u<= 0.0 --> X u== 0.0
6179     return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
6180 
6181   case FCmpInst::FCMP_OGE:
6182     // fabs(X) >= 0.0 --> !isnan(X)
6183     assert(!I.hasNoNaNs() && "fcmp should have simplified");
6184     return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
6185 
6186   case FCmpInst::FCMP_ULT:
6187     // fabs(X) u< 0.0 --> isnan(X)
6188     assert(!I.hasNoNaNs() && "fcmp should have simplified");
6189     return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
6190 
6191   case FCmpInst::FCMP_OEQ:
6192   case FCmpInst::FCMP_UEQ:
6193   case FCmpInst::FCMP_ONE:
6194   case FCmpInst::FCMP_UNE:
6195   case FCmpInst::FCMP_ORD:
6196   case FCmpInst::FCMP_UNO:
6197     // Look through the fabs() because it doesn't change anything but the sign.
6198     // fabs(X) == 0.0 --> X == 0.0,
6199     // fabs(X) != 0.0 --> X != 0.0
6200     // isnan(fabs(X)) --> isnan(X)
6201     // !isnan(fabs(X) --> !isnan(X)
6202     return replacePredAndOp0(&I, I.getPredicate(), X);
6203 
6204   default:
6205     return nullptr;
6206   }
6207 }
6208 
6209 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
6210   bool Changed = false;
6211 
6212   /// Orders the operands of the compare so that they are listed from most
6213   /// complex to least complex.  This puts constants before unary operators,
6214   /// before binary operators.
6215   if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6216     I.swapOperands();
6217     Changed = true;
6218   }
6219 
6220   const CmpInst::Predicate Pred = I.getPredicate();
6221   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6222   if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
6223                                   SQ.getWithInstruction(&I)))
6224     return replaceInstUsesWith(I, V);
6225 
6226   // Simplify 'fcmp pred X, X'
6227   Type *OpType = Op0->getType();
6228   assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
6229   if (Op0 == Op1) {
6230     switch (Pred) {
6231       default: break;
6232     case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
6233     case FCmpInst::FCMP_ULT:    // True if unordered or less than
6234     case FCmpInst::FCMP_UGT:    // True if unordered or greater than
6235     case FCmpInst::FCMP_UNE:    // True if unordered or not equal
6236       // Canonicalize these to be 'fcmp uno %X, 0.0'.
6237       I.setPredicate(FCmpInst::FCMP_UNO);
6238       I.setOperand(1, Constant::getNullValue(OpType));
6239       return &I;
6240 
6241     case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
6242     case FCmpInst::FCMP_OEQ:    // True if ordered and equal
6243     case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
6244     case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
6245       // Canonicalize these to be 'fcmp ord %X, 0.0'.
6246       I.setPredicate(FCmpInst::FCMP_ORD);
6247       I.setOperand(1, Constant::getNullValue(OpType));
6248       return &I;
6249     }
6250   }
6251 
6252   // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
6253   // then canonicalize the operand to 0.0.
6254   if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
6255     if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI))
6256       return replaceOperand(I, 0, ConstantFP::getNullValue(OpType));
6257 
6258     if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI))
6259       return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6260   }
6261 
6262   // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
6263   Value *X, *Y;
6264   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
6265     return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
6266 
6267   // Test if the FCmpInst instruction is used exclusively by a select as
6268   // part of a minimum or maximum operation. If so, refrain from doing
6269   // any other folding. This helps out other analyses which understand
6270   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6271   // and CodeGen. And in this case, at least one of the comparison
6272   // operands has at least one user besides the compare (the select),
6273   // which would often largely negate the benefit of folding anyway.
6274   if (I.hasOneUse())
6275     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
6276       Value *A, *B;
6277       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
6278       if (SPR.Flavor != SPF_UNKNOWN)
6279         return nullptr;
6280     }
6281 
6282   // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
6283   // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
6284   if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
6285     return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6286 
6287   // Handle fcmp with instruction LHS and constant RHS.
6288   Instruction *LHSI;
6289   Constant *RHSC;
6290   if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
6291     switch (LHSI->getOpcode()) {
6292     case Instruction::PHI:
6293       // Only fold fcmp into the PHI if the phi and fcmp are in the same
6294       // block.  If in the same block, we're encouraging jump threading.  If
6295       // not, we are just pessimizing the code by making an i1 phi.
6296       if (LHSI->getParent() == I.getParent())
6297         if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
6298           return NV;
6299       break;
6300     case Instruction::SIToFP:
6301     case Instruction::UIToFP:
6302       if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
6303         return NV;
6304       break;
6305     case Instruction::FDiv:
6306       if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
6307         return NV;
6308       break;
6309     case Instruction::Load:
6310       if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
6311         if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6312           if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
6313               !cast<LoadInst>(LHSI)->isVolatile())
6314             if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
6315               return Res;
6316       break;
6317   }
6318   }
6319 
6320   if (Instruction *R = foldFabsWithFcmpZero(I, *this))
6321     return R;
6322 
6323   if (match(Op0, m_FNeg(m_Value(X)))) {
6324     // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
6325     Constant *C;
6326     if (match(Op1, m_Constant(C))) {
6327       Constant *NegC = ConstantExpr::getFNeg(C);
6328       return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
6329     }
6330   }
6331 
6332   if (match(Op0, m_FPExt(m_Value(X)))) {
6333     // fcmp (fpext X), (fpext Y) -> fcmp X, Y
6334     if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
6335       return new FCmpInst(Pred, X, Y, "", &I);
6336 
6337     // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
6338     const APFloat *C;
6339     if (match(Op1, m_APFloat(C))) {
6340       const fltSemantics &FPSem =
6341           X->getType()->getScalarType()->getFltSemantics();
6342       bool Lossy;
6343       APFloat TruncC = *C;
6344       TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
6345 
6346       // Avoid lossy conversions and denormals.
6347       // Zero is a special case that's OK to convert.
6348       APFloat Fabs = TruncC;
6349       Fabs.clearSign();
6350       if (!Lossy &&
6351           (!(Fabs < APFloat::getSmallestNormalized(FPSem)) || Fabs.isZero())) {
6352         Constant *NewC = ConstantFP::get(X->getType(), TruncC);
6353         return new FCmpInst(Pred, X, NewC, "", &I);
6354       }
6355     }
6356   }
6357 
6358   // Convert a sign-bit test of an FP value into a cast and integer compare.
6359   // TODO: Simplify if the copysign constant is 0.0 or NaN.
6360   // TODO: Handle non-zero compare constants.
6361   // TODO: Handle other predicates.
6362   const APFloat *C;
6363   if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::copysign>(m_APFloat(C),
6364                                                            m_Value(X)))) &&
6365       match(Op1, m_AnyZeroFP()) && !C->isZero() && !C->isNaN()) {
6366     Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits());
6367     if (auto *VecTy = dyn_cast<VectorType>(OpType))
6368       IntType = VectorType::get(IntType, VecTy->getElementCount());
6369 
6370     // copysign(non-zero constant, X) < 0.0 --> (bitcast X) < 0
6371     if (Pred == FCmpInst::FCMP_OLT) {
6372       Value *IntX = Builder.CreateBitCast(X, IntType);
6373       return new ICmpInst(ICmpInst::ICMP_SLT, IntX,
6374                           ConstantInt::getNullValue(IntType));
6375     }
6376   }
6377 
6378   if (I.getType()->isVectorTy())
6379     if (Instruction *Res = foldVectorCmp(I, Builder))
6380       return Res;
6381 
6382   return Changed ? &I : nullptr;
6383 }
6384