1 //===- InstCombineCompares.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitICmp and visitFCmp functions.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APSInt.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/CmpInstAnalysis.h"
18 #include "llvm/Analysis/ConstantFolding.h"
19 #include "llvm/Analysis/InstructionSimplify.h"
20 #include "llvm/IR/ConstantRange.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/GetElementPtrTypeIterator.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/PatternMatch.h"
25 #include "llvm/Support/KnownBits.h"
26 #include "llvm/Transforms/InstCombine/InstCombiner.h"
27
28 using namespace llvm;
29 using namespace PatternMatch;
30
31 #define DEBUG_TYPE "instcombine"
32
33 // How many times is a select replaced by one of its operands?
34 STATISTIC(NumSel, "Number of select opts");
35
36
37 /// Compute Result = In1+In2, returning true if the result overflowed for this
38 /// type.
39 static bool addWithOverflow(APInt &Result, const APInt &In1,
40 const APInt &In2, bool IsSigned = false) {
41 bool Overflow;
42 if (IsSigned)
43 Result = In1.sadd_ov(In2, Overflow);
44 else
45 Result = In1.uadd_ov(In2, Overflow);
46
47 return Overflow;
48 }
49
50 /// Compute Result = In1-In2, returning true if the result overflowed for this
51 /// type.
52 static bool subWithOverflow(APInt &Result, const APInt &In1,
53 const APInt &In2, bool IsSigned = false) {
54 bool Overflow;
55 if (IsSigned)
56 Result = In1.ssub_ov(In2, Overflow);
57 else
58 Result = In1.usub_ov(In2, Overflow);
59
60 return Overflow;
61 }
62
63 /// Given an icmp instruction, return true if any use of this comparison is a
64 /// branch on sign bit comparison.
65 static bool hasBranchUse(ICmpInst &I) {
66 for (auto *U : I.users())
67 if (isa<BranchInst>(U))
68 return true;
69 return false;
70 }
71
72 /// Returns true if the exploded icmp can be expressed as a signed comparison
73 /// to zero and updates the predicate accordingly.
74 /// The signedness of the comparison is preserved.
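/// For example, (X s< 1) can be rewritten as (X s<= 0) and (X s> -1) as
/// (X s>= 0); both forms compare against zero.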
75 /// TODO: Refactor with decomposeBitTestICmp()?
76 static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
77 if (!ICmpInst::isSigned(Pred))
78 return false;
79
80 if (C.isZero())
81 return ICmpInst::isRelational(Pred);
82
83 if (C.isOne()) {
84 if (Pred == ICmpInst::ICMP_SLT) {
85 Pred = ICmpInst::ICMP_SLE;
86 return true;
87 }
88 } else if (C.isAllOnes()) {
89 if (Pred == ICmpInst::ICMP_SGT) {
90 Pred = ICmpInst::ICMP_SGE;
91 return true;
92 }
93 }
94
95 return false;
96 }
97
98 /// This is called when we see this pattern:
99 /// cmp pred (load (gep GV, ...)), cmpcst
100 /// where GV is a global variable with a constant initializer. Try to simplify
101 /// this into some simple computation that does not need the load. For example
102 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
103 ///
104 /// If AndCst is non-null, then the loaded value is masked with that constant
105 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
106 Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
107 LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI,
108 ConstantInt *AndCst) {
109 if (LI->isVolatile() || LI->getType() != GEP->getResultElementType() ||
110 GV->getValueType() != GEP->getSourceElementType() ||
111 !GV->isConstant() || !GV->hasDefinitiveInitializer())
112 return nullptr;
113
114 Constant *Init = GV->getInitializer();
115 if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
116 return nullptr;
117
118 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
119 // Don't blow up on huge arrays.
120 if (ArrayElementCount > MaxArraySizeForCombine)
121 return nullptr;
122
123 // There are many forms of this optimization we can handle, for now, just do
124 // the simple index into a single-dimensional array.
125 //
126 // Require: GEP GV, 0, i {{, constant indices}}
127 if (GEP->getNumOperands() < 3 ||
128 !isa<ConstantInt>(GEP->getOperand(1)) ||
129 !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
130 isa<Constant>(GEP->getOperand(2)))
131 return nullptr;
132
133 // Check that indices after the variable are constants and in-range for the
134 // type they index. Collect the indices. This is typically for arrays of
135 // structs.
136 SmallVector<unsigned, 4> LaterIndices;
137
138 Type *EltTy = Init->getType()->getArrayElementType();
139 for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
140 ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
141 if (!Idx) return nullptr; // Variable index.
142
143 uint64_t IdxVal = Idx->getZExtValue();
144 if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
145
146 if (StructType *STy = dyn_cast<StructType>(EltTy))
147 EltTy = STy->getElementType(IdxVal);
148 else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
149 if (IdxVal >= ATy->getNumElements()) return nullptr;
150 EltTy = ATy->getElementType();
151 } else {
152 return nullptr; // Unknown type.
153 }
154
155 LaterIndices.push_back(IdxVal);
156 }
157
158 enum { Overdefined = -3, Undefined = -2 };
159
160 // Variables for our state machines.
161
162 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
163 // "i == 47 | i == 87", where 47 is the first index the condition is true for,
164 // and 87 is the second (and last) index. FirstTrueElement is -2 when
165 // undefined, otherwise set to the first true element. SecondTrueElement is
166 // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
167 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
168
169 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
170 // form "i != 47 & i != 87". Same state transitions as for true elements.
171 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
172
173 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
174 /// define a state machine that triggers for ranges of values that the index
175 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
176 /// This is -2 when undefined, -3 when overdefined, and otherwise the last
177 /// index in the range (inclusive). We use -2 for undefined here because we
178 /// use relative comparisons and don't want 0-1 to match -1.
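/// For example, "abbbbc"[i] == 'b' is true exactly for i in [1, 4], so
/// FirstTrueElement becomes 1, TrueRangeEnd becomes 4, and the emitted check
/// is (i-1) <u 4.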
179 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
180
181 // MagicBitvector - This is a magic bitvector where we set a bit if the
182 // comparison is true for element 'i'. If there are 64 elements or less in
183 // the array, this will fully represent all the comparison results.
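// For example, if the comparison is true only for elements 1 and 3, the
// bitvector is 0b1010 and the final fold below emits
// ((0b1010 >> i) & 1) != 0.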
184 uint64_t MagicBitvector = 0;
185
186 // Scan the array and see if one of our patterns matches.
187 Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
188 for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
189 Constant *Elt = Init->getAggregateElement(i);
190 if (!Elt) return nullptr;
191
192 // If this is indexing an array of structures, get the structure element.
193 if (!LaterIndices.empty()) {
194 Elt = ConstantFoldExtractValueInstruction(Elt, LaterIndices);
195 if (!Elt)
196 return nullptr;
197 }
198
199 // If the element is masked, handle it.
200 if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
201
202 // Find out if the comparison would be true or false for the i'th element.
203 Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
204 CompareRHS, DL, &TLI);
205 // If the result is undef for this element, ignore it.
206 if (isa<UndefValue>(C)) {
207 // Extend range state machines to cover this element in case there is an
208 // undef in the middle of the range.
209 if (TrueRangeEnd == (int)i-1)
210 TrueRangeEnd = i;
211 if (FalseRangeEnd == (int)i-1)
212 FalseRangeEnd = i;
213 continue;
214 }
215
216 // If we can't compute the result for any of the elements, we have to give
217 // up evaluating the entire conditional.
218 if (!isa<ConstantInt>(C)) return nullptr;
219
220 // Otherwise, we know if the comparison is true or false for this element,
221 // update our state machines.
222 bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
223
224 // State machine for single/double/range index comparison.
225 if (IsTrueForElt) {
226 // Update the TrueElement state machine.
227 if (FirstTrueElement == Undefined)
228 FirstTrueElement = TrueRangeEnd = i; // First true element.
229 else {
230 // Update double-compare state machine.
231 if (SecondTrueElement == Undefined)
232 SecondTrueElement = i;
233 else
234 SecondTrueElement = Overdefined;
235
236 // Update range state machine.
237 if (TrueRangeEnd == (int)i-1)
238 TrueRangeEnd = i;
239 else
240 TrueRangeEnd = Overdefined;
241 }
242 } else {
243 // Update the FalseElement state machine.
244 if (FirstFalseElement == Undefined)
245 FirstFalseElement = FalseRangeEnd = i; // First false element.
246 else {
247 // Update double-compare state machine.
248 if (SecondFalseElement == Undefined)
249 SecondFalseElement = i;
250 else
251 SecondFalseElement = Overdefined;
252
253 // Update range state machine.
254 if (FalseRangeEnd == (int)i-1)
255 FalseRangeEnd = i;
256 else
257 FalseRangeEnd = Overdefined;
258 }
259 }
260
261 // If this element is in range, update our magic bitvector.
262 if (i < 64 && IsTrueForElt)
263 MagicBitvector |= 1ULL << i;
264
265 // If all of our states become overdefined, bail out early. Since the
266 // predicate is expensive, only check it every 8 elements. This is only
267 // really useful for really huge arrays.
268 if ((i & 7) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
269 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
270 FalseRangeEnd == Overdefined)
271 return nullptr;
272 }
273
274 // Now that we've scanned the entire array, emit our new comparison(s). We
275 // order the state machines in complexity of the generated code.
276 Value *Idx = GEP->getOperand(2);
277
278 // If the index is larger than the pointer size of the target, truncate the
279 // index down like the GEP would do implicitly. We don't have to do this for
280 // an inbounds GEP because the index can't be out of range.
281 if (!GEP->isInBounds()) {
282 Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
283 unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
284 if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize)
285 Idx = Builder.CreateTrunc(Idx, IntPtrTy);
286 }
287
288 // If inbounds keyword is not present, Idx * ElementSize can overflow.
289 // Let's assume that ElementSize is 2 and the wanted value is at offset 0.
290 // Then, there are two possible values for Idx to match offset 0:
291 // 0x00..00, 0x80..00.
292 // Emitting 'icmp eq Idx, 0' isn't correct in this case because the
293 // comparison is false if Idx was 0x80..00.
294 // We need to erase the highest countTrailingZeros(ElementSize) bits of Idx.
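// For example, with ElementSize == 4 and a 32-bit index, we emit
// Idx & (-1 u>> 2): the indices 0x0, 0x40000000, 0x80000000 and 0xC0000000
// all address offset 0 once the multiplication wraps, and all map to the
// same masked value.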
295 unsigned ElementSize =
296 DL.getTypeAllocSize(Init->getType()->getArrayElementType());
297 auto MaskIdx = [&](Value* Idx){
298 if (!GEP->isInBounds() && countTrailingZeros(ElementSize) != 0) {
299 Value *Mask = ConstantInt::get(Idx->getType(), -1);
300 Mask = Builder.CreateLShr(Mask, countTrailingZeros(ElementSize));
301 Idx = Builder.CreateAnd(Idx, Mask);
302 }
303 return Idx;
304 };
305
306 // If the comparison is only true for one or two elements, emit direct
307 // comparisons.
308 if (SecondTrueElement != Overdefined) {
309 Idx = MaskIdx(Idx);
310 // None true -> false.
311 if (FirstTrueElement == Undefined)
312 return replaceInstUsesWith(ICI, Builder.getFalse());
313
314 Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
315
316 // True for one element -> 'i == 47'.
317 if (SecondTrueElement == Undefined)
318 return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
319
320 // True for two elements -> 'i == 47 | i == 72'.
321 Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
322 Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
323 Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
324 return BinaryOperator::CreateOr(C1, C2);
325 }
326
327 // If the comparison is only false for one or two elements, emit direct
328 // comparisons.
329 if (SecondFalseElement != Overdefined) {
330 Idx = MaskIdx(Idx);
331 // None false -> true.
332 if (FirstFalseElement == Undefined)
333 return replaceInstUsesWith(ICI, Builder.getTrue());
334
335 Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
336
337 // False for one element -> 'i != 47'.
338 if (SecondFalseElement == Undefined)
339 return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
340
341 // False for two elements -> 'i != 47 & i != 72'.
342 Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
343 Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
344 Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
345 return BinaryOperator::CreateAnd(C1, C2);
346 }
347
348 // If the comparison can be replaced with a range comparison for the elements
349 // where it is true, emit the range check.
350 if (TrueRangeEnd != Overdefined) {
351 assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
352 Idx = MaskIdx(Idx);
353
354 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
355 if (FirstTrueElement) {
356 Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
357 Idx = Builder.CreateAdd(Idx, Offs);
358 }
359
360 Value *End = ConstantInt::get(Idx->getType(),
361 TrueRangeEnd-FirstTrueElement+1);
362 return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
363 }
364
365 // False range check.
366 if (FalseRangeEnd != Overdefined) {
367 assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
368 Idx = MaskIdx(Idx);
369 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
370 if (FirstFalseElement) {
371 Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
372 Idx = Builder.CreateAdd(Idx, Offs);
373 }
374
375 Value *End = ConstantInt::get(Idx->getType(),
376 FalseRangeEnd-FirstFalseElement);
377 return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
378 }
379
380 // If a magic bitvector captures the entire comparison state
381 // of this load, replace it with computation that does:
382 // ((magic_cst >> i) & 1) != 0
383 {
384 Type *Ty = nullptr;
385
386 // Look for an appropriate type:
387 // - The type of Idx if the magic fits
388 // - The smallest fitting legal type
389 if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
390 Ty = Idx->getType();
391 else
392 Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
393
394 if (Ty) {
395 Idx = MaskIdx(Idx);
396 Value *V = Builder.CreateIntCast(Idx, Ty, false);
397 V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
398 V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
399 return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
400 }
401 }
402
403 return nullptr;
404 }
405
406 /// Return a value that can be used to compare the *offset* implied by a GEP to
407 /// zero. For example, if we have &A[i], we want to return 'i' for
408 /// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
409 /// are involved. The above expression would also be legal to codegen as
410 /// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
411 /// This latter form is less amenable to optimization though, and we are allowed
412 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
413 ///
414 /// If we can't emit an optimized form for this expression, this returns null.
415 ///
416 static Value *evaluateGEPOffsetExpression(User *GEP, InstCombinerImpl &IC,
417 const DataLayout &DL) {
418 gep_type_iterator GTI = gep_type_begin(GEP);
419
420 // Check to see if this gep only has a single variable index. If so, and if
421 // any constant indices are a multiple of its scale, then we can compute this
422 // in terms of the scale of the variable index. For example, if the GEP
423 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
424 // because the expression will cross zero at the same point.
425 unsigned i, e = GEP->getNumOperands();
426 int64_t Offset = 0;
427 for (i = 1; i != e; ++i, ++GTI) {
428 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
429 // Compute the aggregate offset of constant indices.
430 if (CI->isZero()) continue;
431
432 // Handle a struct index, which adds its field offset to the pointer.
433 if (StructType *STy = GTI.getStructTypeOrNull()) {
434 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
435 } else {
436 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
437 Offset += Size*CI->getSExtValue();
438 }
439 } else {
440 // Found our variable index.
441 break;
442 }
443 }
444
445 // If there are no variable indices, we must have a constant offset, just
446 // evaluate it the general way.
447 if (i == e) return nullptr;
448
449 Value *VariableIdx = GEP->getOperand(i);
450 // Determine the scale factor of the variable element. For example, this is
451 // 4 if the variable index is into an array of i32.
452 uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
453
454 // Verify that there are no other variable indices. If so, emit the hard way.
455 for (++i, ++GTI; i != e; ++i, ++GTI) {
456 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
457 if (!CI) return nullptr;
458
459 // Compute the aggregate offset of constant indices.
460 if (CI->isZero()) continue;
461
462 // Handle a struct index, which adds its field offset to the pointer.
463 if (StructType *STy = GTI.getStructTypeOrNull()) {
464 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
465 } else {
466 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
467 Offset += Size*CI->getSExtValue();
468 }
469 }
470
471 // Okay, we know we have a single variable index, which must be a
472 // pointer/array/vector index. If there is no offset, life is simple, return
473 // the index.
474 Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
475 unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
476 if (Offset == 0) {
477 // Cast to intptrty in case a truncation occurs. If an extension is needed,
478 // we don't need to bother extending: the extension won't affect where the
479 // computation crosses zero.
480 if (VariableIdx->getType()->getPrimitiveSizeInBits().getFixedSize() >
481 IntPtrWidth) {
482 VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
483 }
484 return VariableIdx;
485 }
486
487 // Otherwise, there is an index. The computation we will do will be modulo
488 // the pointer size.
489 Offset = SignExtend64(Offset, IntPtrWidth);
490 VariableScale = SignExtend64(VariableScale, IntPtrWidth);
491
492 // To do this transformation, any constant index must be a multiple of the
493 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
494 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
495 // multiple of the variable scale.
496 int64_t NewOffs = Offset / (int64_t)VariableScale;
497 if (Offset != NewOffs*(int64_t)VariableScale)
498 return nullptr;
499
500 // Okay, we can do this evaluation. Start by converting the index to intptr.
501 if (VariableIdx->getType() != IntPtrTy)
502 VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
503 true /*Signed*/);
504 Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
505 return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
506 }
507
508 /// Returns true if we can rewrite Start as a GEP with pointer Base
509 /// and some integer offset. The nodes that need to be re-written
510 /// for this transformation will be added to Explored.
511 static bool canRewriteGEPAsOffset(Type *ElemTy, Value *Start, Value *Base,
512 const DataLayout &DL,
513 SetVector<Value *> &Explored) {
514 SmallVector<Value *, 16> WorkList(1, Start);
515 Explored.insert(Base);
516
517 // The following traversal gives us an order which can be used
518 // when doing the final transformation. Since in the final
519 // transformation we create the PHI replacement instructions first,
520 // we don't have to get them in any particular order.
521 //
522 // However, for other instructions we will have to traverse the
523 // operands of an instruction first, which means that we have to
524 // do a post-order traversal.
525 while (!WorkList.empty()) {
526 SetVector<PHINode *> PHIs;
527
528 while (!WorkList.empty()) {
529 if (Explored.size() >= 100)
530 return false;
531
532 Value *V = WorkList.back();
533
534 if (Explored.contains(V)) {
535 WorkList.pop_back();
536 continue;
537 }
538
539 if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
540 !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
541 // We've found some value that we can't explore which is different from
542 // the base. Therefore we can't do this transformation.
543 return false;
544
545 if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
546 auto *CI = cast<CastInst>(V);
547 if (!CI->isNoopCast(DL))
548 return false;
549
550 if (!Explored.contains(CI->getOperand(0)))
551 WorkList.push_back(CI->getOperand(0));
552 }
553
554 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
555 // We're limiting the GEP to having one index. This will preserve
556 // the original pointer type. We could handle more cases in the
557 // future.
558 if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
559 GEP->getSourceElementType() != ElemTy)
560 return false;
561
562 if (!Explored.contains(GEP->getOperand(0)))
563 WorkList.push_back(GEP->getOperand(0));
564 }
565
566 if (WorkList.back() == V) {
567 WorkList.pop_back();
568 // We've finished visiting this node, mark it as such.
569 Explored.insert(V);
570 }
571
572 if (auto *PN = dyn_cast<PHINode>(V)) {
573 // We cannot transform PHIs on unsplittable basic blocks.
574 if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
575 return false;
576 Explored.insert(PN);
577 PHIs.insert(PN);
578 }
579 }
580
581 // Explore the PHI nodes further.
582 for (auto *PN : PHIs)
583 for (Value *Op : PN->incoming_values())
584 if (!Explored.contains(Op))
585 WorkList.push_back(Op);
586 }
587
588 // Make sure that we can do this. Since we can't insert GEPs in a basic
589 // block before a PHI node, we can't easily do this transformation if
590 // we have PHI node users of transformed instructions.
591 for (Value *Val : Explored) {
592 for (Value *Use : Val->uses()) {
593
594 auto *PHI = dyn_cast<PHINode>(Use);
595 auto *Inst = dyn_cast<Instruction>(Val);
596
597 if (Inst == Base || Inst == PHI || !Inst || !PHI ||
598 !Explored.contains(PHI))
599 continue;
600
601 if (PHI->getParent() == Inst->getParent())
602 return false;
603 }
604 }
605 return true;
606 }
607
608 // Sets the appropriate insert point on Builder where we can add
609 // a replacement Instruction for V (if that is possible).
610 static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
611 bool Before = true) {
612 if (auto *PHI = dyn_cast<PHINode>(V)) {
613 Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
614 return;
615 }
616 if (auto *I = dyn_cast<Instruction>(V)) {
617 if (!Before)
618 I = &*std::next(I->getIterator());
619 Builder.SetInsertPoint(I);
620 return;
621 }
622 if (auto *A = dyn_cast<Argument>(V)) {
623 // Set the insertion point in the entry block.
624 BasicBlock &Entry = A->getParent()->getEntryBlock();
625 Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
626 return;
627 }
628 // Otherwise, this is a constant and we don't need to set a new
629 // insertion point.
630 assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
631 }
632
633 /// Returns a re-written value of Start as an indexed GEP using Base as a
634 /// pointer.
635 static Value *rewriteGEPAsOffset(Type *ElemTy, Value *Start, Value *Base,
636 const DataLayout &DL,
637 SetVector<Value *> &Explored) {
638 // Perform all the substitutions. This is a bit tricky because we can
639 // have cycles in our use-def chains.
640 // 1. Create the PHI nodes without any incoming values.
641 // 2. Create all the other values.
642 // 3. Add the edges for the PHI nodes.
643 // 4. Emit GEPs to get the original pointers.
644 // 5. Remove the original instructions.
645 Type *IndexType = IntegerType::get(
646 Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));
647
648 DenseMap<Value *, Value *> NewInsts;
649 NewInsts[Base] = ConstantInt::getNullValue(IndexType);
650
651 // Create the new PHI nodes, without adding any incoming values.
652 for (Value *Val : Explored) {
653 if (Val == Base)
654 continue;
655 // Create empty phi nodes. This avoids cyclic dependencies when creating
656 // the remaining instructions.
657 if (auto *PHI = dyn_cast<PHINode>(Val))
658 NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
659 PHI->getName() + ".idx", PHI);
660 }
661 IRBuilder<> Builder(Base->getContext());
662
663 // Create all the other instructions.
664 for (Value *Val : Explored) {
665
666 if (NewInsts.find(Val) != NewInsts.end())
667 continue;
668
669 if (auto *CI = dyn_cast<CastInst>(Val)) {
670 // Don't get rid of the intermediate variable here; the store can grow
671 // the map which will invalidate the reference to the input value.
672 Value *V = NewInsts[CI->getOperand(0)];
673 NewInsts[CI] = V;
674 continue;
675 }
676 if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
677 Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
678 : GEP->getOperand(1);
679 setInsertionPoint(Builder, GEP);
680 // Indices might need to be sign extended. GEPs will magically do
681 // this, but we need to do it ourselves here.
682 if (Index->getType()->getScalarSizeInBits() !=
683 NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
684 Index = Builder.CreateSExtOrTrunc(
685 Index, NewInsts[GEP->getOperand(0)]->getType(),
686 GEP->getOperand(0)->getName() + ".sext");
687 }
688
689 auto *Op = NewInsts[GEP->getOperand(0)];
690 if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
691 NewInsts[GEP] = Index;
692 else
693 NewInsts[GEP] = Builder.CreateNSWAdd(
694 Op, Index, GEP->getOperand(0)->getName() + ".add");
695 continue;
696 }
697 if (isa<PHINode>(Val))
698 continue;
699
700 llvm_unreachable("Unexpected instruction type");
701 }
702
703 // Add the incoming values to the PHI nodes.
704 for (Value *Val : Explored) {
705 if (Val == Base)
706 continue;
707 // All the instructions have been created, we can now add edges to the
708 // phi nodes.
709 if (auto *PHI = dyn_cast<PHINode>(Val)) {
710 PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
711 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
712 Value *NewIncoming = PHI->getIncomingValue(I);
713
714 if (NewInsts.find(NewIncoming) != NewInsts.end())
715 NewIncoming = NewInsts[NewIncoming];
716
717 NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
718 }
719 }
720 }
721
722 PointerType *PtrTy =
723 ElemTy->getPointerTo(Start->getType()->getPointerAddressSpace());
724 for (Value *Val : Explored) {
725 if (Val == Base)
726 continue;
727
728 // Depending on the type, for external users we have to emit
729 // a GEP or a GEP + ptrtoint.
730 setInsertionPoint(Builder, Val, false);
731
732 // Cast base to the expected type.
733 Value *NewVal = Builder.CreateBitOrPointerCast(
734 Base, PtrTy, Start->getName() + "to.ptr");
735 NewVal = Builder.CreateInBoundsGEP(
736 ElemTy, NewVal, makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");
737 NewVal = Builder.CreateBitOrPointerCast(
738 NewVal, Val->getType(), Val->getName() + ".conv");
739 Val->replaceAllUsesWith(NewVal);
740 }
741
742 return NewInsts[Start];
743 }
744
745 /// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
746 /// the input Value as a constant indexed GEP. Returns a pair containing
747 /// the GEPs Pointer and Index.
748 static std::pair<Value *, Value *>
749 getAsConstantIndexedAddress(Type *ElemTy, Value *V, const DataLayout &DL) {
750 Type *IndexType = IntegerType::get(V->getContext(),
751 DL.getIndexTypeSizeInBits(V->getType()));
752
753 Constant *Index = ConstantInt::getNullValue(IndexType);
754 while (true) {
755 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
756 // We accept only inbounds GEPs here to exclude the possibility of
757 // overflow.
758 if (!GEP->isInBounds())
759 break;
760 if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
761 GEP->getSourceElementType() == ElemTy) {
762 V = GEP->getOperand(0);
763 Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
764 Index = ConstantExpr::getAdd(
765 Index, ConstantExpr::getSExtOrTrunc(GEPIndex, IndexType));
766 continue;
767 }
768 break;
769 }
770 if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
771 if (!CI->isNoopCast(DL))
772 break;
773 V = CI->getOperand(0);
774 continue;
775 }
776 if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
777 if (!CI->isNoopCast(DL))
778 break;
779 V = CI->getOperand(0);
780 continue;
781 }
782 break;
783 }
784 return {V, Index};
785 }
786
787 /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
788 /// We can look through PHIs, GEPs and casts in order to determine a common base
789 /// between GEPLHS and RHS.
790 static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
791 ICmpInst::Predicate Cond,
792 const DataLayout &DL) {
793 // FIXME: Support vector of pointers.
794 if (GEPLHS->getType()->isVectorTy())
795 return nullptr;
796
797 if (!GEPLHS->hasAllConstantIndices())
798 return nullptr;
799
800 Type *ElemTy = GEPLHS->getSourceElementType();
801 Value *PtrBase, *Index;
802 std::tie(PtrBase, Index) = getAsConstantIndexedAddress(ElemTy, GEPLHS, DL);
803
804 // The set of nodes that will take part in this transformation.
805 SetVector<Value *> Nodes;
806
807 if (!canRewriteGEPAsOffset(ElemTy, RHS, PtrBase, DL, Nodes))
808 return nullptr;
809
810 // We know we can re-write this as
811 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)
812 // Since we've only looked through inbounds GEPs we know that we
813 // can't have overflow on either side. We can therefore re-write
814 // this as:
815 // OFFSET1 cmp OFFSET2
816 Value *NewRHS = rewriteGEPAsOffset(ElemTy, RHS, PtrBase, DL, Nodes);
817
818 // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
819 // GEP having PtrBase as the pointer base, and has returned in NewRHS the
820 // offset. Since Index is the offset of LHS to the base pointer, we will now
821 // compare the offsets instead of comparing the pointers.
822 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
823 }
824
825 /// Fold comparisons between a GEP instruction and something else. At this point
826 /// we know that the GEP is on the LHS of the comparison.
827 Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
828 ICmpInst::Predicate Cond,
829 Instruction &I) {
830 // Don't transform signed compares of GEPs into index compares. Even if the
831 // GEP is inbounds, the final add of the base pointer can have signed overflow
832 // and would change the result of the icmp.
833 // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
834 // the maximum signed value for the pointer type.
835 if (ICmpInst::isSigned(Cond))
836 return nullptr;
837
838 // Look through bitcasts and addrspacecasts. We do not however want to remove
839 // 0 GEPs.
840 if (!isa<GetElementPtrInst>(RHS))
841 RHS = RHS->stripPointerCasts();
842
843 Value *PtrBase = GEPLHS->getOperand(0);
844 // FIXME: Support vector pointer GEPs.
845 if (PtrBase == RHS && GEPLHS->isInBounds() &&
846 !GEPLHS->getType()->isVectorTy()) {
847 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
848 // This transformation (ignoring the base and scales) is valid because we
849 // know pointers can't overflow since the gep is inbounds. See if we can
850 // output an optimized form.
851 Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);
852
853 // If not, synthesize the offset the hard way.
854 if (!Offset)
855 Offset = EmitGEPOffset(GEPLHS);
856 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
857 Constant::getNullValue(Offset->getType()));
858 }
859
860 if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
861 isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
862 !NullPointerIsDefined(I.getFunction(),
863 RHS->getType()->getPointerAddressSpace())) {
864 // For most address spaces, an allocation can't be placed at null, but null
865 // itself is treated as a 0 size allocation in the in bounds rules. Thus,
866 // the only valid inbounds address derived from null, is null itself.
867 // Thus, we have four cases to consider:
868 // 1) Base == nullptr, Offset == 0 -> inbounds, null
869 // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
870 // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
871 // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
872 //
873 // (Note if we're indexing a type of size 0, that simply collapses into one
874 // of the buckets above.)
875 //
876 // In general, we're allowed to make values less poison (i.e. remove
877 // sources of full UB), so in this case, we just select between the two
878 // non-poison cases (1 and 4 above).
879 //
880 // For vectors, we apply the same reasoning on a per-lane basis.
881 auto *Base = GEPLHS->getPointerOperand();
882 if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
883 auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
884 Base = Builder.CreateVectorSplat(EC, Base);
885 }
886 return new ICmpInst(Cond, Base,
887 ConstantExpr::getPointerBitCastOrAddrSpaceCast(
888 cast<Constant>(RHS), Base->getType()));
889 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
890 // If the base pointers are different, but the indices are the same, just
891 // compare the base pointer.
892 if (PtrBase != GEPRHS->getOperand(0)) {
893 bool IndicesTheSame =
894 GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
895 GEPLHS->getPointerOperand()->getType() ==
896 GEPRHS->getPointerOperand()->getType() &&
897 GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType();
898 if (IndicesTheSame)
899 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
900 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
901 IndicesTheSame = false;
902 break;
903 }
904
905 // If all indices are the same, just compare the base pointers.
906 Type *BaseType = GEPLHS->getOperand(0)->getType();
907 if (IndicesTheSame && CmpInst::makeCmpResultType(BaseType) == I.getType())
908 return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
909
910 // If we're comparing GEPs with two base pointers that only differ in type
911 // and both GEPs have only constant indices or just one use, then fold
912 // the compare with the adjusted indices.
913 // FIXME: Support vector of pointers.
914 if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
915 (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
916 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
917 PtrBase->stripPointerCasts() ==
918 GEPRHS->getOperand(0)->stripPointerCasts() &&
919 !GEPLHS->getType()->isVectorTy()) {
920 Value *LOffset = EmitGEPOffset(GEPLHS);
921 Value *ROffset = EmitGEPOffset(GEPRHS);
922
923 // If we looked through an addrspacecast between different sized address
924 // spaces, the LHS and RHS pointers are different sized
925 // integers. Truncate to the smaller one.
926 Type *LHSIndexTy = LOffset->getType();
927 Type *RHSIndexTy = ROffset->getType();
928 if (LHSIndexTy != RHSIndexTy) {
929 if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() <
930 RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) {
931 ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
932 } else
933 LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
934 }
935
936 Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
937 LOffset, ROffset);
938 return replaceInstUsesWith(I, Cmp);
939 }
940
941 // Otherwise, the base pointers are different and the indices are
942 // different. Try convert this to an indexed compare by looking through
943 // PHIs/casts.
944 return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
945 }
946
947 // If one of the GEPs has all zero indices, recurse.
948 // FIXME: Handle vector of pointers.
949 if (!GEPLHS->getType()->isVectorTy() && GEPLHS->hasAllZeroIndices())
950 return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
951 ICmpInst::getSwappedPredicate(Cond), I);
952
953 // If the other GEP has all zero indices, recurse.
954 // FIXME: Handle vector of pointers.
955 if (!GEPRHS->getType()->isVectorTy() && GEPRHS->hasAllZeroIndices())
956 return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
957
958 bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
959 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
960 GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType()) {
961 // If the GEPs only differ by one index, compare it.
962 unsigned NumDifferences = 0; // Keep track of # differences.
963 unsigned DiffOperand = 0; // The operand that differs.
964 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
965 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
966 Type *LHSType = GEPLHS->getOperand(i)->getType();
967 Type *RHSType = GEPRHS->getOperand(i)->getType();
968 // FIXME: Better support for vector of pointers.
969 if (LHSType->getPrimitiveSizeInBits() !=
970 RHSType->getPrimitiveSizeInBits() ||
971 (GEPLHS->getType()->isVectorTy() &&
972 (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
973 // Irreconcilable differences.
974 NumDifferences = 2;
975 break;
976 }
977
978 if (NumDifferences++) break;
979 DiffOperand = i;
980 }
981
982 if (NumDifferences == 0) // SAME GEP?
983 return replaceInstUsesWith(I, // No comparison is needed here.
984 ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
985
986 else if (NumDifferences == 1 && GEPsInBounds) {
987 Value *LHSV = GEPLHS->getOperand(DiffOperand);
988 Value *RHSV = GEPRHS->getOperand(DiffOperand);
989 // Make sure we do a signed comparison here.
990 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
991 }
992 }
993
994 // Only lower this if the icmp is the only user of the GEP or if we expect
995 // the result to fold to a constant!
996 if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
997 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
998 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2)
999 Value *L = EmitGEPOffset(GEPLHS);
1000 Value *R = EmitGEPOffset(GEPRHS);
1001 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
1002 }
1003 }
1004
1005 // Try convert this to an indexed compare by looking through PHIs/casts as a
1006 // last resort.
1007 return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
1008 }
1009
1010 Instruction *InstCombinerImpl::foldAllocaCmp(ICmpInst &ICI,
1011 const AllocaInst *Alloca) {
1012 assert(ICI.isEquality() && "Cannot fold non-equality comparison.");
1013
1014 // It would be tempting to fold away comparisons between allocas and any
1015 // pointer not based on that alloca (e.g. an argument). However, even
1016 // though such pointers cannot alias, they can still compare equal.
1017 //
1018 // But LLVM doesn't specify where allocas get their memory, so if the alloca
1019 // doesn't escape we can argue that it's impossible to guess its value, and we
1020 // can therefore act as if any such guesses are wrong.
1021 //
1022 // The code below checks that the alloca doesn't escape, and that it's only
1023 // used in a comparison once (the current instruction). The
1024 // single-comparison-use condition ensures that we're trivially folding all
1025 // comparisons against the alloca consistently, and avoids the risk of
1026 // erroneously folding a comparison of the pointer with itself.
1027
1028 unsigned MaxIter = 32; // Break cycles and bound to constant-time.
1029
1030 SmallVector<const Use *, 32> Worklist;
1031 for (const Use &U : Alloca->uses()) {
1032 if (Worklist.size() >= MaxIter)
1033 return nullptr;
1034 Worklist.push_back(&U);
1035 }
1036
1037 unsigned NumCmps = 0;
1038 while (!Worklist.empty()) {
1039 assert(Worklist.size() <= MaxIter);
1040 const Use *U = Worklist.pop_back_val();
1041 const Value *V = U->getUser();
1042 --MaxIter;
1043
1044 if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
1045 isa<SelectInst>(V)) {
1046 // Track the uses.
1047 } else if (isa<LoadInst>(V)) {
1048 // Loading from the pointer doesn't escape it.
1049 continue;
1050 } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
1051 // Storing *to* the pointer is fine, but storing the pointer escapes it.
1052 if (SI->getValueOperand() == U->get())
1053 return nullptr;
1054 continue;
1055 } else if (isa<ICmpInst>(V)) {
1056 if (NumCmps++)
1057 return nullptr; // Found more than one cmp.
1058 continue;
1059 } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
1060 switch (Intrin->getIntrinsicID()) {
1061 // These intrinsics don't escape or compare the pointer. Memset is safe
1062 // because we don't allow ptrtoint. Memcpy and memmove are safe because
1063 // we don't allow stores, so src cannot point to V.
1064 case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
1065 case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
1066 continue;
1067 default:
1068 return nullptr;
1069 }
1070 } else {
1071 return nullptr;
1072 }
1073 for (const Use &U : V->uses()) {
1074 if (Worklist.size() >= MaxIter)
1075 return nullptr;
1076 Worklist.push_back(&U);
1077 }
1078 }
1079
1080 auto *Res = ConstantInt::get(ICI.getType(),
1081 !CmpInst::isTrueWhenEqual(ICI.getPredicate()));
1082 return replaceInstUsesWith(ICI, Res);
1083 }
1084
1085 /// Fold "icmp pred (X+C), X".
1086 Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
1087 ICmpInst::Predicate Pred) {
1088 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
1089 // so the values can never be equal. Similarly for all other "or equals"
1090 // operators.
1091 assert(!!C && "C should not be zero!");
1092
1093 // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
1094 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253
1095 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
1096 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
1097 Constant *R = ConstantInt::get(X->getType(),
1098 APInt::getMaxValue(C.getBitWidth()) - C);
1099 return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
1100 }
1101
1102 // (X+1) >u X --> X <u (0-1) --> X != 255
1103 // (X+2) >u X --> X <u (0-2) --> X <u 254
1104 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
1105 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
1106 return new ICmpInst(ICmpInst::ICMP_ULT, X,
1107 ConstantInt::get(X->getType(), -C));
1108
1109 APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());
1110
1111 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
1112 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
1113 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
1114 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
1115 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
1116 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
1117 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1118 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1119 ConstantInt::get(X->getType(), SMax - C));
1120
1121 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
1122 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
1123 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
1124 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
1125 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
1126 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
1127
1128 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
1129 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1130 ConstantInt::get(X->getType(), SMax - (C - 1)));
1131 }
1132
1133 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
1134 /// (icmp eq/ne A, Log2(AP2/AP1)) ->
1135 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
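/// For example, (icmp eq (lshr i32 8, %A), 2) becomes (icmp eq %A, 2)
/// because 8 u>> 2 == 2.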
1136 Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
1137 const APInt &AP1,
1138 const APInt &AP2) {
1139 assert(I.isEquality() && "Cannot fold icmp gt/lt");
1140
1141 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1142 if (I.getPredicate() == I.ICMP_NE)
1143 Pred = CmpInst::getInversePredicate(Pred);
1144 return new ICmpInst(Pred, LHS, RHS);
1145 };
1146
1147 // Don't bother doing any work for cases which InstSimplify handles.
1148 if (AP2.isZero())
1149 return nullptr;
1150
1151 bool IsAShr = isa<AShrOperator>(I.getOperand(0));
1152 if (IsAShr) {
1153 if (AP2.isAllOnes())
1154 return nullptr;
1155 if (AP2.isNegative() != AP1.isNegative())
1156 return nullptr;
1157 if (AP2.sgt(AP1))
1158 return nullptr;
1159 }
1160
1161 if (!AP1)
1162 // 'A' must be large enough to shift out the highest set bit.
1163 return getICmp(I.ICMP_UGT, A,
1164 ConstantInt::get(A->getType(), AP2.logBase2()));
1165
1166 if (AP1 == AP2)
1167 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1168
1169 int Shift;
1170 if (IsAShr && AP1.isNegative())
1171 Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
1172 else
1173 Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();
1174
1175 if (Shift > 0) {
1176 if (IsAShr && AP1 == AP2.ashr(Shift)) {
1177 // There are multiple solutions if we are comparing against -1 and the LHS
1178 // of the ashr is not a power of two.
1179 if (AP1.isAllOnes() && !AP2.isPowerOf2())
1180 return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
1181 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1182 } else if (AP1 == AP2.lshr(Shift)) {
1183 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1184 }
1185 }
1186
1187 // Shifting const2 will never be equal to const1.
1188 // FIXME: This should always be handled by InstSimplify?
1189 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1190 return replaceInstUsesWith(I, TorF);
1191 }
1192
1193 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
1194 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
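/// For example, (icmp eq (shl i32 1, %A), 8) becomes (icmp eq %A, 3)
/// because 1 << 3 == 8.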
1195 Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
1196 const APInt &AP1,
1197 const APInt &AP2) {
1198 assert(I.isEquality() && "Cannot fold icmp gt/lt");
1199
1200 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1201 if (I.getPredicate() == I.ICMP_NE)
1202 Pred = CmpInst::getInversePredicate(Pred);
1203 return new ICmpInst(Pred, LHS, RHS);
1204 };
1205
1206 // Don't bother doing any work for cases which InstSimplify handles.
1207 if (AP2.isZero())
1208 return nullptr;
1209
1210 unsigned AP2TrailingZeros = AP2.countTrailingZeros();
1211
1212 if (!AP1 && AP2TrailingZeros != 0)
1213 return getICmp(
1214 I.ICMP_UGE, A,
1215 ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1216
1217 if (AP1 == AP2)
1218 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1219
1220 // Get the distance between the lowest bits that are set.
1221 int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
1222
1223 if (Shift > 0 && AP2.shl(Shift) == AP1)
1224 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1225
1226 // Shifting const2 will never be equal to const1.
1227 // FIXME: This should always be handled by InstSimplify?
1228 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1229 return replaceInstUsesWith(I, TorF);
1230 }
1231
1232 /// The caller has matched a pattern of the form:
1233 /// I = icmp ugt (add (add A, B), CI2), CI1
1234 /// If this is of the form:
1235 /// sum = a + b
1236 /// if (sum+128 >u 255)
1237 /// Then replace it with llvm.sadd.with.overflow.i8.
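/// For the i8 case above, the narrow add overflows exactly when the wide sum
/// lies outside [-128, 127], i.e. when sum+128 is (unsigned) greater than 255,
/// which is precisely the range check being matched.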
1238 ///
1239 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1240 ConstantInt *CI2, ConstantInt *CI1,
1241 InstCombinerImpl &IC) {
1242 // The transformation we're trying to do here is to transform this into an
1243 // llvm.sadd.with.overflow. To do this, we have to replace the original add
1244 // with a narrower add, and discard the add-with-constant that is part of the
1245 // range check (if we can't eliminate it, this isn't profitable).
1246
1247 // In order to eliminate the add-with-constant, the compare can be its only
1248 // use.
1249 Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1250 if (!AddWithCst->hasOneUse())
1251 return nullptr;
1252
1253 // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1254 if (!CI2->getValue().isPowerOf2())
1255 return nullptr;
1256 unsigned NewWidth = CI2->getValue().countTrailingZeros();
1257 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1258 return nullptr;
1259
1260 // The width of the new add formed is 1 more than the bias.
1261 ++NewWidth;
1262
1263 // Check to see that CI1 is an all-ones value with NewWidth bits.
1264 if (CI1->getBitWidth() == NewWidth ||
1265 CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1266 return nullptr;
1267
1268 // This is only really a signed overflow check if the inputs have been
1269 // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1270 // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1271 if (IC.ComputeMaxSignificantBits(A, 0, &I) > NewWidth ||
1272 IC.ComputeMaxSignificantBits(B, 0, &I) > NewWidth)
1273 return nullptr;
1274
1275 // In order to replace the original add with a narrower
1276 // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1277 // and truncates that discard the high bits of the add. Verify that this is
1278 // the case.
1279 Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1280 for (User *U : OrigAdd->users()) {
1281 if (U == AddWithCst)
1282 continue;
1283
1284 // Only accept truncates for now. We would really like a nice recursive
1285 // predicate like SimplifyDemandedBits, but which goes downwards the use-def
1286 // chain to see which bits of a value are actually demanded. If the
1287 // original add had another add which was then immediately truncated, we
1288 // could still do the transformation.
1289 TruncInst *TI = dyn_cast<TruncInst>(U);
1290 if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1291 return nullptr;
1292 }
1293
1294 // If the pattern matches, truncate the inputs to the narrower type and
1295 // use the sadd_with_overflow intrinsic to efficiently compute both the
1296 // result and the overflow bit.
1297 Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1298 Function *F = Intrinsic::getDeclaration(
1299 I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1300
1301 InstCombiner::BuilderTy &Builder = IC.Builder;
1302
1303 // Put the new code above the original add, in case there are any uses of the
1304 // add between the add and the compare.
1305 Builder.SetInsertPoint(OrigAdd);
1306
1307 Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1308 Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1309 CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1310 Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1311 Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1312
1313 // The inner add was the result of the narrow add, zero extended to the
1314 // wider type. Replace it with the result computed by the intrinsic.
1315 IC.replaceInstUsesWith(*OrigAdd, ZExt);
1316 IC.eraseInstFromFunction(*OrigAdd);
1317
1318 // The original icmp gets replaced with the overflow value.
1319 return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1320 }
1321
1322 /// If we have:
1323 /// icmp eq/ne (urem/srem %x, %y), 0
1324 /// iff %y is a power-of-two, we can replace this with a bit test:
1325 /// icmp eq/ne (and %x, (add %y, -1)), 0
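/// For example, with %y == 8 this produces icmp eq/ne (and %x, 7), 0, since
/// %x is divisible by 8 exactly when its low three bits are zero.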
1326 Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1327 // This fold is only valid for equality predicates.
1328 if (!I.isEquality())
1329 return nullptr;
1330 ICmpInst::Predicate Pred;
1331 Value *X, *Y, *Zero;
1332 if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1333 m_CombineAnd(m_Zero(), m_Value(Zero)))))
1334 return nullptr;
1335 if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I))
1336 return nullptr;
1337 // This may increase instruction count, we don't enforce that Y is a constant.
1338 Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1339 Value *Masked = Builder.CreateAnd(X, Mask);
1340 return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1341 }
1342
1343 /// Fold equality-comparison between zero and any (maybe truncated) right-shift
1344 /// by one-less-than-bitwidth into a sign test on the original value.
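/// For example, (icmp eq (lshr i32 %x, 31), 0) becomes (icmp sge i32 %x, 0),
/// and the ne form becomes (icmp slt i32 %x, 0).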
1345 Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
1346 Instruction *Val;
1347 ICmpInst::Predicate Pred;
1348 if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1349 return nullptr;
1350
1351 Value *X;
1352 Type *XTy;
1353
1354 Constant *C;
1355 if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1356 XTy = X->getType();
1357 unsigned XBitWidth = XTy->getScalarSizeInBits();
1358 if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1359 APInt(XBitWidth, XBitWidth - 1))))
1360 return nullptr;
1361 } else if (isa<BinaryOperator>(Val) &&
1362 (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1363 cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1364 /*AnalyzeForSignBitExtraction=*/true))) {
1365 XTy = X->getType();
1366 } else
1367 return nullptr;
1368
1369 return ICmpInst::Create(Instruction::ICmp,
1370 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1371 : ICmpInst::ICMP_SLT,
1372 X, ConstantInt::getNullValue(XTy));
1373 }
1374
1375 // Handle icmp pred X, 0
1376 Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
1377 CmpInst::Predicate Pred = Cmp.getPredicate();
1378 if (!match(Cmp.getOperand(1), m_Zero()))
1379 return nullptr;
1380
1381 // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
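// smin(A, B) is positive iff both A and B are positive, so when one operand
// is known positive the compare reduces to (icmp sgt Other, 0).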
1382 if (Pred == ICmpInst::ICMP_SGT) {
1383 Value *A, *B;
1384 if (match(Cmp.getOperand(0), m_SMin(m_Value(A), m_Value(B)))) {
1385 if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
1386 return new ICmpInst(Pred, B, Cmp.getOperand(1));
1387 if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
1388 return new ICmpInst(Pred, A, Cmp.getOperand(1));
1389 }
1390 }
1391
1392 if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1393 return New;
1394
1395 // Given:
1396 // icmp eq/ne (urem %x, %y), 0
1397 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1398 // icmp eq/ne %x, 0
1399 Value *X, *Y;
1400 if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1401 ICmpInst::isEquality(Pred)) {
1402 KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
1403 KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
1404 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1405 return new ICmpInst(Pred, X, Cmp.getOperand(1));
1406 }
1407
1408 return nullptr;
1409 }
1410
1411 /// Fold icmp Pred X, C.
1412 /// TODO: This code structure does not make sense. The saturating add fold
1413 /// should be moved to some other helper and extended as noted below (it is also
1414 /// possible that code has been made unnecessary - do we canonicalize IR to
1415 /// overflow/saturating intrinsics or not?).
1416 Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
1417 // Match the following pattern, which is a common idiom when writing
1418 // overflow-safe integer arithmetic functions. The source performs an addition
1419 // in a wider type and explicitly checks for overflow using comparisons against
1420 // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1421 //
1422 // TODO: This could probably be generalized to handle other overflow-safe
1423 // operations if we worked out the formulas to compute the appropriate magic
1424 // constants.
1425 //
1426 // sum = a + b
1427 // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8
1428 CmpInst::Predicate Pred = Cmp.getPredicate();
1429 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1430 Value *A, *B;
1431 ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1432 if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1433 match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1434 if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1435 return Res;
1436
1437 // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
1438 Constant *C = dyn_cast<Constant>(Op1);
1439 if (!C)
1440 return nullptr;
1441
1442 if (auto *Phi = dyn_cast<PHINode>(Op0))
1443 if (all_of(Phi->operands(), [](Value *V) { return isa<Constant>(V); })) {
1444 Type *Ty = Cmp.getType();
1445 Builder.SetInsertPoint(Phi);
1446 PHINode *NewPhi =
1447 Builder.CreatePHI(Ty, Phi->getNumOperands());
1448 for (BasicBlock *Predecessor : predecessors(Phi->getParent())) {
1449 auto *Input =
1450 cast<Constant>(Phi->getIncomingValueForBlock(Predecessor));
1451 auto *BoolInput = ConstantExpr::getCompare(Pred, Input, C);
1452 NewPhi->addIncoming(BoolInput, Predecessor);
1453 }
1454 NewPhi->takeName(&Cmp);
1455 return replaceInstUsesWith(Cmp, NewPhi);
1456 }
1457
1458 return nullptr;
1459 }
1460
1461 /// Canonicalize icmp instructions based on dominating conditions.
1462 Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1463 // This is a cheap/incomplete check for dominance - just match a single
1464 // predecessor with a conditional branch.
1465 BasicBlock *CmpBB = Cmp.getParent();
1466 BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1467 if (!DomBB)
1468 return nullptr;
1469
1470 Value *DomCond;
1471 BasicBlock *TrueBB, *FalseBB;
1472 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1473 return nullptr;
1474
1475 assert((TrueBB == CmpBB || FalseBB == CmpBB) &&
1476 "Predecessor block does not point to successor?");
1477
1478 // The branch should get simplified. Don't bother simplifying this condition.
1479 if (TrueBB == FalseBB)
1480 return nullptr;
1481
1482 // Try to simplify this compare to T/F based on the dominating condition.
1483 Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
1484 if (Imp)
1485 return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp));
1486
1487 CmpInst::Predicate Pred = Cmp.getPredicate();
1488 Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1489 ICmpInst::Predicate DomPred;
1490 const APInt *C, *DomC;
1491 if (match(DomCond, m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))) &&
1492 match(Y, m_APInt(C))) {
1493 // We have 2 compares of a variable with constants. Calculate the constant
1494 // ranges of those compares to see if we can transform the 2nd compare:
1495 // DomBB:
1496 // DomCond = icmp DomPred X, DomC
1497 // br DomCond, CmpBB, FalseBB
1498 // CmpBB:
1499 // Cmp = icmp Pred X, C
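// For example, if DomCond is (icmp ugt X, 5) and CmpBB is its true successor,
// then (icmp ult X, 7) in CmpBB folds to (icmp eq X, 6).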
1500 ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *C);
1501 ConstantRange DominatingCR =
1502 (CmpBB == TrueBB) ? ConstantRange::makeExactICmpRegion(DomPred, *DomC)
1503 : ConstantRange::makeExactICmpRegion(
1504 CmpInst::getInversePredicate(DomPred), *DomC);
1505 ConstantRange Intersection = DominatingCR.intersectWith(CR);
1506 ConstantRange Difference = DominatingCR.difference(CR);
1507 if (Intersection.isEmptySet())
1508 return replaceInstUsesWith(Cmp, Builder.getFalse());
1509 if (Difference.isEmptySet())
1510 return replaceInstUsesWith(Cmp, Builder.getTrue());
1511
1512 // Canonicalizing a sign-bit comparison that is used in a branch
1513 // pessimizes codegen by generating a branch-on-zero instruction
1514 // instead of a test-and-branch, so we avoid canonicalizing in such
1515 // situations: a test-and-branch instruction has better branch
1516 // displacement than a compare-and-branch instruction.
1517 bool UnusedBit;
1518 bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1519 if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1520 return nullptr;
1521
1522 // Avoid an infinite loop with min/max canonicalization.
1523 // TODO: This will be unnecessary if we canonicalize to min/max intrinsics.
1524 if (Cmp.hasOneUse() &&
1525 match(Cmp.user_back(), m_MaxOrMin(m_Value(), m_Value())))
1526 return nullptr;
1527
1528 if (const APInt *EqC = Intersection.getSingleElement())
1529 return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1530 if (const APInt *NeC = Difference.getSingleElement())
1531 return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1532 }
1533
1534 return nullptr;
1535 }
1536
1537 /// Fold icmp (trunc X), C.
1538 Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
1539 TruncInst *Trunc,
1540 const APInt &C) {
1541 ICmpInst::Predicate Pred = Cmp.getPredicate();
1542 Value *X = Trunc->getOperand(0);
1543 if (C.isOne() && C.getBitWidth() > 1) {
1544 // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1545 Value *V = nullptr;
1546 if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1547 return new ICmpInst(ICmpInst::ICMP_SLT, V,
1548 ConstantInt::get(V->getType(), 1));
1549 }
1550
1551 unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1552 SrcBits = X->getType()->getScalarSizeInBits();
1553 if (Cmp.isEquality() && Trunc->hasOneUse()) {
1554 // Canonicalize to a mask and wider compare if the wide type is suitable:
1555 // (trunc X to i8) == C --> (X & 0xff) == (zext C)
1556 if (!X->getType()->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
1557 Constant *Mask = ConstantInt::get(X->getType(),
1558 APInt::getLowBitsSet(SrcBits, DstBits));
1559 Value *And = Builder.CreateAnd(X, Mask);
1560 Constant *WideC = ConstantInt::get(X->getType(), C.zext(SrcBits));
1561 return new ICmpInst(Pred, And, WideC);
1562 }
1563
1564 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1565 // of the high bits truncated out of x are known.
1566 KnownBits Known = computeKnownBits(X, 0, &Cmp);
1567
1568 // If all the high bits are known, we can do this xform.
1569 if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
1570 // Pull in the high bits from known-ones set.
1571 APInt NewRHS = C.zext(SrcBits);
1572 NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1573 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
1574 }
1575 }
1576
1577 // Look through truncated right-shift of the sign-bit for a sign-bit check:
1578 // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] < 0 --> ShOp < 0
1579 // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] > -1 --> ShOp > -1
1580 Value *ShOp;
1581 const APInt *ShAmtC;
1582 bool TrueIfSigned;
1583 if (isSignBitCheck(Pred, C, TrueIfSigned) &&
1584 match(X, m_Shr(m_Value(ShOp), m_APInt(ShAmtC))) &&
1585 DstBits == SrcBits - ShAmtC->getZExtValue()) {
1586 return TrueIfSigned
1587 ? new ICmpInst(ICmpInst::ICMP_SLT, ShOp,
1588 ConstantInt::getNullValue(X->getType()))
1589 : new ICmpInst(ICmpInst::ICMP_SGT, ShOp,
1590 ConstantInt::getAllOnesValue(X->getType()));
1591 }
1592
1593 return nullptr;
1594 }
1595
1596 /// Fold icmp (xor X, Y), C.
1597 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1598 BinaryOperator *Xor,
1599 const APInt &C) {
1600 Value *X = Xor->getOperand(0);
1601 Value *Y = Xor->getOperand(1);
1602 const APInt *XorC;
1603 if (!match(Y, m_APInt(XorC)))
1604 return nullptr;
1605
1606 // If this is a comparison that tests the sign bit (X < 0) or (X > -1),
1607 // fold the xor.
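// For example, (icmp slt (xor i8 %x, -128), 0) becomes (icmp sgt i8 %x, -1).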
1608 ICmpInst::Predicate Pred = Cmp.getPredicate();
1609 bool TrueIfSigned = false;
1610 if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1611
1612 // If the sign bit of the XorCst is not set, there is no change to
1613 // the operation, just stop using the Xor.
1614 if (!XorC->isNegative())
1615 return replaceOperand(Cmp, 0, X);
1616
1617 // Emit the opposite comparison.
1618 if (TrueIfSigned)
1619 return new ICmpInst(ICmpInst::ICMP_SGT, X,
1620 ConstantInt::getAllOnesValue(X->getType()));
1621 else
1622 return new ICmpInst(ICmpInst::ICMP_SLT, X,
1623 ConstantInt::getNullValue(X->getType()));
1624 }
1625
1626 if (Xor->hasOneUse()) {
1627 // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
1628 if (!Cmp.isEquality() && XorC->isSignMask()) {
1629 Pred = Cmp.getFlippedSignednessPredicate();
1630 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1631 }
1632
1633 // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1634 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1635 Pred = Cmp.getFlippedSignednessPredicate();
1636 Pred = Cmp.getSwappedPredicate(Pred);
1637 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1638 }
1639 }
1640
1641 // Mask constant magic can eliminate an 'xor' with unsigned compares.
1642 if (Pred == ICmpInst::ICMP_UGT) {
1643 // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1644 if (*XorC == ~C && (C + 1).isPowerOf2())
1645 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1646 // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1647 if (*XorC == C && (C + 1).isPowerOf2())
1648 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1649 }
1650 if (Pred == ICmpInst::ICMP_ULT) {
1651 // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1652 if (*XorC == -C && C.isPowerOf2())
1653 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1654 ConstantInt::get(X->getType(), ~C));
1655 // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1656 if (*XorC == C && (-C).isPowerOf2())
1657 return new ICmpInst(ICmpInst::ICMP_UGT, X,
1658 ConstantInt::get(X->getType(), ~C));
1659 }
1660 return nullptr;
1661 }
1662
1663 /// Fold icmp (and (sh X, Y), C2), C1.
1664 Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1665 BinaryOperator *And,
1666 const APInt &C1,
1667 const APInt &C2) {
1668 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1669 if (!Shift || !Shift->isShift())
1670 return nullptr;
1671
1672 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1673 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1674 // code produced by the clang front-end, for bitfield access.
1675 // This seemingly simple opportunity to fold away a shift turns out to be
1676 // rather complicated. See PR17827 for details.
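// For example, (icmp eq (and (lshr X, 3), 7), 5) becomes (icmp eq (and X, 56), 40).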
1677 unsigned ShiftOpcode = Shift->getOpcode();
1678 bool IsShl = ShiftOpcode == Instruction::Shl;
1679 const APInt *C3;
1680 if (match(Shift->getOperand(1), m_APInt(C3))) {
1681 APInt NewAndCst, NewCmpCst;
1682 bool AnyCmpCstBitsShiftedOut;
1683 if (ShiftOpcode == Instruction::Shl) {
1684 // For a left shift, we can fold if the comparison is not signed. We can
1685 // also fold a signed comparison if the mask value and comparison value
1686 // are not negative. These constraints may not be obvious, but we can
1687 // prove that they are correct using an SMT solver.
1688 if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1689 return nullptr;
1690
1691 NewCmpCst = C1.lshr(*C3);
1692 NewAndCst = C2.lshr(*C3);
1693 AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1694 } else if (ShiftOpcode == Instruction::LShr) {
1695 // For a logical right shift, we can fold if the comparison is not signed.
1696 // We can also fold a signed comparison if the shifted mask value and the
1697 // shifted comparison value are not negative. These constraints may not be
1698 // obvious, but we can prove that they are correct using an SMT solver.
1699 NewCmpCst = C1.shl(*C3);
1700 NewAndCst = C2.shl(*C3);
1701 AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1702 if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1703 return nullptr;
1704 } else {
1705 // For an arithmetic shift, check that both constants don't use (in a
1706 // signed sense) the top bits being shifted out.
1707 assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1708 NewCmpCst = C1.shl(*C3);
1709 NewAndCst = C2.shl(*C3);
1710 AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1711 if (NewAndCst.ashr(*C3) != C2)
1712 return nullptr;
1713 }
1714
1715 if (AnyCmpCstBitsShiftedOut) {
1716 // If we shifted bits out, the fold is not going to work out. As a
1717 // special case, check to see if this means that the result is always
1718 // true or false now.
1719 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1720 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1721 if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1722 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1723 } else {
1724 Value *NewAnd = Builder.CreateAnd(
1725 Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1726 return new ICmpInst(Cmp.getPredicate(),
1727 NewAnd, ConstantInt::get(And->getType(), NewCmpCst));
1728 }
1729 }
1730
1731 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
1732 // preferable because it allows the C2 << Y expression to be hoisted out of a
1733 // loop if Y is invariant and X is not.
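// For example, ((X lshr Y) & 4) == 0 becomes (X & (4 shl Y)) == 0.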
1734 if (Shift->hasOneUse() && C1.isZero() && Cmp.isEquality() &&
1735 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1736 // Compute C2 << Y.
1737 Value *NewShift =
1738 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1739 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1740
1741 // Compute X & (C2 << Y).
1742 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1743 return replaceOperand(Cmp, 0, NewAnd);
1744 }
1745
1746 return nullptr;
1747 }
1748
1749 /// Fold icmp (and X, C2), C1.
1750 Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1751 BinaryOperator *And,
1752 const APInt &C1) {
1753 bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1754
1755 // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1756 // TODO: We canonicalize to the longer form for scalars because we have
1757 // better analysis/folds for icmp, and codegen may be better with icmp.
1758 if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isZero() &&
1759 match(And->getOperand(1), m_One()))
1760 return new TruncInst(And->getOperand(0), Cmp.getType());
1761
1762 const APInt *C2;
1763 Value *X;
1764 if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1765 return nullptr;
1766
1767 // Don't perform the following transforms if the AND has multiple uses
1768 if (!And->hasOneUse())
1769 return nullptr;
1770
1771 if (Cmp.isEquality() && C1.isZero()) {
1772 // Restrict this fold to single-use 'and' (PR10267).
1773 // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1774 if (C2->isSignMask()) {
1775 Constant *Zero = Constant::getNullValue(X->getType());
1776 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1777 return new ICmpInst(NewPred, X, Zero);
1778 }
1779
1780 APInt NewC2 = *C2;
1781 KnownBits Know = computeKnownBits(And->getOperand(0), 0, And);
1782 // Set high zeros of C2 to allow matching negated power-of-2.
1783 NewC2 = *C2 + APInt::getHighBitsSet(C2->getBitWidth(),
1784 Know.countMinLeadingZeros());
1785
1786 // Restrict this fold only for single-use 'and' (PR10267).
1787 // ((%x & C) == 0) --> %x u< (-C) iff (-C) is power of two.
1788 if (NewC2.isNegatedPowerOf2()) {
1789 Constant *NegBOC = ConstantInt::get(And->getType(), -NewC2);
1790 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1791 return new ICmpInst(NewPred, X, NegBOC);
1792 }
1793 }
1794
1795 // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1796 // the input width without changing the value produced, eliminate the cast:
1797 //
1798 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1799 //
1800 // We can do this transformation if the constants do not have their sign bits
1801 // set or if it is an equality comparison. Extending a relational comparison
1802 // when we're checking the sign bit would not work.
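// For example, (icmp ult (and (trunc i64 %w to i32), 255), 10) becomes
// (icmp ult (and i64 %w, 255), 10).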
1803 Value *W;
1804 if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1805 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1806 // TODO: Is this a good transform for vectors? Wider types may reduce
1807 // throughput. Should this transform be limited (even for scalars) by using
1808 // shouldChangeType()?
1809 if (!Cmp.getType()->isVectorTy()) {
1810 Type *WideType = W->getType();
1811 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1812 Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1813 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1814 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1815 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1816 }
1817 }
1818
1819 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1820 return I;
1821
1822 // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1823 // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1824 //
1825 // iff pred isn't signed
1826 if (!Cmp.isSigned() && C1.isZero() && And->getOperand(0)->hasOneUse() &&
1827 match(And->getOperand(1), m_One())) {
1828 Constant *One = cast<Constant>(And->getOperand(1));
1829 Value *Or = And->getOperand(0);
1830 Value *A, *B, *LShr;
1831 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1832 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1833 unsigned UsesRemoved = 0;
1834 if (And->hasOneUse())
1835 ++UsesRemoved;
1836 if (Or->hasOneUse())
1837 ++UsesRemoved;
1838 if (LShr->hasOneUse())
1839 ++UsesRemoved;
1840
1841 // Compute A & ((1 << B) | 1)
1842 Value *NewOr = nullptr;
1843 if (auto *C = dyn_cast<Constant>(B)) {
1844 if (UsesRemoved >= 1)
1845 NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1846 } else {
1847 if (UsesRemoved >= 3)
1848 NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1849 /*HasNUW=*/true),
1850 One, Or->getName());
1851 }
1852 if (NewOr) {
1853 Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1854 return replaceOperand(Cmp, 0, NewAnd);
1855 }
1856 }
1857 }
1858
1859 return nullptr;
1860 }
1861
1862 /// Fold icmp (and X, Y), C.
1863 Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1864 BinaryOperator *And,
1865 const APInt &C) {
1866 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1867 return I;
1868
1869 const ICmpInst::Predicate Pred = Cmp.getPredicate();
1870 bool TrueIfNeg;
1871 if (isSignBitCheck(Pred, C, TrueIfNeg)) {
1872 // ((X - 1) & ~X) < 0 --> X == 0
1873 // ((X - 1) & ~X) >= 0 --> X != 0
1874 Value *X;
1875 if (match(And->getOperand(0), m_Add(m_Value(X), m_AllOnes())) &&
1876 match(And->getOperand(1), m_Not(m_Specific(X)))) {
1877 auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1878 return new ICmpInst(NewPred, X, ConstantInt::getNullValue(X->getType()));
1879 }
1880 }
1881
1882 // TODO: These all require that Y is constant too, so refactor with the above.
1883
1884 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1885 Value *X = And->getOperand(0);
1886 Value *Y = And->getOperand(1);
1887 if (auto *C2 = dyn_cast<ConstantInt>(Y))
1888 if (auto *LI = dyn_cast<LoadInst>(X))
1889 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1890 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1891 if (Instruction *Res =
1892 foldCmpLoadFromIndexedGlobal(LI, GEP, GV, Cmp, C2))
1893 return Res;
1894
1895 if (!Cmp.isEquality())
1896 return nullptr;
1897
1898 // X & -C == -C -> X u> ~C
1899 // X & -C != -C -> X u<= ~C
1900 // iff C is a power of 2
1901 if (Cmp.getOperand(1) == Y && C.isNegatedPowerOf2()) {
1902 auto NewPred =
1903 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT : CmpInst::ICMP_ULE;
1904 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1905 }
1906
1907 return nullptr;
1908 }
1909
1910 /// Fold icmp (or X, Y), C.
1911 Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
1912 BinaryOperator *Or,
1913 const APInt &C) {
1914 ICmpInst::Predicate Pred = Cmp.getPredicate();
1915 if (C.isOne()) {
1916 // icmp slt signum(V) 1 --> icmp slt V, 1
1917 Value *V = nullptr;
1918 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1919 return new ICmpInst(ICmpInst::ICMP_SLT, V,
1920 ConstantInt::get(V->getType(), 1));
1921 }
1922
1923 Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
1924 const APInt *MaskC;
1925 if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
1926 if (*MaskC == C && (C + 1).isPowerOf2()) {
1927 // X | C == C --> X <=u C
1928 // X | C != C --> X >u C
1929 // iff C+1 is a power of 2 (C is a bitmask of the low bits)
1930 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1931 return new ICmpInst(Pred, OrOp0, OrOp1);
1932 }
1933
1934 // More general: canonicalize 'equality with set bits mask' to
1935 // 'equality with clear bits mask'.
1936 // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
1937 // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
1938 if (Or->hasOneUse()) {
1939 Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
1940 Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
1941 return new ICmpInst(Pred, And, NewC);
1942 }
1943 }
1944
1945 // (X | (X-1)) s< 0 --> X s< 1
1946 // (X | (X-1)) s> -1 --> X s> 0
1947 Value *X;
1948 bool TrueIfSigned;
1949 if (isSignBitCheck(Pred, C, TrueIfSigned) &&
1950 match(Or, m_c_Or(m_Add(m_Value(X), m_AllOnes()), m_Deferred(X)))) {
1951 auto NewPred = TrueIfSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGT;
1952 Constant *NewC = ConstantInt::get(X->getType(), TrueIfSigned ? 1 : 0);
1953 return new ICmpInst(NewPred, X, NewC);
1954 }
1955
1956 if (!Cmp.isEquality() || !C.isZero() || !Or->hasOneUse())
1957 return nullptr;
1958
1959 Value *P, *Q;
1960 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1961 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1962 // -> and (icmp eq P, null), (icmp eq Q, null).
1963 Value *CmpP =
1964 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1965 Value *CmpQ =
1966 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1967 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1968 return BinaryOperator::Create(BOpc, CmpP, CmpQ);
1969 }
1970
1971 // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
1972 // a shorter form that has more potential to be folded even further.
1973 Value *X1, *X2, *X3, *X4;
1974 if (match(OrOp0, m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
1975 match(OrOp1, m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
1976 // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
1977 // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
1978 Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
1979 Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
1980 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1981 return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
1982 }
1983
1984 return nullptr;
1985 }
1986
1987 /// Fold icmp (mul X, Y), C.
1988 Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
1989 BinaryOperator *Mul,
1990 const APInt &C) {
1991 const APInt *MulC;
1992 if (!match(Mul->getOperand(1), m_APInt(MulC)))
1993 return nullptr;
1994
1995 // If this is a test of the sign bit and the multiply is sign-preserving with
1996 // a constant operand, use the multiply LHS operand instead.
1997 ICmpInst::Predicate Pred = Cmp.getPredicate();
1998 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
1999 if (MulC->isNegative())
2000 Pred = ICmpInst::getSwappedPredicate(Pred);
2001 return new ICmpInst(Pred, Mul->getOperand(0),
2002 Constant::getNullValue(Mul->getType()));
2003 }
2004
2005 if (MulC->isZero() || !(Mul->hasNoSignedWrap() || Mul->hasNoUnsignedWrap()))
2006 return nullptr;
2007
2008 // If the multiply does not wrap, try to divide the compare constant by the
2009 // multiplication factor.
2010 if (Cmp.isEquality()) {
2011 // (mul nsw X, MulC) == C --> X == C /s MulC
2012 if (Mul->hasNoSignedWrap() && C.srem(*MulC).isZero()) {
2013 Constant *NewC = ConstantInt::get(Mul->getType(), C.sdiv(*MulC));
2014 return new ICmpInst(Pred, Mul->getOperand(0), NewC);
2015 }
2016 // (mul nuw X, MulC) == C --> X == C /u MulC
2017 if (Mul->hasNoUnsignedWrap() && C.urem(*MulC).isZero()) {
2018 Constant *NewC = ConstantInt::get(Mul->getType(), C.udiv(*MulC));
2019 return new ICmpInst(Pred, Mul->getOperand(0), NewC);
2020 }
2021 }
2022
2023 Constant *NewC = nullptr;
2024
2025 // FIXME: Add assert that Pred is not equal to ICMP_SGE, ICMP_SLE,
2026 // ICMP_UGE, ICMP_ULE.
2027
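// For example, (icmp slt (mul nsw X, 5), 12) becomes (icmp slt X, 3) because
// 5 * X <s 12 holds exactly when X <s 3.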
2028 if (Mul->hasNoSignedWrap()) {
2029 if (MulC->isNegative()) {
2030 // MININT / -1 --> overflow.
2031 if (C.isMinSignedValue() && MulC->isAllOnes())
2032 return nullptr;
2033 Pred = ICmpInst::getSwappedPredicate(Pred);
2034 }
2035 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE)
2036 NewC = ConstantInt::get(
2037 Mul->getType(),
2038 APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::UP));
2039 if (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_SGT)
2040 NewC = ConstantInt::get(
2041 Mul->getType(),
2042 APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::DOWN));
2043 } else if (Mul->hasNoUnsignedWrap()) {
2044 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)
2045 NewC = ConstantInt::get(
2046 Mul->getType(),
2047 APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::UP));
2048 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)
2049 NewC = ConstantInt::get(
2050 Mul->getType(),
2051 APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::DOWN));
2052 }
2053
2054 return NewC ? new ICmpInst(Pred, Mul->getOperand(0), NewC) : nullptr;
2055 }
2056
2057 /// Fold icmp (shl 1, Y), C.
2058 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
2059 const APInt &C) {
2060 Value *Y;
2061 if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
2062 return nullptr;
2063
2064 Type *ShiftType = Shl->getType();
2065 unsigned TypeBits = C.getBitWidth();
2066 bool CIsPowerOf2 = C.isPowerOf2();
2067 ICmpInst::Predicate Pred = Cmp.getPredicate();
2068 if (Cmp.isUnsigned()) {
2069 // (1 << Y) pred C -> Y pred Log2(C)
2070 if (!CIsPowerOf2) {
2071 // (1 << Y) < 30 -> Y <= 4
2072 // (1 << Y) <= 30 -> Y <= 4
2073 // (1 << Y) >= 30 -> Y > 4
2074 // (1 << Y) > 30 -> Y > 4
2075 if (Pred == ICmpInst::ICMP_ULT)
2076 Pred = ICmpInst::ICMP_ULE;
2077 else if (Pred == ICmpInst::ICMP_UGE)
2078 Pred = ICmpInst::ICMP_UGT;
2079 }
2080
2081 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
2082 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31
2083 unsigned CLog2 = C.logBase2();
2084 if (CLog2 == TypeBits - 1) {
2085 if (Pred == ICmpInst::ICMP_UGE)
2086 Pred = ICmpInst::ICMP_EQ;
2087 else if (Pred == ICmpInst::ICMP_ULT)
2088 Pred = ICmpInst::ICMP_NE;
2089 }
2090 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
2091 } else if (Cmp.isSigned()) {
2092 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2093 if (C.isAllOnes()) {
2094 // (1 << Y) <= -1 -> Y == 31
2095 if (Pred == ICmpInst::ICMP_SLE)
2096 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2097
2098 // (1 << Y) > -1 -> Y != 31
2099 if (Pred == ICmpInst::ICMP_SGT)
2100 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2101 } else if (!C) {
2102 // (1 << Y) < 0 -> Y == 31
2103 // (1 << Y) <= 0 -> Y == 31
2104 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
2105 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2106
2107 // (1 << Y) >= 0 -> Y != 31
2108 // (1 << Y) > 0 -> Y != 31
2109 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
2110 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2111 }
2112 } else if (Cmp.isEquality() && CIsPowerOf2) {
2113 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2()));
2114 }
2115
2116 return nullptr;
2117 }
2118
2119 /// Fold icmp (shl X, Y), C.
2120 Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2121 BinaryOperator *Shl,
2122 const APInt &C) {
2123 const APInt *ShiftVal;
2124 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2125 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2126
2127 const APInt *ShiftAmt;
2128 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2129 return foldICmpShlOne(Cmp, Shl, C);
2130
2131 // Check that the shift amount is in range. If not, don't perform undefined
2132 // shifts. When the shift is visited, it will be simplified.
2133 unsigned TypeBits = C.getBitWidth();
2134 if (ShiftAmt->uge(TypeBits))
2135 return nullptr;
2136
2137 ICmpInst::Predicate Pred = Cmp.getPredicate();
2138 Value *X = Shl->getOperand(0);
2139 Type *ShType = Shl->getType();
2140
2141 // NSW guarantees that we are only shifting out sign bits from the high bits,
2142 // so we can ASHR the compare constant without needing a mask and eliminate
2143 // the shift.
2144 if (Shl->hasNoSignedWrap()) {
2145 if (Pred == ICmpInst::ICMP_SGT) {
2146 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2147 APInt ShiftedC = C.ashr(*ShiftAmt);
2148 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2149 }
2150 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2151 C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2152 APInt ShiftedC = C.ashr(*ShiftAmt);
2153 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2154 }
2155 if (Pred == ICmpInst::ICMP_SLT) {
2156 // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2157 // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2158 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2159 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2160 assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2161 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2162 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2163 }
2164 // If this is a signed comparison to 0 and the shift is sign preserving,
2165 // use the shift LHS operand instead; isSignTest may change 'Pred', so only
2166 // do that if we're sure to not continue on in this function.
2167 if (isSignTest(Pred, C))
2168 return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
2169 }
2170
2171 // NUW guarantees that we are only shifting out zero bits from the high bits,
2172 // so we can LSHR the compare constant without needing a mask and eliminate
2173 // the shift.
2174 if (Shl->hasNoUnsignedWrap()) {
2175 if (Pred == ICmpInst::ICMP_UGT) {
2176 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2177 APInt ShiftedC = C.lshr(*ShiftAmt);
2178 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2179 }
2180 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2181 C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2182 APInt ShiftedC = C.lshr(*ShiftAmt);
2183 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2184 }
2185 if (Pred == ICmpInst::ICMP_ULT) {
2186 // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2187 // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2188 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2189 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2190 assert(C.ugt(0) && "ult 0 should have been eliminated");
2191 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2192 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2193 }
2194 }
2195
2196 if (Cmp.isEquality() && Shl->hasOneUse()) {
2197 // Strength-reduce the shift into an 'and'.
2198 Constant *Mask = ConstantInt::get(
2199 ShType,
2200 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2201 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2202 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2203 return new ICmpInst(Pred, And, LShrC);
2204 }
2205
2206 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2207 bool TrueIfSigned = false;
2208 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2209 // (X << 31) <s 0 --> (X & 1) != 0
2210 Constant *Mask = ConstantInt::get(
2211 ShType,
2212 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2213 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2214 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2215 And, Constant::getNullValue(ShType));
2216 }
2217
2218 // Simplify 'shl' inequality test into 'and' equality test.
2219 if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2220 // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2221 if ((C + 1).isPowerOf2() &&
2222 (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2223 Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2224 return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2225 : ICmpInst::ICMP_NE,
2226 And, Constant::getNullValue(ShType));
2227 }
2228 // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2229 if (C.isPowerOf2() &&
2230 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2231 Value *And =
2232 Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2233 return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2234 : ICmpInst::ICMP_NE,
2235 And, Constant::getNullValue(ShType));
2236 }
2237 }
2238
2239 // Transform (icmp pred iM (shl iM %v, N), C)
2240 // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N)))
2241 // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N is a legal integer width.
2242 // This enables us to get rid of the shift in favor of a trunc that may be
2243 // free on the target. It has the additional benefit of comparing to a
2244 // smaller constant that may be more target-friendly.
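// For example, when i16 is a legal integer type:
// (icmp ult (shl i32 %v, 16), 0x12340000) --> (icmp ult (trunc %v to i16), 0x1234)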
2245 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2246 if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
2247 DL.isLegalInteger(TypeBits - Amt)) {
2248 Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2249 if (auto *ShVTy = dyn_cast<VectorType>(ShType))
2250 TruncTy = VectorType::get(TruncTy, ShVTy->getElementCount());
2251 Constant *NewC =
2252 ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2253 return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
2254 }
2255
2256 return nullptr;
2257 }
2258
2259 /// Fold icmp ({al}shr X, Y), C.
2260 Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2261 BinaryOperator *Shr,
2262 const APInt &C) {
2263 // An exact shr only shifts out zero bits, so:
2264 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2265 Value *X = Shr->getOperand(0);
2266 CmpInst::Predicate Pred = Cmp.getPredicate();
2267 if (Cmp.isEquality() && Shr->isExact() && C.isZero())
2268 return new ICmpInst(Pred, X, Cmp.getOperand(1));
2269
2270 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2271 const APInt *ShiftValC;
2272 if (match(X, m_APInt(ShiftValC))) {
2273 if (Cmp.isEquality())
2274 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftValC);
2275
2276 // (ShiftValC >> Y) >s -1 --> Y != 0 with ShiftValC < 0
2277 // (ShiftValC >> Y) <s 0 --> Y == 0 with ShiftValC < 0
2278 bool TrueIfSigned;
2279 if (!IsAShr && ShiftValC->isNegative() &&
2280 isSignBitCheck(Pred, C, TrueIfSigned))
2281 return new ICmpInst(TrueIfSigned ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE,
2282 Shr->getOperand(1),
2283 ConstantInt::getNullValue(X->getType()));
2284
2285 // If the shifted constant is a power-of-2, test the shift amount directly:
2286 // (ShiftValC >> Y) >u C --> Y <u (LZ(C) - LZ(ShiftValC))
2287 // (ShiftValC >> Y) <u C --> Y >=u (LZ(C-1) - LZ(ShiftValC))
2288 if (!IsAShr && ShiftValC->isPowerOf2() &&
2289 (Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_ULT)) {
2290 bool IsUGT = Pred == CmpInst::ICMP_UGT;
2291 assert(ShiftValC->uge(C) && "Expected simplify of compare");
2292 assert((IsUGT || !C.isZero()) && "Expected X u< 0 to simplify");
2293
2294 unsigned CmpLZ =
2295 IsUGT ? C.countLeadingZeros() : (C - 1).countLeadingZeros();
2296 unsigned ShiftLZ = ShiftValC->countLeadingZeros();
2297 Constant *NewC = ConstantInt::get(Shr->getType(), CmpLZ - ShiftLZ);
2298 auto NewPred = IsUGT ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
2299 return new ICmpInst(NewPred, Shr->getOperand(1), NewC);
2300 }
2301 }
2302
2303 const APInt *ShiftAmtC;
2304 if (!match(Shr->getOperand(1), m_APInt(ShiftAmtC)))
2305 return nullptr;
2306
2307 // Check that the shift amount is in range. If not, don't perform undefined
2308 // shifts. When the shift is visited it will be simplified.
2309 unsigned TypeBits = C.getBitWidth();
2310 unsigned ShAmtVal = ShiftAmtC->getLimitedValue(TypeBits);
2311 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2312 return nullptr;
2313
2314 bool IsExact = Shr->isExact();
2315 Type *ShrTy = Shr->getType();
2316 // TODO: If we could guarantee that InstSimplify would handle all of the
2317 // constant-value-based preconditions in the folds below, then we could assert
2318 // those conditions rather than checking them. This is difficult because of
2319 // undef/poison (PR34838).
2320 if (IsAShr) {
2321 if (IsExact || Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) {
2322 // When ShAmtC can be shifted losslessly:
2323 // icmp PRED (ashr exact X, ShAmtC), C --> icmp PRED X, (C << ShAmtC)
2324 // icmp slt/ult (ashr X, ShAmtC), C --> icmp slt/ult X, (C << ShAmtC)
2325 APInt ShiftedC = C.shl(ShAmtVal);
2326 if (ShiftedC.ashr(ShAmtVal) == C)
2327 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2328 }
2329 if (Pred == CmpInst::ICMP_SGT) {
2330 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2331 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2332 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2333 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2334 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2335 }
2336 if (Pred == CmpInst::ICMP_UGT) {
2337 // icmp ugt (ashr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2338 // 'C + 1 << ShAmtC' can overflow as a signed number, so the 2nd
2339 // clause accounts for that pattern.
2340 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2341 if ((ShiftedC + 1).ashr(ShAmtVal) == (C + 1) ||
2342 (C + 1).shl(ShAmtVal).isMinSignedValue())
2343 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2344 }
2345
2346 // If the compare constant has significant bits above the lowest sign-bit,
2347 // then convert an unsigned cmp to a test of the sign-bit:
2348 // (ashr X, ShiftC) u> C --> X s< 0
2349 // (ashr X, ShiftC) u< C --> X s> -1
2350 if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) {
2351 if (Pred == CmpInst::ICMP_UGT) {
2352 return new ICmpInst(CmpInst::ICMP_SLT, X,
2353 ConstantInt::getNullValue(ShrTy));
2354 }
2355 if (Pred == CmpInst::ICMP_ULT) {
2356 return new ICmpInst(CmpInst::ICMP_SGT, X,
2357 ConstantInt::getAllOnesValue(ShrTy));
2358 }
2359 }
2360 } else {
2361 if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2362 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2363 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2364 APInt ShiftedC = C.shl(ShAmtVal);
2365 if (ShiftedC.lshr(ShAmtVal) == C)
2366 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2367 }
2368 if (Pred == CmpInst::ICMP_UGT) {
2369 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2370 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2371 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2372 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2373 }
2374 }
2375
2376 if (!Cmp.isEquality())
2377 return nullptr;
2378
2379 // Handle equality comparisons of shift-by-constant.
2380
2381 // If the comparison constant changes with the shift, the comparison cannot
2382 // succeed (bits of the comparison constant cannot match the shifted value).
2383 // This should be known by InstSimplify and already be folded to true/false.
2384 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2385 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2386 "Expected icmp+shr simplify did not occur.");
2387
2388 // If the bits shifted out are known zero, compare the unshifted value:
2389 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
2390 if (Shr->isExact())
2391 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2392
2393 if (C.isZero()) {
2394 // == 0 is u< 1.
2395 if (Pred == CmpInst::ICMP_EQ)
2396 return new ICmpInst(CmpInst::ICMP_ULT, X,
2397 ConstantInt::get(ShrTy, (C + 1).shl(ShAmtVal)));
2398 else
2399 return new ICmpInst(CmpInst::ICMP_UGT, X,
2400 ConstantInt::get(ShrTy, (C + 1).shl(ShAmtVal) - 1));
2401 }
2402
2403 if (Shr->hasOneUse()) {
2404 // Canonicalize the shift into an 'and':
2405 // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2406 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2407 Constant *Mask = ConstantInt::get(ShrTy, Val);
2408 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2409 return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2410 }
2411
2412 return nullptr;
2413 }
2414
2415 Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2416 BinaryOperator *SRem,
2417 const APInt &C) {
2418 // Match an 'is positive' or 'is negative' comparison of remainder by a
2419 // constant power-of-2 value:
2420 // (X % pow2C) sgt/slt 0
2421 const ICmpInst::Predicate Pred = Cmp.getPredicate();
2422 if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT &&
2423 Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2424 return nullptr;
2425
2426 // TODO: The one-use check is standard because we do not typically want to
2427 // create longer instruction sequences, but this might be a special-case
2428 // because srem is not good for analysis or codegen.
2429 if (!SRem->hasOneUse())
2430 return nullptr;
2431
2432 const APInt *DivisorC;
2433 if (!match(SRem->getOperand(1), m_Power2(DivisorC)))
2434 return nullptr;
2435
2436 // For cmp_sgt/cmp_slt, only zero-valued C is handled.
2437 // For cmp_eq/cmp_ne, only strictly positive C is handled.
2438 if (((Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT) &&
2439 !C.isZero()) ||
2440 ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2441 !C.isStrictlyPositive()))
2442 return nullptr;
2443
2444 // Mask off the sign bit and the modulo bits (low-bits).
2445 Type *Ty = SRem->getType();
2446 APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2447 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2448 Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2449
2450 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)
2451 return new ICmpInst(Pred, And, ConstantInt::get(Ty, C));
2452
2453 // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2454 // bit is set. Example:
2455 // (i8 X % 32) s> 0 --> (X & 159) s> 0
2456 if (Pred == ICmpInst::ICMP_SGT)
2457 return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2458
2459 // For 'is negative?' check that the sign-bit is set and at least 1 masked
2460 // bit is set. Example:
2461 // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2462 return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2463 }
2464
2465 /// Fold icmp (udiv X, Y), C.
2466 Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2467 BinaryOperator *UDiv,
2468 const APInt &C) {
2469 ICmpInst::Predicate Pred = Cmp.getPredicate();
2470 Value *X = UDiv->getOperand(0);
2471 Value *Y = UDiv->getOperand(1);
2472 Type *Ty = UDiv->getType();
2473
2474 const APInt *C2;
2475 if (!match(X, m_APInt(C2)))
2476 return nullptr;
2477
2478 assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2479
2480 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
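// For example, (icmp ugt (udiv i32 100, %y), 9) becomes (icmp ule i32 %y, 10).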
2481 if (Pred == ICmpInst::ICMP_UGT) {
2482 assert(!C.isMaxValue() &&
2483 "icmp ugt X, UINT_MAX should have been simplified already.");
2484 return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2485 ConstantInt::get(Ty, C2->udiv(C + 1)));
2486 }
2487
2488 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2489 if (Pred == ICmpInst::ICMP_ULT) {
2490 assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2491 return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2492 ConstantInt::get(Ty, C2->udiv(C)));
2493 }
2494
2495 return nullptr;
2496 }
2497
2498 /// Fold icmp ({su}div X, Y), C.
2499 Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2500 BinaryOperator *Div,
2501 const APInt &C) {
2502 ICmpInst::Predicate Pred = Cmp.getPredicate();
2503 Value *X = Div->getOperand(0);
2504 Value *Y = Div->getOperand(1);
2505 Type *Ty = Div->getType();
2506 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2507
2508 // If unsigned division and the compare constant is bigger than
2509 // UMAX/2 (negative), there's only one pair of values that satisfies an
2510 // equality check, so eliminate the division:
2511 // (X u/ Y) == C --> (X == C) && (Y == 1)
2512 // (X u/ Y) != C --> (X != C) || (Y != 1)
2513 // Similarly, if signed division and the compare constant is exactly SMIN:
2514 // (X s/ Y) == SMIN --> (X == SMIN) && (Y == 1)
2515 // (X s/ Y) != SMIN --> (X != SMIN) || (Y != 1)
2516 if (Cmp.isEquality() && Div->hasOneUse() && C.isSignBitSet() &&
2517 (!DivIsSigned || C.isMinSignedValue())) {
2518 Value *XBig = Builder.CreateICmp(Pred, X, ConstantInt::get(Ty, C));
2519 Value *YOne = Builder.CreateICmp(Pred, Y, ConstantInt::get(Ty, 1));
2520 auto Logic = Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2521 return BinaryOperator::Create(Logic, XBig, YOne);
2522 }
2523
2524 // Fold: icmp pred ([us]div X, C2), C -> range test
2525 // Fold this div into the comparison, producing a range check.
2526 // Determine, based on the divide type, what the range is being
2527 // checked. If there is an overflow on the low or high side, remember
2528 // it, otherwise compute the range [low, hi) bounding the new value.
2529 // See: InsertRangeTest above for the kinds of replacements possible.
2530 const APInt *C2;
2531 if (!match(Y, m_APInt(C2)))
2532 return nullptr;
2533
2534 // FIXME: If the operand types don't match the type of the divide
2535 // then don't attempt this transform. The code below doesn't have the
2536 // logic to deal with a signed divide and an unsigned compare (and
2537 // vice versa). This is because (x /s C2) <s C produces different
2538 // results than (x /s C2) <u C or (x /u C2) <s C or even
2539 // (x /u C2) <u C. Simply casting the operands and result won't
2540 // work. :( The if statement below tests that condition and bails
2541 // if it finds it.
2542 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2543 return nullptr;
2544
2545 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2546 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2547 // division-by-constant cases should be present, we cannot assert that they
2548 // have happened before we reach this icmp instruction.
2549 if (C2->isZero() || C2->isOne() || (DivIsSigned && C2->isAllOnes()))
2550 return nullptr;
2551
2552 // Compute Prod = C * C2. We are essentially solving an equation of
2553 // form X / C2 = C. We solve for X by multiplying C2 and C.
2554 // By solving for X, we can turn this into a range check instead of computing
2555 // a divide.
2556 APInt Prod = C * *C2;
2557
2558 // Determine if the product overflows by seeing if the product is not equal to
2559 // the divide. Make sure we do the same kind of divide as in the LHS
2560 // instruction that we're folding.
2561 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2562
2563 // If the division is known to be exact, then there is no remainder from the
2564 // divide, so the covered range size is unit, otherwise it is the divisor.
2565 APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2566
2567 // Figure out the interval that is being checked. For example, a comparison
2568 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2569 // Compute this interval based on the constants involved and the signedness of
2570 // the compare/divide. This computes a half-open interval, keeping track of
2571 // whether either value in the interval overflows. After analysis each
2572 // overflow variable is set to 0 if its corresponding bound variable is valid,
2573 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2574 int LoOverflow = 0, HiOverflow = 0;
2575 APInt LoBound, HiBound;
2576
2577 if (!DivIsSigned) { // udiv
2578 // e.g. X/5 op 3 --> [15, 20)
2579 LoBound = Prod;
2580 HiOverflow = LoOverflow = ProdOV;
2581 if (!HiOverflow) {
2582 // If this is not an exact divide, then many values in the range collapse
2583 // to the same result value.
2584 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2585 }
2586 } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2587 if (C.isZero()) { // (X / pos) op 0
2588 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
2589 LoBound = -(RangeSize - 1);
2590 HiBound = RangeSize;
2591 } else if (C.isStrictlyPositive()) { // (X / pos) op pos
2592 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
2593 HiOverflow = LoOverflow = ProdOV;
2594 if (!HiOverflow)
2595 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2596 } else { // (X / pos) op neg
2597 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
2598 HiBound = Prod + 1;
2599 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2600 if (!LoOverflow) {
2601 APInt DivNeg = -RangeSize;
2602 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2603 }
2604 }
2605 } else if (C2->isNegative()) { // Divisor is < 0.
2606 if (Div->isExact())
2607 RangeSize.negate();
2608 if (C.isZero()) { // (X / neg) op 0
2609 // e.g. X/-5 op 0 --> [-4, 5)
2610 LoBound = RangeSize + 1;
2611 HiBound = -RangeSize;
2612 if (HiBound == *C2) { // -INTMIN = INTMIN
2613 HiOverflow = 1; // [INTMIN+1, overflow)
2614 HiBound = APInt(); // e.g. X/INTMIN = 0 --> X > INTMIN
2615 }
2616 } else if (C.isStrictlyPositive()) { // (X / neg) op pos
2617 // e.g. X/-5 op 3 --> [-19, -14)
2618 HiBound = Prod + 1;
2619 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2620 if (!LoOverflow)
2621 LoOverflow =
2622 addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2623 } else { // (X / neg) op neg
2624 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
2625 LoOverflow = HiOverflow = ProdOV;
2626 if (!HiOverflow)
2627 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2628 }
2629
2630 // Dividing by a negative swaps the condition. LT <-> GT
2631 Pred = ICmpInst::getSwappedPredicate(Pred);
2632 }
2633
2634 switch (Pred) {
2635 default:
2636 llvm_unreachable("Unhandled icmp predicate!");
2637 case ICmpInst::ICMP_EQ:
2638 if (LoOverflow && HiOverflow)
2639 return replaceInstUsesWith(Cmp, Builder.getFalse());
2640 if (HiOverflow)
2641 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2642 X, ConstantInt::get(Ty, LoBound));
2643 if (LoOverflow)
2644 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2645 X, ConstantInt::get(Ty, HiBound));
2646 return replaceInstUsesWith(
2647 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2648 case ICmpInst::ICMP_NE:
2649 if (LoOverflow && HiOverflow)
2650 return replaceInstUsesWith(Cmp, Builder.getTrue());
2651 if (HiOverflow)
2652 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2653 X, ConstantInt::get(Ty, LoBound));
2654 if (LoOverflow)
2655 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2656 X, ConstantInt::get(Ty, HiBound));
2657 return replaceInstUsesWith(
2658 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, false));
2659 case ICmpInst::ICMP_ULT:
2660 case ICmpInst::ICMP_SLT:
2661 if (LoOverflow == +1) // Low bound is greater than input range.
2662 return replaceInstUsesWith(Cmp, Builder.getTrue());
2663 if (LoOverflow == -1) // Low bound is less than input range.
2664 return replaceInstUsesWith(Cmp, Builder.getFalse());
2665 return new ICmpInst(Pred, X, ConstantInt::get(Ty, LoBound));
2666 case ICmpInst::ICMP_UGT:
2667 case ICmpInst::ICMP_SGT:
2668 if (HiOverflow == +1) // High bound greater than input range.
2669 return replaceInstUsesWith(Cmp, Builder.getFalse());
2670 if (HiOverflow == -1) // High bound less than input range.
2671 return replaceInstUsesWith(Cmp, Builder.getTrue());
2672 if (Pred == ICmpInst::ICMP_UGT)
2673 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, HiBound));
2674 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, HiBound));
2675 }
2676
2677 return nullptr;
2678 }
2679
2680 /// Fold icmp (sub X, Y), C.
2681 Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2682 BinaryOperator *Sub,
2683 const APInt &C) {
2684 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2685 ICmpInst::Predicate Pred = Cmp.getPredicate();
2686 Type *Ty = Sub->getType();
2687
2688 // (SubC - Y) == C --> Y == (SubC - C)
2689 // (SubC - Y) != C --> Y != (SubC - C)
2690 Constant *SubC;
2691 if (Cmp.isEquality() && match(X, m_ImmConstant(SubC))) {
2692 return new ICmpInst(Pred, Y,
2693 ConstantExpr::getSub(SubC, ConstantInt::get(Ty, C)));
2694 }
2695
2696 // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2697 const APInt *C2;
2698 APInt SubResult;
2699 ICmpInst::Predicate SwappedPred = Cmp.getSwappedPredicate();
2700 bool HasNSW = Sub->hasNoSignedWrap();
2701 bool HasNUW = Sub->hasNoUnsignedWrap();
2702 if (match(X, m_APInt(C2)) &&
2703 ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
2704 !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2705 return new ICmpInst(SwappedPred, Y, ConstantInt::get(Ty, SubResult));
2706
2707 // X - Y == 0 --> X == Y.
2708 // X - Y != 0 --> X != Y.
2709 // TODO: We allow this with multiple uses as long as the other uses are not
2710 // in phis. The phi use check is guarding against a codegen regression
2711 // for a loop test. If the backend could undo this (and possibly
2712 // subsequent transforms), we would not need this hack.
2713 if (Cmp.isEquality() && C.isZero() &&
2714 none_of((Sub->users()), [](const User *U) { return isa<PHINode>(U); }))
2715 return new ICmpInst(Pred, X, Y);
2716
2717 // The following transforms are only worth it if the only user of the subtract
2718 // is the icmp.
2719 // TODO: This is an artificial restriction for all of the transforms below
2720 // that only need a single replacement icmp. Can these use the phi test
2721 // like the transform above here?
2722 if (!Sub->hasOneUse())
2723 return nullptr;
2724
2725 if (Sub->hasNoSignedWrap()) {
2726 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2727 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
2728 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2729
2730 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2731 if (Pred == ICmpInst::ICMP_SGT && C.isZero())
2732 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2733
2734 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2735 if (Pred == ICmpInst::ICMP_SLT && C.isZero())
2736 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2737
2738 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2739 if (Pred == ICmpInst::ICMP_SLT && C.isOne())
2740 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2741 }
2742
2743 if (!match(X, m_APInt(C2)))
2744 return nullptr;
2745
2746 // C2 - Y <u C -> (Y | (C - 1)) == C2
2747 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
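// For illustration (i8, arbitrary values satisfying the conditions above):
//   (7 - %y) u< 4 -> (%y | 3) == 7; both sides hold exactly for %y in [4, 7].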
2748 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
2749 (*C2 & (C - 1)) == (C - 1))
2750 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
2751
2752 // C2 - Y >u C -> (Y | C) != C2
2753 // iff C2 & C == C and C + 1 is a power of 2
2754 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
2755 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
2756
2757 // We have handled special cases that reduce.
2758 // Canonicalize any remaining sub to add as:
2759 // (C2 - Y) > C --> (Y + ~C2) < ~C
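// For illustration (i8): (5 - %y) u> 3 --> (%y + 250) u< 252, using ~5 == 250
// and ~3 == 252; bitwise-not reverses the unsigned (and signed) order, which
// is why the swapped predicate is used below.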
2760 Value *Add = Builder.CreateAdd(Y, ConstantInt::get(Ty, ~(*C2)), "notsub",
2761 HasNUW, HasNSW);
2762 return new ICmpInst(SwappedPred, Add, ConstantInt::get(Ty, ~C));
2763 }
2764
2765 /// Fold icmp (add X, Y), C.
2766 Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
2767 BinaryOperator *Add,
2768 const APInt &C) {
2769 Value *Y = Add->getOperand(1);
2770 const APInt *C2;
2771 if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2772 return nullptr;
2773
2774 // Fold icmp pred (add X, C2), C.
2775 Value *X = Add->getOperand(0);
2776 Type *Ty = Add->getType();
2777 const CmpInst::Predicate Pred = Cmp.getPredicate();
2778
2779 // If the add does not wrap, we can always adjust the compare by subtracting
2780 // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
2781 // are canonicalized to SGT/SLT/UGT/ULT.
2782 if ((Add->hasNoSignedWrap() &&
2783 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
2784 (Add->hasNoUnsignedWrap() &&
2785 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
2786 bool Overflow;
2787 APInt NewC =
2788 Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
2789 // If there is overflow, the result must be true or false.
2790 // TODO: Can we assert there is no overflow because InstSimplify always
2791 // handles those cases?
2792 if (!Overflow)
2793 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2794 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2795 }
2796
2797 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
2798 const APInt &Upper = CR.getUpper();
2799 const APInt &Lower = CR.getLower();
2800 if (Cmp.isSigned()) {
2801 if (Lower.isSignMask())
2802 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2803 if (Upper.isSignMask())
2804 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2805 } else {
2806 if (Lower.isMinValue())
2807 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2808 if (Upper.isMinValue())
2809 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2810 }
2811
2812 // This set of folds is intentionally placed after folds that use no-wrapping
2813 // flags because those folds are likely better for later analysis/codegen.
2814 const APInt SMax = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
2815 const APInt SMin = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
2816
2817 // Fold compare with offset to opposite sign compare if it eliminates offset:
2818 // (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
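// For illustration (i8, SMAX == 127): (%x + 1) u> 128 --> %x s< -1,
// i.e. both sides hold exactly for %x in the unsigned range [128, 254].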
2819 if (Pred == CmpInst::ICMP_UGT && C == *C2 + SMax)
2820 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
2821
2822 // (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
2823 if (Pred == CmpInst::ICMP_ULT && C == *C2 + SMin)
2824 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
2825
2826 // (X + C2) >s C --> X <u (SMAX - C) (if C == C2 - 1)
2827 if (Pred == CmpInst::ICMP_SGT && C == *C2 - 1)
2828 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, SMax - C));
2829
2830 // (X + C2) <s C --> X >u (C ^ SMAX) (if C == C2)
2831 if (Pred == CmpInst::ICMP_SLT && C == *C2)
2832 return new ICmpInst(ICmpInst::ICMP_UGT, X, ConstantInt::get(Ty, C ^ SMax));
2833
2834 if (!Add->hasOneUse())
2835 return nullptr;
2836
2837 // X+C2 <u C -> (X & -C) == -C2
2838 // iff C2 & (C-1) == 0
2839 // and C is a power of 2
2840 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
2841 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
2842 ConstantExpr::getNeg(cast<Constant>(Y)));
2843
2844 // X+C2 >u C -> (X & ~C) != -C2
2845 // iff C2 & C == 0
2846 // and C+1 is a power of 2
2847 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
2848 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
2849 ConstantExpr::getNeg(cast<Constant>(Y)));
2850
2851 // The range test idiom can use either ult or ugt. Arbitrarily canonicalize
2852 // to the ult form.
2853 // X+C2 >u C -> X+(C2-C-1) <u ~C
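// For illustration (i8): %x + 10 u> 3 --> %x + 6 u< 252; both exclude exactly
// the four values of %x for which %x + 10 lands in [0, 3].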
2854 if (Pred == ICmpInst::ICMP_UGT)
2855 return new ICmpInst(ICmpInst::ICMP_ULT,
2856 Builder.CreateAdd(X, ConstantInt::get(Ty, *C2 - C - 1)),
2857 ConstantInt::get(Ty, ~C));
2858
2859 return nullptr;
2860 }
2861
2862 bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2863 Value *&RHS, ConstantInt *&Less,
2864 ConstantInt *&Equal,
2865 ConstantInt *&Greater) {
2866 // TODO: Generalize this to work with other comparison idioms or ensure
2867 // they get canonicalized into this form.
2868
2869 // select i1 (a == b),
2870 // i32 Equal,
2871 // i32 (select i1 (a < b), i32 Less, i32 Greater)
2872 // where Equal, Less and Greater are placeholders for any three constants.
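// For illustration only (not taken from a real test), with Equal = 0,
// Less = -1, Greater = 1 the matched IR shape would be:
//   %eq   = icmp eq i32 %a, %b
//   %lt   = icmp slt i32 %a, %b
//   %sel1 = select i1 %lt, i32 -1, i32 1
//   %sel  = select i1 %eq, i32 0, i32 %sel1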
2873 ICmpInst::Predicate PredA;
2874 if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
2875 !ICmpInst::isEquality(PredA))
2876 return false;
2877 Value *EqualVal = SI->getTrueValue();
2878 Value *UnequalVal = SI->getFalseValue();
2879 // We can still get a non-canonical predicate here, so canonicalize.
2880 if (PredA == ICmpInst::ICMP_NE)
2881 std::swap(EqualVal, UnequalVal);
2882 if (!match(EqualVal, m_ConstantInt(Equal)))
2883 return false;
2884 ICmpInst::Predicate PredB;
2885 Value *LHS2, *RHS2;
2886 if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
2887 m_ConstantInt(Less), m_ConstantInt(Greater))))
2888 return false;
2889 // We can get a predicate mismatch here, so canonicalize if possible:
2890 // First, ensure that the 'LHS' values match.
2891 if (LHS2 != LHS) {
2892 // x sgt y <--> y slt x
2893 std::swap(LHS2, RHS2);
2894 PredB = ICmpInst::getSwappedPredicate(PredB);
2895 }
2896 if (LHS2 != LHS)
2897 return false;
2898 // We also need to canonicalize 'RHS'.
2899 if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
2900 // x sgt C-1 <--> x sge C <--> not(x slt C)
2901 auto FlippedStrictness =
2902 InstCombiner::getFlippedStrictnessPredicateAndConstant(
2903 PredB, cast<Constant>(RHS2));
2904 if (!FlippedStrictness)
2905 return false;
2906 assert(FlippedStrictness->first == ICmpInst::ICMP_SGE &&
2907 "basic correctness failure");
2908 RHS2 = FlippedStrictness->second;
2909 // And kind-of perform the result swap.
2910 std::swap(Less, Greater);
2911 PredB = ICmpInst::ICMP_SLT;
2912 }
2913 return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
2914 }
2915
2916 Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
2917 SelectInst *Select,
2918 ConstantInt *C) {
2919
2920 assert(C && "Cmp RHS should be a constant int!");
2921 // If we're testing a constant value against the result of a three way
2922 // comparison, the result can be expressed directly in terms of the
2923 // original values being compared. Note: We could possibly be more
2924 // aggressive here and remove the hasOneUse test. The original select is
2925 // really likely to simplify or sink when we remove a test of the result.
2926 Value *OrigLHS, *OrigRHS;
2927 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
2928 if (Cmp.hasOneUse() &&
2929 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
2930 C3GreaterThan)) {
2931 assert(C1LessThan && C2Equal && C3GreaterThan);
2932
2933 bool TrueWhenLessThan =
2934 ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
2935 ->isAllOnesValue();
2936 bool TrueWhenEqual =
2937 ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
2938 ->isAllOnesValue();
2939 bool TrueWhenGreaterThan =
2940 ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
2941 ->isAllOnesValue();
2942
2943 // This generates the new instruction that will replace the original Cmp
2944 // Instruction. Instead of enumerating the various combinations when
2945 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
2946 // false, we rely on chaining of ORs and future passes of InstCombine to
2947 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
2948
2949 // When none of the three constants satisfy the predicate for the RHS (C),
2950 // the entire original Cmp can be simplified to false.
2951 Value *Cond = Builder.getFalse();
2952 if (TrueWhenLessThan)
2953 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
2954 OrigLHS, OrigRHS));
2955 if (TrueWhenEqual)
2956 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
2957 OrigLHS, OrigRHS));
2958 if (TrueWhenGreaterThan)
2959 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
2960 OrigLHS, OrigRHS));
2961
2962 return replaceInstUsesWith(Cmp, Cond);
2963 }
2964 return nullptr;
2965 }
2966
2967 Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) {
2968 auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
2969 if (!Bitcast)
2970 return nullptr;
2971
2972 ICmpInst::Predicate Pred = Cmp.getPredicate();
2973 Value *Op1 = Cmp.getOperand(1);
2974 Value *BCSrcOp = Bitcast->getOperand(0);
2975 Type *SrcType = Bitcast->getSrcTy();
2976 Type *DstType = Bitcast->getType();
2977
2978 // Make sure the bitcast doesn't change between scalar and vector and
2979 // doesn't change the number of vector elements.
2980 if (SrcType->isVectorTy() == DstType->isVectorTy() &&
2981 SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
2982 // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
2983 Value *X;
2984 if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
2985 // icmp eq (bitcast (sitofp X)), 0 --> icmp eq X, 0
2986 // icmp ne (bitcast (sitofp X)), 0 --> icmp ne X, 0
2987 // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
2988 // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
2989 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
2990 Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
2991 match(Op1, m_Zero()))
2992 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2993
2994 // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
2995 if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
2996 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
2997
2998 // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
2999 if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
3000 return new ICmpInst(Pred, X,
3001 ConstantInt::getAllOnesValue(X->getType()));
3002 }
3003
3004 // Zero-equality checks are preserved through unsigned floating-point casts:
3005 // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
3006 // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
3007 if (match(BCSrcOp, m_UIToFP(m_Value(X))))
3008 if (Cmp.isEquality() && match(Op1, m_Zero()))
3009 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3010
3011 // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
3012 // the FP extend/truncate because that cast does not change the sign-bit.
3013 // This is true for all standard IEEE-754 types and the X86 80-bit type.
3014 // The sign-bit is always the most significant bit in those types.
3015 const APInt *C;
3016 bool TrueIfSigned;
3017 if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse() &&
3018 isSignBitCheck(Pred, *C, TrueIfSigned)) {
3019 if (match(BCSrcOp, m_FPExt(m_Value(X))) ||
3020 match(BCSrcOp, m_FPTrunc(m_Value(X)))) {
3021 // (bitcast (fpext/fptrunc X) to iX) < 0 --> (bitcast X to iY) < 0
3022 // (bitcast (fpext/fptrunc X) to iX) > -1 --> (bitcast X to iY) > -1
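// For illustration, with %x : float and assuming the bitcast has one use:
//   icmp slt (bitcast (fpext float %x to double) to i64), 0
//     --> icmp slt (bitcast float %x to i32), 0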
3023 Type *XType = X->getType();
3024
3025 // We can't currently handle PowerPC-style (ppc_fp128) floating point operations here.
3026 if (!(XType->isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
3027 Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
3028 if (auto *XVTy = dyn_cast<VectorType>(XType))
3029 NewType = VectorType::get(NewType, XVTy->getElementCount());
3030 Value *NewBitcast = Builder.CreateBitCast(X, NewType);
3031 if (TrueIfSigned)
3032 return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
3033 ConstantInt::getNullValue(NewType));
3034 else
3035 return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
3036 ConstantInt::getAllOnesValue(NewType));
3037 }
3038 }
3039 }
3040 }
3041
3042 // Test to see if the operands of the icmp are casted versions of other
3043 // values. If the ptr->ptr cast can be stripped off both arguments, do so.
3044 if (DstType->isPointerTy() && (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
3045 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
3046 // so eliminate it as well.
3047 if (auto *BC2 = dyn_cast<BitCastInst>(Op1))
3048 Op1 = BC2->getOperand(0);
3049
3050 Op1 = Builder.CreateBitCast(Op1, SrcType);
3051 return new ICmpInst(Pred, BCSrcOp, Op1);
3052 }
3053
3054 const APInt *C;
3055 if (!match(Cmp.getOperand(1), m_APInt(C)) || !DstType->isIntegerTy() ||
3056 !SrcType->isIntOrIntVectorTy())
3057 return nullptr;
3058
3059 // If this is checking if all elements of a vector compare are set or not,
3060 // invert the casted vector equality compare and test if all compare
3061 // elements are clear or not. Compare against zero is generally easier for
3062 // analysis and codegen.
3063 // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0
3064 // Example: are all elements equal? --> are zero elements not equal?
3065 // TODO: Try harder to reduce compare of 2 freely invertible operands?
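// For illustration: if %v = icmp eq <4 x i32> %a, %b (an icmp is freely invertible),
//   icmp eq i4 (bitcast <4 x i1> %v to i4), -1
//     --> icmp eq i4 (bitcast <4 x i1> (not %v) to i4), 0
// and the inverted compare is expected to simplify to icmp ne <4 x i32> %a, %b.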
3066 if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse() &&
3067 isFreeToInvert(BCSrcOp, BCSrcOp->hasOneUse())) {
3068 Value *Cast = Builder.CreateBitCast(Builder.CreateNot(BCSrcOp), DstType);
3069 return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
3070 }
3071
3072 // If this is checking if all elements of an extended vector are clear or not,
3073 // compare in a narrow type to eliminate the extend:
3074 // icmp eq/ne (bitcast (ext X) to iN), 0 --> icmp eq/ne (bitcast X to iM), 0
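// For illustration:
//   icmp eq i128 (bitcast (sext <4 x i1> %x to <4 x i32>) to i128), 0
//     --> icmp eq i4 (bitcast <4 x i1> %x to i4), 0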
3075 Value *X;
3076 if (Cmp.isEquality() && C->isZero() && Bitcast->hasOneUse() &&
3077 match(BCSrcOp, m_ZExtOrSExt(m_Value(X)))) {
3078 if (auto *VecTy = dyn_cast<FixedVectorType>(X->getType())) {
3079 Type *NewType = Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
3080 Value *NewCast = Builder.CreateBitCast(X, NewType);
3081 return new ICmpInst(Pred, NewCast, ConstantInt::getNullValue(NewType));
3082 }
3083 }
3084
3085 // Folding: icmp <pred> iN X, C
3086 // where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC) to iN
3087 // and C is a splat of a K-bit pattern
3088 // and SC is a constant vector = <C', C', C', ..., C'>
3089 // Into:
3090 // %E = extractelement <M x iK> %vec, i32 C'
3091 // icmp <pred> iK %E, trunc(C)
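// For illustration (arbitrary values): if %s is a splat shuffle of lane 1 of
// <4 x i8> %vec and C == 0x2A2A2A2A (four copies of the i8 pattern 0x2A), then
//   icmp eq i32 (bitcast <4 x i8> %s to i32), C
//     --> %e = extractelement <4 x i8> %vec, i32 1
//         icmp eq i8 %e, 42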
3092 Value *Vec;
3093 ArrayRef<int> Mask;
3094 if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
3095 // Check whether every element of Mask is the same constant
3096 if (is_splat(Mask)) {
3097 auto *VecTy = cast<VectorType>(SrcType);
3098 auto *EltTy = cast<IntegerType>(VecTy->getElementType());
3099 if (C->isSplat(EltTy->getBitWidth())) {
3100 // Fold the icmp based on the value of C
3101 // If C is M copies of an iK sized bit pattern,
3102 // then:
3103 // => %E = extractelement <M x iK> %vec, i32 C'
3104 // icmp <pred> iK %E, trunc(C)
3105 Value *Elem = Builder.getInt32(Mask[0]);
3106 Value *Extract = Builder.CreateExtractElement(Vec, Elem);
3107 Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
3108 return new ICmpInst(Pred, Extract, NewC);
3109 }
3110 }
3111 }
3112 return nullptr;
3113 }
3114
3115 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3116 /// where X is some kind of instruction.
3117 Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
3118 const APInt *C;
3119
3120 if (match(Cmp.getOperand(1), m_APInt(C))) {
3121 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0)))
3122 if (Instruction *I = foldICmpBinOpWithConstant(Cmp, BO, *C))
3123 return I;
3124
3125 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0)))
3126 // For now, we only support constant integers while folding the
3127 // ICMP(SELECT) pattern. We can extend this to support vectors of integers
3128 // similar to the cases handled by binary ops above.
3129 if (auto *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3130 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
3131 return I;
3132
3133 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0)))
3134 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
3135 return I;
3136
3137 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3138 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
3139 return I;
3140 }
3141
3142 if (match(Cmp.getOperand(1), m_APIntAllowUndef(C)))
3143 return foldICmpInstWithConstantAllowUndef(Cmp, *C);
3144
3145 return nullptr;
3146 }
3147
3148 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
3149 /// icmp eq/ne BO, C.
3150 Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
3151 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3152 // TODO: Some of these folds could work with arbitrary constants, but this
3153 // function is limited to scalar and vector splat constants.
3154 if (!Cmp.isEquality())
3155 return nullptr;
3156
3157 ICmpInst::Predicate Pred = Cmp.getPredicate();
3158 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
3159 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
3160 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
3161
3162 switch (BO->getOpcode()) {
3163 case Instruction::SRem:
3164 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
3165 if (C.isZero() && BO->hasOneUse()) {
3166 const APInt *BOC;
3167 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
3168 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
3169 return new ICmpInst(Pred, NewRem,
3170 Constant::getNullValue(BO->getType()));
3171 }
3172 }
3173 break;
3174 case Instruction::Add: {
3175 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
3176 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3177 if (BO->hasOneUse())
3178 return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, BOC));
3179 } else if (C.isZero()) {
3180 // Replace ((add A, B) != 0) with (A != -B) if A or B is
3181 // efficiently invertible, or if the add has just this one use.
3182 if (Value *NegVal = dyn_castNegVal(BOp1))
3183 return new ICmpInst(Pred, BOp0, NegVal);
3184 if (Value *NegVal = dyn_castNegVal(BOp0))
3185 return new ICmpInst(Pred, NegVal, BOp1);
3186 if (BO->hasOneUse()) {
3187 Value *Neg = Builder.CreateNeg(BOp1);
3188 Neg->takeName(BO);
3189 return new ICmpInst(Pred, BOp0, Neg);
3190 }
3191 }
3192 break;
3193 }
3194 case Instruction::Xor:
3195 if (BO->hasOneUse()) {
3196 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3197 // For the xor case, we can xor two constants together, eliminating
3198 // the explicit xor.
3199 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3200 } else if (C.isZero()) {
3201 // Replace ((xor A, B) != 0) with (A != B)
3202 return new ICmpInst(Pred, BOp0, BOp1);
3203 }
3204 }
3205 break;
3206 case Instruction::Or: {
3207 const APInt *BOC;
3208 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3209 // Comparing if all bits outside of a constant mask are set?
3210 // Replace (X | C) == -1 with (X & ~C) == ~C.
3211 // This removes the -1 constant.
3212 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3213 Value *And = Builder.CreateAnd(BOp0, NotBOC);
3214 return new ICmpInst(Pred, And, NotBOC);
3215 }
3216 break;
3217 }
3218 case Instruction::And: {
3219 const APInt *BOC;
3220 if (match(BOp1, m_APInt(BOC))) {
3221 // If we have ((X & C) == C), turn it into ((X & C) != 0).
3222 if (C == *BOC && C.isPowerOf2())
3223 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
3224 BO, Constant::getNullValue(RHS->getType()));
3225 }
3226 break;
3227 }
3228 case Instruction::UDiv:
3229 if (C.isZero()) {
3230 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule B, A)
3231 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3232 return new ICmpInst(NewPred, BOp1, BOp0);
3233 }
3234 break;
3235 default:
3236 break;
3237 }
3238 return nullptr;
3239 }
3240
3241 /// Fold an equality icmp with LLVM intrinsic and constant operand.
3242 Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3243 ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3244 Type *Ty = II->getType();
3245 unsigned BitWidth = C.getBitWidth();
3246 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3247
3248 switch (II->getIntrinsicID()) {
3249 case Intrinsic::abs:
3250 // abs(A) == 0 -> A == 0
3251 // abs(A) == INT_MIN -> A == INT_MIN
3252 if (C.isZero() || C.isMinSignedValue())
3253 return new ICmpInst(Pred, II->getArgOperand(0), ConstantInt::get(Ty, C));
3254 break;
3255
3256 case Intrinsic::bswap:
3257 // bswap(A) == C -> A == bswap(C)
3258 return new ICmpInst(Pred, II->getArgOperand(0),
3259 ConstantInt::get(Ty, C.byteSwap()));
3260
3261 case Intrinsic::ctlz:
3262 case Intrinsic::cttz: {
3263 // ctlz/cttz(A) == bitwidth(A) -> A == 0 and likewise for !=
3264 if (C == BitWidth)
3265 return new ICmpInst(Pred, II->getArgOperand(0),
3266 ConstantInt::getNullValue(Ty));
3267
3268 // cttz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3269 // and Mask1 has bits 0..C set (the low C+1 bits). Similarly for ctlz, but with the high bits.
3270 // Limit to one use to ensure we don't increase instruction count.
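// For illustration (i8): cttz(%a) == 3 --> (%a & 0x0F) == 0x08,
// and ctlz(%a) == 3 --> (%a & 0xF0) == 0x10.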
3271 unsigned Num = C.getLimitedValue(BitWidth);
3272 if (Num != BitWidth && II->hasOneUse()) {
3273 bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3274 APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3275 : APInt::getHighBitsSet(BitWidth, Num + 1);
3276 APInt Mask2 = IsTrailing
3277 ? APInt::getOneBitSet(BitWidth, Num)
3278 : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3279 return new ICmpInst(Pred, Builder.CreateAnd(II->getArgOperand(0), Mask1),
3280 ConstantInt::get(Ty, Mask2));
3281 }
3282 break;
3283 }
3284
3285 case Intrinsic::ctpop: {
3286 // popcount(A) == 0 -> A == 0 and likewise for !=
3287 // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
3288 bool IsZero = C.isZero();
3289 if (IsZero || C == BitWidth)
3290 return new ICmpInst(Pred, II->getArgOperand(0),
3291 IsZero ? Constant::getNullValue(Ty)
3292 : Constant::getAllOnesValue(Ty));
3293
3294 break;
3295 }
3296
3297 case Intrinsic::fshl:
3298 case Intrinsic::fshr:
3299 if (II->getArgOperand(0) == II->getArgOperand(1)) {
3300 const APInt *RotAmtC;
3301 // ror(X, RotAmtC) == C --> X == rol(C, RotAmtC)
3302 // rol(X, RotAmtC) == C --> X == ror(C, RotAmtC)
3303 if (match(II->getArgOperand(2), m_APInt(RotAmtC)))
3304 return new ICmpInst(Pred, II->getArgOperand(0),
3305 II->getIntrinsicID() == Intrinsic::fshl
3306 ? ConstantInt::get(Ty, C.rotr(*RotAmtC))
3307 : ConstantInt::get(Ty, C.rotl(*RotAmtC)));
3308 }
3309 break;
3310
3311 case Intrinsic::uadd_sat: {
3312 // uadd.sat(a, b) == 0 -> (a | b) == 0
3313 if (C.isZero()) {
3314 Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3315 return new ICmpInst(Pred, Or, Constant::getNullValue(Ty));
3316 }
3317 break;
3318 }
3319
3320 case Intrinsic::usub_sat: {
3321 // usub.sat(a, b) == 0 -> a <= b
3322 if (C.isZero()) {
3323 ICmpInst::Predicate NewPred =
3324 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3325 return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3326 }
3327 break;
3328 }
3329 default:
3330 break;
3331 }
3332
3333 return nullptr;
3334 }
3335
3336 /// Fold an equality icmp where both operands are calls to the same LLVM intrinsic.
3337 static Instruction *foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp) {
3338 assert(Cmp.isEquality());
3339
3340 ICmpInst::Predicate Pred = Cmp.getPredicate();
3341 Value *Op0 = Cmp.getOperand(0);
3342 Value *Op1 = Cmp.getOperand(1);
3343 const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3344 const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3345 if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3346 return nullptr;
3347
3348 switch (IIOp0->getIntrinsicID()) {
3349 case Intrinsic::bswap:
3350 case Intrinsic::bitreverse:
3351 // If both operands are byte-swapped or bit-reversed, just compare the
3352 // original values.
3353 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3354 case Intrinsic::fshl:
3355 case Intrinsic::fshr:
3356 // If both operands are rotated by same amount, just compare the
3357 // original values.
3358 if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3359 break;
3360 if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3361 break;
3362 if (IIOp0->getOperand(2) != IIOp1->getOperand(2))
3363 break;
3364 return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3365 default:
3366 break;
3367 }
3368
3369 return nullptr;
3370 }
3371
3372 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3373 /// where X is some kind of instruction and the constant C may contain undef elements.
3374 /// TODO: Move more folds which allow undef to this function.
3375 Instruction *
3376 InstCombinerImpl::foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
3377 const APInt &C) {
3378 const ICmpInst::Predicate Pred = Cmp.getPredicate();
3379 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
3380 switch (II->getIntrinsicID()) {
3381 default:
3382 break;
3383 case Intrinsic::fshl:
3384 case Intrinsic::fshr:
3385 if (Cmp.isEquality() && II->getArgOperand(0) == II->getArgOperand(1)) {
3386 // (rot X, ?) == 0/-1 --> X == 0/-1
3387 if (C.isZero() || C.isAllOnes())
3388 return new ICmpInst(Pred, II->getArgOperand(0), Cmp.getOperand(1));
3389 }
3390 break;
3391 }
3392 }
3393
3394 return nullptr;
3395 }
3396
3397 /// Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
3398 Instruction *InstCombinerImpl::foldICmpBinOpWithConstant(ICmpInst &Cmp,
3399 BinaryOperator *BO,
3400 const APInt &C) {
3401 switch (BO->getOpcode()) {
3402 case Instruction::Xor:
3403 if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
3404 return I;
3405 break;
3406 case Instruction::And:
3407 if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
3408 return I;
3409 break;
3410 case Instruction::Or:
3411 if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
3412 return I;
3413 break;
3414 case Instruction::Mul:
3415 if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
3416 return I;
3417 break;
3418 case Instruction::Shl:
3419 if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
3420 return I;
3421 break;
3422 case Instruction::LShr:
3423 case Instruction::AShr:
3424 if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
3425 return I;
3426 break;
3427 case Instruction::SRem:
3428 if (Instruction *I = foldICmpSRemConstant(Cmp, BO, C))
3429 return I;
3430 break;
3431 case Instruction::UDiv:
3432 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
3433 return I;
3434 LLVM_FALLTHROUGH;
3435 case Instruction::SDiv:
3436 if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
3437 return I;
3438 break;
3439 case Instruction::Sub:
3440 if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
3441 return I;
3442 break;
3443 case Instruction::Add:
3444 if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
3445 return I;
3446 break;
3447 default:
3448 break;
3449 }
3450
3451 // TODO: These folds could be refactored to be part of the above calls.
3452 return foldICmpBinOpEqualityWithConstant(Cmp, BO, C);
3453 }
3454
3455 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
3456 Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
3457 IntrinsicInst *II,
3458 const APInt &C) {
3459 if (Cmp.isEquality())
3460 return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
3461
3462 Type *Ty = II->getType();
3463 unsigned BitWidth = C.getBitWidth();
3464 ICmpInst::Predicate Pred = Cmp.getPredicate();
3465 switch (II->getIntrinsicID()) {
3466 case Intrinsic::ctpop: {
3467 // (ctpop X > BitWidth - 1) --> X == -1
3468 Value *X = II->getArgOperand(0);
3469 if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
3470 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
3471 ConstantInt::getAllOnesValue(Ty));
3472 // (ctpop X < BitWidth) --> X != -1
3473 if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
3474 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
3475 ConstantInt::getAllOnesValue(Ty));
3476 break;
3477 }
3478 case Intrinsic::ctlz: {
3479 // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
3480 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3481 unsigned Num = C.getLimitedValue();
3482 APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3483 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
3484 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3485 }
3486
3487 // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
3488 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3489 unsigned Num = C.getLimitedValue();
3490 APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
3491 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
3492 II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3493 }
3494 break;
3495 }
3496 case Intrinsic::cttz: {
3497 // Limit to one use to ensure we don't increase instruction count.
3498 if (!II->hasOneUse())
3499 return nullptr;
3500
3501 // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
3502 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3503 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
3504 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
3505 Builder.CreateAnd(II->getArgOperand(0), Mask),
3506 ConstantInt::getNullValue(Ty));
3507 }
3508
3509 // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
3510 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3511 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
3512 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
3513 Builder.CreateAnd(II->getArgOperand(0), Mask),
3514 ConstantInt::getNullValue(Ty));
3515 }
3516 break;
3517 }
3518 default:
3519 break;
3520 }
3521
3522 return nullptr;
3523 }
3524
3525 /// Handle icmp with constant (but not simple integer constant) RHS.
3526 Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
3527 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3528 Constant *RHSC = dyn_cast<Constant>(Op1);
3529 Instruction *LHSI = dyn_cast<Instruction>(Op0);
3530 if (!RHSC || !LHSI)
3531 return nullptr;
3532
3533 switch (LHSI->getOpcode()) {
3534 case Instruction::GetElementPtr:
3535 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
3536 if (RHSC->isNullValue() &&
3537 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
3538 return new ICmpInst(
3539 I.getPredicate(), LHSI->getOperand(0),
3540 Constant::getNullValue(LHSI->getOperand(0)->getType()));
3541 break;
3542 case Instruction::PHI:
3543 // Only fold icmp into the PHI if the phi and icmp are in the same
3544 // block. If in the same block, we're encouraging jump threading. If
3545 // not, we are just pessimizing the code by making an i1 phi.
3546 if (LHSI->getParent() == I.getParent())
3547 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
3548 return NV;
3549 break;
3550 case Instruction::IntToPtr:
3551 // icmp pred inttoptr(X), null -> icmp pred X, 0
3552 if (RHSC->isNullValue() &&
3553 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
3554 return new ICmpInst(
3555 I.getPredicate(), LHSI->getOperand(0),
3556 Constant::getNullValue(LHSI->getOperand(0)->getType()));
3557 break;
3558
3559 case Instruction::Load:
3560 // Try to optimize things like "A[i] > 4" to index computations.
3561 if (GetElementPtrInst *GEP =
3562 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
3563 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
3564 if (Instruction *Res =
3565 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, GV, I))
3566 return Res;
3567 break;
3568 }
3569
3570 return nullptr;
3571 }
3572
3573 Instruction *InstCombinerImpl::foldSelectICmp(ICmpInst::Predicate Pred,
3574 SelectInst *SI, Value *RHS,
3575 const ICmpInst &I) {
3576 // Try to fold the comparison into the select arms, which will cause the
3577 // select to be converted into a logical and/or.
3578 auto SimplifyOp = [&](Value *Op, bool SelectCondIsTrue) -> Value * {
3579 if (Value *Res = simplifyICmpInst(Pred, Op, RHS, SQ))
3580 return Res;
3581 if (Optional<bool> Impl = isImpliedCondition(SI->getCondition(), Pred, Op,
3582 RHS, DL, SelectCondIsTrue))
3583 return ConstantInt::get(I.getType(), *Impl);
3584 return nullptr;
3585 };
3586
3587 ConstantInt *CI = nullptr;
3588 Value *Op1 = SimplifyOp(SI->getOperand(1), true);
3589 if (Op1)
3590 CI = dyn_cast<ConstantInt>(Op1);
3591
3592 Value *Op2 = SimplifyOp(SI->getOperand(2), false);
3593 if (Op2)
3594 CI = dyn_cast<ConstantInt>(Op2);
3595
3596 // We only want to perform this transformation if it will not lead to
3597 // additional code. This is true if either both sides of the select
3598 // fold to a constant (in which case the icmp is replaced with a select
3599 // which will usually simplify) or this is the only user of the
3600 // select (in which case we are trading a select+icmp for a simpler
3601 // select+icmp) or all uses of the select can be replaced based on
3602 // dominance information ("Global cases").
3603 bool Transform = false;
3604 if (Op1 && Op2)
3605 Transform = true;
3606 else if (Op1 || Op2) {
3607 // Local case
3608 if (SI->hasOneUse())
3609 Transform = true;
3610 // Global cases
3611 else if (CI && !CI->isZero())
3612 // When Op1 is constant try replacing select with second operand.
3613 // Otherwise Op2 is constant and try replacing select with first
3614 // operand.
3615 Transform = replacedSelectWithOperand(SI, &I, Op1 ? 2 : 1);
3616 }
3617 if (Transform) {
3618 if (!Op1)
3619 Op1 = Builder.CreateICmp(Pred, SI->getOperand(1), RHS, I.getName());
3620 if (!Op2)
3621 Op2 = Builder.CreateICmp(Pred, SI->getOperand(2), RHS, I.getName());
3622 return SelectInst::Create(SI->getOperand(0), Op1, Op2);
3623 }
3624
3625 return nullptr;
3626 }
3627
3628 /// Some comparisons can be simplified.
3629 /// In this case, we are looking for comparisons that look like
3630 /// a check for a lossy truncation.
3631 /// Folds:
3632 /// icmp SrcPred (x & Mask), x to icmp DstPred x, Mask
3633 /// Where Mask is some pattern that produces all-ones in low bits:
3634 /// (-1 >> y)
3635 /// ((-1 << y) >> y) <- non-canonical, has extra uses
3636 /// ~(-1 << y)
3637 /// ((1 << y) + (-1)) <- non-canonical, has extra uses
3638 /// The Mask can be a constant, too.
3639 /// For some predicates, the operands are commutative.
3640 /// For others, x can only be on a specific side.
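/// For illustration (i8, constant mask):
///   icmp eq (and %x, 15), %x --> icmp ule %x, 15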
3641 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
3642 InstCombiner::BuilderTy &Builder) {
3643 ICmpInst::Predicate SrcPred;
3644 Value *X, *M, *Y;
3645 auto m_VariableMask = m_CombineOr(
3646 m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
3647 m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
3648 m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
3649 m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
3650 auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
3651 if (!match(&I, m_c_ICmp(SrcPred,
3652 m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)),
3653 m_Deferred(X))))
3654 return nullptr;
3655
3656 ICmpInst::Predicate DstPred;
3657 switch (SrcPred) {
3658 case ICmpInst::Predicate::ICMP_EQ:
3659 // x & (-1 >> y) == x -> x u<= (-1 >> y)
3660 DstPred = ICmpInst::Predicate::ICMP_ULE;
3661 break;
3662 case ICmpInst::Predicate::ICMP_NE:
3663 // x & (-1 >> y) != x -> x u> (-1 >> y)
3664 DstPred = ICmpInst::Predicate::ICMP_UGT;
3665 break;
3666 case ICmpInst::Predicate::ICMP_ULT:
3667 // x & (-1 >> y) u< x -> x u> (-1 >> y)
3668 // x u> x & (-1 >> y) -> x u> (-1 >> y)
3669 DstPred = ICmpInst::Predicate::ICMP_UGT;
3670 break;
3671 case ICmpInst::Predicate::ICMP_UGE:
3672 // x & (-1 >> y) u>= x -> x u<= (-1 >> y)
3673 // x u<= x & (-1 >> y) -> x u<= (-1 >> y)
3674 DstPred = ICmpInst::Predicate::ICMP_ULE;
3675 break;
3676 case ICmpInst::Predicate::ICMP_SLT:
3677 // x & (-1 >> y) s< x -> x s> (-1 >> y)
3678 // x s> x & (-1 >> y) -> x s> (-1 >> y)
3679 if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3680 return nullptr;
3681 if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3682 return nullptr;
3683 DstPred = ICmpInst::Predicate::ICMP_SGT;
3684 break;
3685 case ICmpInst::Predicate::ICMP_SGE:
3686 // x & (-1 >> y) s>= x -> x s<= (-1 >> y)
3687 // x s<= x & (-1 >> y) -> x s<= (-1 >> y)
3688 if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3689 return nullptr;
3690 if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3691 return nullptr;
3692 DstPred = ICmpInst::Predicate::ICMP_SLE;
3693 break;
3694 case ICmpInst::Predicate::ICMP_SGT:
3695 case ICmpInst::Predicate::ICMP_SLE:
3696 return nullptr;
3697 case ICmpInst::Predicate::ICMP_UGT:
3698 case ICmpInst::Predicate::ICMP_ULE:
3699 llvm_unreachable("Instsimplify took care of commut. variant");
3700 break;
3701 default:
3702 llvm_unreachable("All possible folds are handled.");
3703 }
3704
3705 // The mask value may be a vector constant that has undefined elements. But it
3706 // may not be safe to propagate those undefs into the new compare, so replace
3707 // those elements by copying an existing, defined, and safe scalar constant.
3708 Type *OpTy = M->getType();
3709 auto *VecC = dyn_cast<Constant>(M);
3710 auto *OpVTy = dyn_cast<FixedVectorType>(OpTy);
3711 if (OpVTy && VecC && VecC->containsUndefOrPoisonElement()) {
3712 Constant *SafeReplacementConstant = nullptr;
3713 for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) {
3714 if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
3715 SafeReplacementConstant = VecC->getAggregateElement(i);
3716 break;
3717 }
3718 }
3719 assert(SafeReplacementConstant && "Failed to find undef replacement");
3720 M = Constant::replaceUndefsWith(VecC, SafeReplacementConstant);
3721 }
3722
3723 return Builder.CreateICmp(DstPred, X, M);
3724 }
3725
3726 /// Some comparisons can be simplified.
3727 /// In this case, we are looking for comparisons that look like
3728 /// a check for a lossy signed truncation.
3729 /// Folds: (MaskedBits is a constant.)
3730 /// ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
3731 /// Into:
3732 /// (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
3733 /// Where KeptBits = bitwidth(%x) - MaskedBits
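/// For illustration (i8, MaskedBits = 4, so KeptBits = 4):
///   ((%x << 4) a>> 4) == %x --> (add %x, 8) u< 16
/// i.e. "does %x fit in 4 signed bits" becomes a single unsigned range check.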
3734 static Value *
3735 foldICmpWithTruncSignExtendedVal(ICmpInst &I,
3736 InstCombiner::BuilderTy &Builder) {
3737 ICmpInst::Predicate SrcPred;
3738 Value *X;
3739 const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
3740 // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
3741 if (!match(&I, m_c_ICmp(SrcPred,
3742 m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
3743 m_APInt(C1))),
3744 m_Deferred(X))))
3745 return nullptr;
3746
3747 // Potential handling of non-splats: for each element:
3748 // * if both are undef, replace with constant 0.
3749 // Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
3750 // * if both are not undef, and are different, bailout.
3751 // * else, only one is undef, then pick the non-undef one.
3752
3753 // The shift amount must be equal.
3754 if (*C0 != *C1)
3755 return nullptr;
3756 const APInt &MaskedBits = *C0;
3757 assert(MaskedBits != 0 && "shift by zero should be folded away already.");
3758
3759 ICmpInst::Predicate DstPred;
3760 switch (SrcPred) {
3761 case ICmpInst::Predicate::ICMP_EQ:
3762 // ((%x << MaskedBits) a>> MaskedBits) == %x
3763 // =>
3764 // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
3765 DstPred = ICmpInst::Predicate::ICMP_ULT;
3766 break;
3767 case ICmpInst::Predicate::ICMP_NE:
3768 // ((%x << MaskedBits) a>> MaskedBits) != %x
3769 // =>
3770 // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
3771 DstPred = ICmpInst::Predicate::ICMP_UGE;
3772 break;
3773 // FIXME: are more folds possible?
3774 default:
3775 return nullptr;
3776 }
3777
3778 auto *XType = X->getType();
3779 const unsigned XBitWidth = XType->getScalarSizeInBits();
3780 const APInt BitWidth = APInt(XBitWidth, XBitWidth);
3781 assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
3782
3783 // KeptBits = bitwidth(%x) - MaskedBits
3784 const APInt KeptBits = BitWidth - MaskedBits;
3785 assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
3786 // ICmpCst = (1 << KeptBits)
3787 const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
3788 assert(ICmpCst.isPowerOf2());
3789 // AddCst = (1 << (KeptBits-1))
3790 const APInt AddCst = ICmpCst.lshr(1);
3791 assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
3792
3793 // T0 = add %x, AddCst
3794 Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
3795 // T1 = T0 DstPred ICmpCst
3796 Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
3797
3798 return T1;
3799 }
3800
3801 // Given pattern:
3802 // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3803 // we should move shifts to the same hand of 'and', i.e. rewrite as
3804 // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
3805 // We are only interested in opposite logical shifts here.
3806 // One of the shifts can be truncated.
3807 // If we can, we want to end up creating 'lshr' shift.
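// For illustration (i8, no truncation involved):
//   icmp eq (and (shl i8 %x, 1), (lshr i8 %y, 2)), 0
//     --> icmp eq (and (lshr i8 %y, 3), %x), 0
// assuming the combined shift amount (3) stays below the bit width.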
3808 static Value *
3809 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
3810 InstCombiner::BuilderTy &Builder) {
3811 if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
3812 !I.getOperand(0)->hasOneUse())
3813 return nullptr;
3814
3815 auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
3816
3817 // Look for an 'and' of two logical shifts, one of which may be truncated.
3818 // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
3819 Instruction *XShift, *MaybeTruncation, *YShift;
3820 if (!match(
3821 I.getOperand(0),
3822 m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
3823 m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
3824 m_AnyLogicalShift, m_Instruction(YShift))),
3825 m_Instruction(MaybeTruncation)))))
3826 return nullptr;
3827
3828 // We potentially looked past 'trunc', but only when matching YShift,
3829 // therefore YShift must have the widest type.
3830 Instruction *WidestShift = YShift;
3831 // Therefore XShift must have the shallowest type.
3832 // Or they both have identical types if there was no truncation.
3833 Instruction *NarrowestShift = XShift;
3834
3835 Type *WidestTy = WidestShift->getType();
3836 Type *NarrowestTy = NarrowestShift->getType();
3837 assert(NarrowestTy == I.getOperand(0)->getType() &&
3838 "We did not look past any shifts while matching XShift though.");
3839 bool HadTrunc = WidestTy != I.getOperand(0)->getType();
3840
3841 // If YShift is a 'lshr', swap the shifts around.
3842 if (match(YShift, m_LShr(m_Value(), m_Value())))
3843 std::swap(XShift, YShift);
3844
3845 // The shifts must be in opposite directions.
3846 auto XShiftOpcode = XShift->getOpcode();
3847 if (XShiftOpcode == YShift->getOpcode())
3848 return nullptr; // Do not care about same-direction shifts here.
3849
3850 Value *X, *XShAmt, *Y, *YShAmt;
3851 match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
3852 match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
3853
3854 // If one of the values being shifted is a constant, then we will end with
3855 // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
3856 // however, we will need to ensure that we won't increase instruction count.
3857 if (!isa<Constant>(X) && !isa<Constant>(Y)) {
3858 // At least one of the hands of the 'and' should be one-use shift.
3859 if (!match(I.getOperand(0),
3860 m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
3861 return nullptr;
3862 if (HadTrunc) {
3863 // Due to the 'trunc', we will need to widen X. For that either the old
3864 // 'trunc' or the shift amt in the non-truncated shift should be one-use.
3865 if (!MaybeTruncation->hasOneUse() &&
3866 !NarrowestShift->getOperand(1)->hasOneUse())
3867 return nullptr;
3868 }
3869 }
3870
3871 // We have two shift amounts from two different shifts. The types of those
3872 // shift amounts may not match. If that's the case let's bailout now.
3873 if (XShAmt->getType() != YShAmt->getType())
3874 return nullptr;
3875
3876 // As input, we have the following pattern:
3877 // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3878 // We want to rewrite that as:
3879 // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x)
3880 // While we know that originally (Q+K) would not overflow
3881 // (because 2 * (N-1) u<= iN -1), we have looked past extensions of
3882 // shift amounts, so it may now overflow in a smaller bit width.
3883 // To ensure that does not happen, we need to ensure that the total maximal
3884 // shift amount is still representable in that smaller bit width.
3885 unsigned MaximalPossibleTotalShiftAmount =
3886 (WidestTy->getScalarSizeInBits() - 1) +
3887 (NarrowestTy->getScalarSizeInBits() - 1);
3888 APInt MaximalRepresentableShiftAmount =
3889 APInt::getAllOnes(XShAmt->getType()->getScalarSizeInBits());
3890 if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
3891 return nullptr;
3892
3893 // Can we fold (XShAmt+YShAmt) ?
3894 auto *NewShAmt = dyn_cast_or_null<Constant>(
3895 simplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
3896 /*isNUW=*/false, SQ.getWithInstruction(&I)));
3897 if (!NewShAmt)
3898 return nullptr;
3899 NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
3900 unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
3901
3902 // Is the new shift amount smaller than the bit width?
3903 // FIXME: could also rely on ConstantRange.
3904 if (!match(NewShAmt,
3905 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
3906 APInt(WidestBitWidth, WidestBitWidth))))
3907 return nullptr;
3908
3909 // An extra legality check is needed if we had trunc-of-lshr.
3910 if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
3911 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
3912 WidestShift]() {
3913 // It isn't obvious whether it's worth it to analyze non-constants here.
3914 // Also, let's basically give up on non-splat cases, pessimizing vectors.
3915 // If *any* of these preconditions matches we can perform the fold.
3916 Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
3917 ? NewShAmt->getSplatValue()
3918 : NewShAmt;
3919 // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
3920 if (NewShAmtSplat &&
3921 (NewShAmtSplat->isNullValue() ||
3922 NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
3923 return true;
3924 // We consider *min* leading zeros so a single outlier
3925 // blocks the transform as opposed to allowing it.
3926 if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
3927 KnownBits Known = computeKnownBits(C, SQ.DL);
3928 unsigned MinLeadZero = Known.countMinLeadingZeros();
3929 // If the value being shifted has at most the lowest bit set, we can fold.
3930 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3931 if (MaxActiveBits <= 1)
3932 return true;
3933 // Precondition: NewShAmt u<= countLeadingZeros(C)
3934 if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
3935 return true;
3936 }
3937 if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
3938 KnownBits Known = computeKnownBits(C, SQ.DL);
3939 unsigned MinLeadZero = Known.countMinLeadingZeros();
3940 // If the value being shifted has at most the lowest bit set, we can fold.
3941 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3942 if (MaxActiveBits <= 1)
3943 return true;
3944 // Precondition: ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
3945 if (NewShAmtSplat) {
3946 APInt AdjNewShAmt =
3947 (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
3948 if (AdjNewShAmt.ule(MinLeadZero))
3949 return true;
3950 }
3951 }
3952 return false; // Can't tell if it's ok.
3953 };
3954 if (!CanFold())
3955 return nullptr;
3956 }
3957
3958 // All good, we can do this fold.
3959 X = Builder.CreateZExt(X, WidestTy);
3960 Y = Builder.CreateZExt(Y, WidestTy);
3961 // The shift is the same that was for X.
3962 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
3963 ? Builder.CreateLShr(X, NewShAmt)
3964 : Builder.CreateShl(X, NewShAmt);
3965 Value *T1 = Builder.CreateAnd(T0, Y);
3966 return Builder.CreateICmp(I.getPredicate(), T1,
3967 Constant::getNullValue(WidestTy));
3968 }
3969
3970 /// Fold
3971 /// (-1 u/ x) u< y
3972 /// ((x * y) ?/ x) != y
3973 /// to
3974 /// @llvm.?mul.with.overflow(x, y) plus extraction of overflow bit
3975 /// Note that the comparison is commutative, while inverted (u>=, ==) predicate
3976 /// will mean that we are looking for the opposite answer.
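/// For illustration (i32, unsigned case):
///   %d = udiv i32 -1, %x
///   %r = icmp ult i32 %d, %y
/// becomes
///   %m = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
///   %r = extractvalue { i32, i1 } %m, 1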
3977 Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
3978 ICmpInst::Predicate Pred;
3979 Value *X, *Y;
3980 Instruction *Mul;
3981 Instruction *Div;
3982 bool NeedNegation;
3983 // Look for: (-1 u/ x) u</u>= y
3984 if (!I.isEquality() &&
3985 match(&I, m_c_ICmp(Pred,
3986 m_CombineAnd(m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
3987 m_Instruction(Div)),
3988 m_Value(Y)))) {
3989 Mul = nullptr;
3990
3991 // Are we checking that overflow does not happen, or does happen?
3992 switch (Pred) {
3993 case ICmpInst::Predicate::ICMP_ULT:
3994 NeedNegation = false;
3995 break; // OK
3996 case ICmpInst::Predicate::ICMP_UGE:
3997 NeedNegation = true;
3998 break; // OK
3999 default:
4000 return nullptr; // Wrong predicate.
4001 }
4002 } else // Look for: ((x * y) / x) !=/== y
4003 if (I.isEquality() &&
4004 match(&I,
4005 m_c_ICmp(Pred, m_Value(Y),
4006 m_CombineAnd(
4007 m_OneUse(m_IDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
4008 m_Value(X)),
4009 m_Instruction(Mul)),
4010 m_Deferred(X))),
4011 m_Instruction(Div))))) {
4012 NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
4013 } else
4014 return nullptr;
4015
4016 BuilderTy::InsertPointGuard Guard(Builder);
4017 // If the pattern included (x * y), we'll want to insert new instructions
4018 // right before that original multiplication so that we can replace it.
4019 bool MulHadOtherUses = Mul && !Mul->hasOneUse();
4020 if (MulHadOtherUses)
4021 Builder.SetInsertPoint(Mul);
4022
4023 Function *F = Intrinsic::getDeclaration(I.getModule(),
4024 Div->getOpcode() == Instruction::UDiv
4025 ? Intrinsic::umul_with_overflow
4026 : Intrinsic::smul_with_overflow,
4027 X->getType());
4028 CallInst *Call = Builder.CreateCall(F, {X, Y}, "mul");
4029
4030 // If the multiplication was used elsewhere, to ensure that we don't leave
4031 // "duplicate" instructions, replace uses of that original multiplication
4032 // with the multiplication result from the with.overflow intrinsic.
4033 if (MulHadOtherUses)
4034 replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "mul.val"));
4035
4036 Value *Res = Builder.CreateExtractValue(Call, 1, "mul.ov");
4037 if (NeedNegation) // This technically increases instruction count.
4038 Res = Builder.CreateNot(Res, "mul.not.ov");
4039
4040 // If we replaced the mul, erase it. Do this after all uses of Builder,
4041 // as the mul is used as insertion point.
4042 if (MulHadOtherUses)
4043 eraseInstFromFunction(*Mul);
4044
4045 return Res;
4046 }
4047
4048 static Instruction *foldICmpXNegX(ICmpInst &I) {
4049 CmpInst::Predicate Pred;
4050 Value *X;
4051 if (!match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X))))
4052 return nullptr;
4053
4054 if (ICmpInst::isSigned(Pred))
4055 Pred = ICmpInst::getSwappedPredicate(Pred);
4056 else if (ICmpInst::isUnsigned(Pred))
4057 Pred = ICmpInst::getSignedPredicate(Pred);
4058 // else for equality-comparisons just keep the predicate.
4059
4060 return ICmpInst::Create(Instruction::ICmp, Pred, X,
4061 Constant::getNullValue(X->getType()), I.getName());
4062 }
4063
4064 /// Try to fold icmp (binop), X or icmp X, (binop).
4065 /// TODO: A large part of this logic is duplicated in InstSimplify's
4066 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
4067 /// duplication.
4068 Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
4069 const SimplifyQuery &SQ) {
4070 const SimplifyQuery Q = SQ.getWithInstruction(&I);
4071 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4072
4073 // Special logic for binary operators.
4074 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
4075 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
4076 if (!BO0 && !BO1)
4077 return nullptr;
4078
4079 if (Instruction *NewICmp = foldICmpXNegX(I))
4080 return NewICmp;
4081
4082 const CmpInst::Predicate Pred = I.getPredicate();
4083 Value *X;
4084
4085 // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
4086 // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
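// For illustration (i8): (%a + %x) u< %a holds exactly when the add wrapped,
// i.e. when %x u> 255 - %a, which is the same as ~%a u< %x.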
4087 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
4088 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
4089 return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
4090 // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
4091 if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
4092 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
4093 return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
4094
4095 {
4096 // (Op1 + X) + C u</u>= Op1 --> ~C - X u</u>= Op1
4097 Constant *C;
4098 if (match(Op0, m_OneUse(m_Add(m_c_Add(m_Specific(Op1), m_Value(X)),
4099 m_ImmConstant(C)))) &&
4100 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
4101 Constant *C2 = ConstantExpr::getNot(C);
4102 return new ICmpInst(Pred, Builder.CreateSub(C2, X), Op1);
4103 }
4104 // Op0 u>/u<= (Op0 + X) + C --> Op0 u>/u<= ~C - X
4105 if (match(Op1, m_OneUse(m_Add(m_c_Add(m_Specific(Op0), m_Value(X)),
4106 m_ImmConstant(C)))) &&
4107 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE)) {
4108 Constant *C2 = ConstantExpr::getNot(C);
4109 return new ICmpInst(Pred, Op0, Builder.CreateSub(C2, X));
4110 }
4111 }
4112
4113 {
4114 // Similar to above: an unsigned overflow comparison may use offset + mask:
4115 // ((Op1 + C) & C) u< Op1 --> Op1 != 0
4116 // ((Op1 + C) & C) u>= Op1 --> Op1 == 0
4117 // Op0 u> ((Op0 + C) & C) --> Op0 != 0
4118 // Op0 u<= ((Op0 + C) & C) --> Op0 == 0
4119 BinaryOperator *BO;
4120 const APInt *C;
4121 if ((Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) &&
4122 match(Op0, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
4123 match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowUndef(*C)))) {
4124 CmpInst::Predicate NewPred =
4125 Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
4126 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
4127 return new ICmpInst(NewPred, Op1, Zero);
4128 }
4129
4130 if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
4131 match(Op1, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
4132 match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowUndef(*C)))) {
4133 CmpInst::Predicate NewPred =
4134 Pred == ICmpInst::ICMP_UGT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
4135 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
4136 return new ICmpInst(NewPred, Op0, Zero);
4137 }
4138 }
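  // For example (illustrative, with the low-bit mask C == 7):
  //   %t = add i32 %op1, 7
  //   %m = and i32 %t, 7
  //   %c = icmp ult i32 %m, %op1       ; --> icmp ne i32 %op1, 0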
4139
4140 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
4141 if (BO0 && isa<OverflowingBinaryOperator>(BO0))
4142 NoOp0WrapProblem =
4143 ICmpInst::isEquality(Pred) ||
4144 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
4145 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
4146 if (BO1 && isa<OverflowingBinaryOperator>(BO1))
4147 NoOp1WrapProblem =
4148 ICmpInst::isEquality(Pred) ||
4149 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
4150 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
4151
4152 // Analyze the case when either Op0 or Op1 is an add instruction.
4153 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
4154 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
4155 if (BO0 && BO0->getOpcode() == Instruction::Add) {
4156 A = BO0->getOperand(0);
4157 B = BO0->getOperand(1);
4158 }
4159 if (BO1 && BO1->getOpcode() == Instruction::Add) {
4160 C = BO1->getOperand(0);
4161 D = BO1->getOperand(1);
4162 }
4163
4164 // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
4165 // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
4166 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
4167 return new ICmpInst(Pred, A == Op1 ? B : A,
4168 Constant::getNullValue(Op1->getType()));
4169
4170 // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
4171 // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
4172 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
4173 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
4174 C == Op0 ? D : C);
4175
4176 // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
4177 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
4178 NoOp1WrapProblem) {
4179 // Determine Y and Z in the form icmp (X+Y), (X+Z).
4180 Value *Y, *Z;
4181 if (A == C) {
4182 // C + B == C + D -> B == D
4183 Y = B;
4184 Z = D;
4185 } else if (A == D) {
4186 // D + B == C + D -> B == C
4187 Y = B;
4188 Z = C;
4189 } else if (B == C) {
4190 // A + C == C + D -> A == D
4191 Y = A;
4192 Z = D;
4193 } else {
4194 assert(B == D);
4195 // A + D == C + D -> A == C
4196 Y = A;
4197 Z = C;
4198 }
4199 return new ICmpInst(Pred, Y, Z);
4200 }
4201
4202 // icmp slt (A + -1), Op1 -> icmp sle A, Op1
4203 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
4204 match(B, m_AllOnes()))
4205 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
4206
4207 // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
4208 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
4209 match(B, m_AllOnes()))
4210 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
4211
4212 // icmp sle (A + 1), Op1 -> icmp slt A, Op1
4213 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
4214 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
4215
4216 // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
4217 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
4218 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
4219
4220 // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
4221 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
4222 match(D, m_AllOnes()))
4223 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
4224
4225 // icmp sle Op0, (C + -1) -> icmp slt Op0, C
4226 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
4227 match(D, m_AllOnes()))
4228 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
4229
4230 // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
4231 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
4232 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
4233
4234 // icmp slt Op0, (C + 1) -> icmp sle Op0, C
4235 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
4236 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
4237
4238 // TODO: The subtraction-related identities shown below also hold, but
4239 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
4240 // wouldn't happen even if they were implemented.
4241 //
4242 // icmp ult (A - 1), Op1 -> icmp ule A, Op1
4243 // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
4244 // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
4245 // icmp ule Op0, (C - 1) -> icmp ult Op0, C
4246
4247   // icmp ule (A + 1), Op1 -> icmp ult A, Op1
4248 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
4249 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
4250
4251   // icmp ugt (A + 1), Op1 -> icmp uge A, Op1
4252 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
4253 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
4254
4255 // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
4256 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
4257 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
4258
4259 // icmp ult Op0, (C + 1) -> icmp ule Op0, C
4260 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
4261 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
4262
4263 // if C1 has greater magnitude than C2:
4264 // icmp (A + C1), (C + C2) -> icmp (A + C3), C
4265 // s.t. C3 = C1 - C2
4266 //
4267 // if C2 has greater magnitude than C1:
4268 // icmp (A + C1), (C + C2) -> icmp A, (C + C3)
4269 // s.t. C3 = C2 - C1
4270 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
4271 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) {
4272 const APInt *AP1, *AP2;
4273 // TODO: Support non-uniform vectors.
4274     // TODO: Allow undef passthrough if both B's and D's elements are undef.
4275 if (match(B, m_APIntAllowUndef(AP1)) && match(D, m_APIntAllowUndef(AP2)) &&
4276 AP1->isNegative() == AP2->isNegative()) {
4277 APInt AP1Abs = AP1->abs();
4278 APInt AP2Abs = AP2->abs();
4279 if (AP1Abs.uge(AP2Abs)) {
4280 APInt Diff = *AP1 - *AP2;
4281 bool HasNUW = BO0->hasNoUnsignedWrap() && Diff.ule(*AP1);
4282 bool HasNSW = BO0->hasNoSignedWrap();
4283 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
4284 Value *NewAdd = Builder.CreateAdd(A, C3, "", HasNUW, HasNSW);
4285 return new ICmpInst(Pred, NewAdd, C);
4286 } else {
4287 APInt Diff = *AP2 - *AP1;
4288 bool HasNUW = BO1->hasNoUnsignedWrap() && Diff.ule(*AP2);
4289 bool HasNSW = BO1->hasNoSignedWrap();
4290 Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
4291 Value *NewAdd = Builder.CreateAdd(C, C3, "", HasNUW, HasNSW);
4292 return new ICmpInst(Pred, A, NewAdd);
4293 }
4294 }
4295 Constant *Cst1, *Cst2;
4296 if (match(B, m_ImmConstant(Cst1)) && match(D, m_ImmConstant(Cst2)) &&
4297 ICmpInst::isEquality(Pred)) {
4298 Constant *Diff = ConstantExpr::getSub(Cst2, Cst1);
4299 Value *NewAdd = Builder.CreateAdd(C, Diff);
4300 return new ICmpInst(Pred, A, NewAdd);
4301 }
4302 }
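  // A sketch of the first case above (assuming both adds are nsw and the
  // predicate is signed, so there is no wrap problem):
  //   icmp slt (add nsw i32 %a, 5), (add nsw i32 %c, 2)
  //     --> icmp slt (add nsw i32 %a, 3), %c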
4303
4304 // Analyze the case when either Op0 or Op1 is a sub instruction.
4305 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
4306 A = nullptr;
4307 B = nullptr;
4308 C = nullptr;
4309 D = nullptr;
4310 if (BO0 && BO0->getOpcode() == Instruction::Sub) {
4311 A = BO0->getOperand(0);
4312 B = BO0->getOperand(1);
4313 }
4314 if (BO1 && BO1->getOpcode() == Instruction::Sub) {
4315 C = BO1->getOperand(0);
4316 D = BO1->getOperand(1);
4317 }
4318
4319 // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
4320 if (A == Op1 && NoOp0WrapProblem)
4321 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
4322 // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
4323 if (C == Op0 && NoOp1WrapProblem)
4324 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
4325
4326 // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
4327 // (A - B) u>/u<= A --> B u>/u<= A
4328 if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
4329 return new ICmpInst(Pred, B, A);
4330 // C u</u>= (C - D) --> C u</u>= D
4331 if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
4332 return new ICmpInst(Pred, C, D);
4333 // (A - B) u>=/u< A --> B u>/u<= A iff B != 0
4334 if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
4335 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
4336 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
4337   // C u<=/u> (C - D) --> C u</u>= D iff D != 0
4338 if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
4339 isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
4340 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
4341
4342 // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
4343 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
4344 return new ICmpInst(Pred, A, C);
4345
4346 // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
4347 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
4348 return new ICmpInst(Pred, D, B);
4349
4350   // icmp (0-X) < cst --> X > -cst
4351 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
4352 Value *X;
4353 if (match(BO0, m_Neg(m_Value(X))))
4354 if (Constant *RHSC = dyn_cast<Constant>(Op1))
4355 if (RHSC->isNotMinSignedValue())
4356 return new ICmpInst(I.getSwappedPredicate(), X,
4357 ConstantExpr::getNeg(RHSC));
4358 }
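  // For instance (illustrative), with a no-signed-wrap negation:
  //   icmp slt (sub nsw i32 0, %x), 10  -->  icmp sgt i32 %x, -10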
4359
4360 {
4361 // Try to remove shared constant multiplier from equality comparison:
4362 // X * C == Y * C (with no overflowing/aliasing) --> X == Y
4363 Value *X, *Y;
4364 const APInt *C;
4365 if (match(Op0, m_Mul(m_Value(X), m_APInt(C))) && *C != 0 &&
4366 match(Op1, m_Mul(m_Value(Y), m_SpecificInt(*C))) && I.isEquality())
4367 if (!C->countTrailingZeros() ||
4368 (BO0 && BO1 && BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap()) ||
4369 (BO0 && BO1 && BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap()))
4370 return new ICmpInst(Pred, X, Y);
4371 }
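  // E.g. (illustrative), with the odd constant 3 no wrap flags are needed:
  //   icmp eq (mul i32 %x, 3), (mul i32 %y, 3)  -->  icmp eq i32 %x, %y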
4372
4373 BinaryOperator *SRem = nullptr;
4374 // icmp (srem X, Y), Y
4375 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
4376 SRem = BO0;
4377 // icmp Y, (srem X, Y)
4378 else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
4379 Op0 == BO1->getOperand(1))
4380 SRem = BO1;
4381 if (SRem) {
4382 // We don't check hasOneUse to avoid increasing register pressure because
4383 // the value we use is the same value this instruction was already using.
4384 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
4385 default:
4386 break;
4387 case ICmpInst::ICMP_EQ:
4388 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4389 case ICmpInst::ICMP_NE:
4390 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4391 case ICmpInst::ICMP_SGT:
4392 case ICmpInst::ICMP_SGE:
4393 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
4394 Constant::getAllOnesValue(SRem->getType()));
4395 case ICmpInst::ICMP_SLT:
4396 case ICmpInst::ICMP_SLE:
4397 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
4398 Constant::getNullValue(SRem->getType()));
4399 }
4400 }
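  // An illustrative instance: "icmp sgt (srem i32 %x, %y), %y" can only be
  // true when %y is negative, so it becomes "icmp slt i32 %y, 0".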
4401
4402 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
4403 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
4404 switch (BO0->getOpcode()) {
4405 default:
4406 break;
4407 case Instruction::Add:
4408 case Instruction::Sub:
4409 case Instruction::Xor: {
4410 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
4411 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4412
4413 const APInt *C;
4414 if (match(BO0->getOperand(1), m_APInt(C))) {
4415 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
4416 if (C->isSignMask()) {
4417 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4418 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4419 }
4420
4421 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
4422 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
4423 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4424 NewPred = I.getSwappedPredicate(NewPred);
4425 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4426 }
4427 }
4428 break;
4429 }
4430 case Instruction::Mul: {
4431 if (!I.isEquality())
4432 break;
4433
4434 const APInt *C;
4435 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isZero() &&
4436 !C->isOne()) {
4437 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
4438 // Mask = -1 >> count-trailing-zeros(C).
4439 if (unsigned TZs = C->countTrailingZeros()) {
4440 Constant *Mask = ConstantInt::get(
4441 BO0->getType(),
4442 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
4443 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
4444 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
4445 return new ICmpInst(Pred, And1, And2);
4446 }
4447 }
4448 break;
4449 }
4450 case Instruction::UDiv:
4451 case Instruction::LShr:
4452 if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
4453 break;
4454 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4455
4456 case Instruction::SDiv:
4457 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
4458 break;
4459 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4460
4461 case Instruction::AShr:
4462 if (!BO0->isExact() || !BO1->isExact())
4463 break;
4464 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4465
4466 case Instruction::Shl: {
4467 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
4468 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
4469 if (!NUW && !NSW)
4470 break;
4471 if (!NSW && I.isSigned())
4472 break;
4473 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4474 }
4475 }
4476 }
4477
4478 if (BO0) {
4479 // Transform A & (L - 1) `ult` L --> L != 0
4480 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
4481 auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
4482
4483 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
4484 auto *Zero = Constant::getNullValue(BO0->getType());
4485 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
4486 }
4487 }
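  // Illustrative IR for this rewrite (hypothetical values):
  //   %lm1 = add i32 %l, -1
  //   %and = and i32 %x, %lm1
  //   %cmp = icmp ult i32 %and, %l     ; --> icmp ne i32 %l, 0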
4488
4489 if (Value *V = foldMultiplicationOverflowCheck(I))
4490 return replaceInstUsesWith(I, V);
4491
4492 if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder))
4493 return replaceInstUsesWith(I, V);
4494
4495 if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
4496 return replaceInstUsesWith(I, V);
4497
4498 if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
4499 return replaceInstUsesWith(I, V);
4500
4501 return nullptr;
4502 }
4503
4504 /// Fold icmp Pred min|max(X, Y), X.
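/// For example (illustrative): "icmp sge (smin %x, %y), %x" folds to
/// "icmp sle %x, %y", and "icmp ugt (umax %x, %y), %x" folds to
/// "icmp ult %x, %y".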
4505 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
4506 ICmpInst::Predicate Pred = Cmp.getPredicate();
4507 Value *Op0 = Cmp.getOperand(0);
4508 Value *X = Cmp.getOperand(1);
4509
4510 // Canonicalize minimum or maximum operand to LHS of the icmp.
4511 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
4512 match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
4513 match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
4514 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
4515 std::swap(Op0, X);
4516 Pred = Cmp.getSwappedPredicate();
4517 }
4518
4519 Value *Y;
4520 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
4521 // smin(X, Y) == X --> X s<= Y
4522 // smin(X, Y) s>= X --> X s<= Y
4523 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
4524 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
4525
4526 // smin(X, Y) != X --> X s> Y
4527 // smin(X, Y) s< X --> X s> Y
4528 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
4529 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
4530
4531 // These cases should be handled in InstSimplify:
4532 // smin(X, Y) s<= X --> true
4533 // smin(X, Y) s> X --> false
4534 return nullptr;
4535 }
4536
4537 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
4538 // smax(X, Y) == X --> X s>= Y
4539 // smax(X, Y) s<= X --> X s>= Y
4540 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
4541 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
4542
4543 // smax(X, Y) != X --> X s< Y
4544 // smax(X, Y) s> X --> X s< Y
4545 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
4546 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
4547
4548 // These cases should be handled in InstSimplify:
4549 // smax(X, Y) s>= X --> true
4550 // smax(X, Y) s< X --> false
4551 return nullptr;
4552 }
4553
4554 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
4555 // umin(X, Y) == X --> X u<= Y
4556 // umin(X, Y) u>= X --> X u<= Y
4557 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
4558 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
4559
4560 // umin(X, Y) != X --> X u> Y
4561 // umin(X, Y) u< X --> X u> Y
4562 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
4563 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
4564
4565 // These cases should be handled in InstSimplify:
4566 // umin(X, Y) u<= X --> true
4567 // umin(X, Y) u> X --> false
4568 return nullptr;
4569 }
4570
4571 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
4572 // umax(X, Y) == X --> X u>= Y
4573 // umax(X, Y) u<= X --> X u>= Y
4574 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
4575 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
4576
4577 // umax(X, Y) != X --> X u< Y
4578 // umax(X, Y) u> X --> X u< Y
4579 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
4580 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
4581
4582 // These cases should be handled in InstSimplify:
4583 // umax(X, Y) u>= X --> true
4584 // umax(X, Y) u< X --> false
4585 return nullptr;
4586 }
4587
4588 return nullptr;
4589 }
4590
4591 Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
4592 if (!I.isEquality())
4593 return nullptr;
4594
4595 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4596 const CmpInst::Predicate Pred = I.getPredicate();
4597 Value *A, *B, *C, *D;
4598 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4599 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
4600 Value *OtherVal = A == Op1 ? B : A;
4601 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4602 }
4603
4604 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
4605 // A^c1 == C^c2 --> A == C^(c1^c2)
4606 ConstantInt *C1, *C2;
4607 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
4608 Op1->hasOneUse()) {
4609 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
4610 Value *Xor = Builder.CreateXor(C, NC);
4611 return new ICmpInst(Pred, A, Xor);
4612 }
4613
4614 // A^B == A^D -> B == D
4615 if (A == C)
4616 return new ICmpInst(Pred, B, D);
4617 if (A == D)
4618 return new ICmpInst(Pred, B, C);
4619 if (B == C)
4620 return new ICmpInst(Pred, A, D);
4621 if (B == D)
4622 return new ICmpInst(Pred, A, C);
4623 }
4624 }
4625
4626 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
4627 // A == (A^B) -> B == 0
4628 Value *OtherVal = A == Op0 ? B : A;
4629 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4630 }
4631
4632 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
4633 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
4634 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
4635 Value *X = nullptr, *Y = nullptr, *Z = nullptr;
4636
4637 if (A == C) {
4638 X = B;
4639 Y = D;
4640 Z = A;
4641 } else if (A == D) {
4642 X = B;
4643 Y = C;
4644 Z = A;
4645 } else if (B == C) {
4646 X = A;
4647 Y = D;
4648 Z = B;
4649 } else if (B == D) {
4650 X = A;
4651 Y = C;
4652 Z = B;
4653 }
4654
4655 if (X) { // Build (X^Y) & Z
4656 Op1 = Builder.CreateXor(X, Y);
4657 Op1 = Builder.CreateAnd(Op1, Z);
4658 return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
4659 }
4660 }
4661
4662 {
4663 // Similar to above, but specialized for constant because invert is needed:
4664 // (X | C) == (Y | C) --> (X ^ Y) & ~C == 0
4665 Value *X, *Y;
4666 Constant *C;
4667 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_Constant(C)))) &&
4668 match(Op1, m_OneUse(m_Or(m_Value(Y), m_Specific(C))))) {
4669 Value *Xor = Builder.CreateXor(X, Y);
4670 Value *And = Builder.CreateAnd(Xor, ConstantExpr::getNot(C));
4671 return new ICmpInst(Pred, And, Constant::getNullValue(And->getType()));
4672 }
4673 }
4674
4675 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
4676 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
4677 ConstantInt *Cst1;
4678 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
4679 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
4680 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
4681 match(Op1, m_ZExt(m_Value(A))))) {
4682 APInt Pow2 = Cst1->getValue() + 1;
4683 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
4684 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
4685 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
4686 }
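  // E.g. (illustrative): "icmp eq (zext i8 %a to i32), (and i32 %b, 255)"
  // becomes "icmp eq i8 %a, (trunc i32 %b to i8)".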
4687
4688 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
4689 // For lshr and ashr pairs.
4690 const APInt *AP1, *AP2;
4691 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_APIntAllowUndef(AP1)))) &&
4692 match(Op1, m_OneUse(m_LShr(m_Value(B), m_APIntAllowUndef(AP2))))) ||
4693 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_APIntAllowUndef(AP1)))) &&
4694 match(Op1, m_OneUse(m_AShr(m_Value(B), m_APIntAllowUndef(AP2)))))) {
4695 if (AP1 != AP2)
4696 return nullptr;
4697 unsigned TypeBits = AP1->getBitWidth();
4698 unsigned ShAmt = AP1->getLimitedValue(TypeBits);
4699 if (ShAmt < TypeBits && ShAmt != 0) {
4700 ICmpInst::Predicate NewPred =
4701 Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
4702 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4703 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
4704 return new ICmpInst(NewPred, Xor, ConstantInt::get(A->getType(), CmpVal));
4705 }
4706 }
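  // A sketch of this fold (illustrative), for a shift amount of 4:
  //   icmp eq (lshr i32 %a, 4), (lshr i32 %b, 4)
  //     --> icmp ult (xor i32 %a, %b), 16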
4707
4708 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
4709 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
4710 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
4711 unsigned TypeBits = Cst1->getBitWidth();
4712 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4713 if (ShAmt < TypeBits && ShAmt != 0) {
4714 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4715 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
4716 Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
4717 I.getName() + ".mask");
4718 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
4719 }
4720 }
4721
4722   // Transform "icmp eq (trunc (lshr(X, cst1))), cst" to
4723 // "icmp (and X, mask), cst"
4724 uint64_t ShAmt = 0;
4725 if (Op0->hasOneUse() &&
4726 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
4727 match(Op1, m_ConstantInt(Cst1)) &&
4728 // Only do this when A has multiple uses. This is most important to do
4729 // when it exposes other optimizations.
4730 !A->hasOneUse()) {
4731 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
4732
4733 if (ShAmt < ASize) {
4734 APInt MaskV =
4735 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
4736 MaskV <<= ShAmt;
4737
4738 APInt CmpV = Cst1->getValue().zext(ASize);
4739 CmpV <<= ShAmt;
4740
4741 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
4742 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
4743 }
4744 }
4745
4746 if (Instruction *ICmp = foldICmpIntrinsicWithIntrinsic(I))
4747 return ICmp;
4748
4749 // Canonicalize checking for a power-of-2-or-zero value:
4750 // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
4751 // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
4752 if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
4753 m_Deferred(A)))) ||
4754 !match(Op1, m_ZeroInt()))
4755 A = nullptr;
4756
4757 // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
4758 // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
4759 if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
4760 A = Op1;
4761 else if (match(Op1,
4762 m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
4763 A = Op0;
4764
4765 if (A) {
4766 Type *Ty = A->getType();
4767 CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
4768 return Pred == ICmpInst::ICMP_EQ
4769 ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop, ConstantInt::get(Ty, 2))
4770 : new ICmpInst(ICmpInst::ICMP_UGT, CtPop, ConstantInt::get(Ty, 1));
4771 }
4772
4773   // Match icmp eq (trunc (lshr A, BW)), (ashr (trunc A), BW-1), which checks the
4774 // top BW/2 + 1 bits are all the same. Create "A >=s INT_MIN && A <=s INT_MAX",
4775 // which we generate as "icmp ult (add A, 2^(BW-1)), 2^BW" to skip a few steps
4776 // of instcombine.
4777 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
4778 if (match(Op0, m_AShr(m_Trunc(m_Value(A)), m_SpecificInt(BitWidth - 1))) &&
4779 match(Op1, m_Trunc(m_LShr(m_Specific(A), m_SpecificInt(BitWidth)))) &&
4780 A->getType()->getScalarSizeInBits() == BitWidth * 2 &&
4781 (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse())) {
4782 APInt C = APInt::getOneBitSet(BitWidth * 2, BitWidth - 1);
4783 Value *Add = Builder.CreateAdd(A, ConstantInt::get(A->getType(), C));
4784 return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT
4785 : ICmpInst::ICMP_UGE,
4786 Add, ConstantInt::get(A->getType(), C.shl(1)));
4787 }
4788
4789 return nullptr;
4790 }
4791
4792 static Instruction *foldICmpWithTrunc(ICmpInst &ICmp,
4793 InstCombiner::BuilderTy &Builder) {
4794 ICmpInst::Predicate Pred = ICmp.getPredicate();
4795 Value *Op0 = ICmp.getOperand(0), *Op1 = ICmp.getOperand(1);
4796
4797 // Try to canonicalize trunc + compare-to-constant into a mask + cmp.
4798 // The trunc masks high bits while the compare may effectively mask low bits.
4799 Value *X;
4800 const APInt *C;
4801 if (!match(Op0, m_OneUse(m_Trunc(m_Value(X)))) || !match(Op1, m_APInt(C)))
4802 return nullptr;
4803
4804 // This matches patterns corresponding to tests of the signbit as well as:
4805 // (trunc X) u< C --> (X & -C) == 0 (are all masked-high-bits clear?)
4806 // (trunc X) u> C --> (X & ~C) != 0 (are any masked-high-bits set?)
4807 APInt Mask;
4808 if (decomposeBitTestICmp(Op0, Op1, Pred, X, Mask, true /* WithTrunc */)) {
4809 Value *And = Builder.CreateAnd(X, Mask);
4810 Constant *Zero = ConstantInt::getNullValue(X->getType());
4811 return new ICmpInst(Pred, And, Zero);
4812 }
4813
4814 unsigned SrcBits = X->getType()->getScalarSizeInBits();
4815 if (Pred == ICmpInst::ICMP_ULT && C->isNegatedPowerOf2()) {
4816 // If C is a negative power-of-2 (high-bit mask):
4817 // (trunc X) u< C --> (X & C) != C (are any masked-high-bits clear?)
4818 Constant *MaskC = ConstantInt::get(X->getType(), C->zext(SrcBits));
4819 Value *And = Builder.CreateAnd(X, MaskC);
4820 return new ICmpInst(ICmpInst::ICMP_NE, And, MaskC);
4821 }
4822
4823 if (Pred == ICmpInst::ICMP_UGT && (~*C).isPowerOf2()) {
4824     // If ~C is a power-of-2 (i.e. C has a single clear bit):
4825 // (trunc X) u> C --> (X & (C+1)) == C+1 (are all masked-high-bits set?)
4826 Constant *MaskC = ConstantInt::get(X->getType(), (*C + 1).zext(SrcBits));
4827 Value *And = Builder.CreateAnd(X, MaskC);
4828 return new ICmpInst(ICmpInst::ICMP_EQ, And, MaskC);
4829 }
4830
4831 return nullptr;
4832 }
4833
4834 Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) {
4835 assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
4836 auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
4837 Value *X;
4838 if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
4839 return nullptr;
4840
4841 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
4842 bool IsSignedCmp = ICmp.isSigned();
4843
4844 // icmp Pred (ext X), (ext Y)
4845 Value *Y;
4846 if (match(ICmp.getOperand(1), m_ZExtOrSExt(m_Value(Y)))) {
4847 bool IsZext0 = isa<ZExtOperator>(ICmp.getOperand(0));
4848 bool IsZext1 = isa<ZExtOperator>(ICmp.getOperand(1));
4849
4850 // If we have mismatched casts, treat the zext of a non-negative source as
4851 // a sext to simulate matching casts. Otherwise, we are done.
4852 // TODO: Can we handle some predicates (equality) without non-negative?
4853 if (IsZext0 != IsZext1) {
4854 if ((IsZext0 && isKnownNonNegative(X, DL, 0, &AC, &ICmp, &DT)) ||
4855 (IsZext1 && isKnownNonNegative(Y, DL, 0, &AC, &ICmp, &DT)))
4856 IsSignedExt = true;
4857 else
4858 return nullptr;
4859 }
4860
4861 // Not an extension from the same type?
4862 Type *XTy = X->getType(), *YTy = Y->getType();
4863 if (XTy != YTy) {
4864 // One of the casts must have one use because we are creating a new cast.
4865 if (!ICmp.getOperand(0)->hasOneUse() && !ICmp.getOperand(1)->hasOneUse())
4866 return nullptr;
4867 // Extend the narrower operand to the type of the wider operand.
4868 CastInst::CastOps CastOpcode =
4869 IsSignedExt ? Instruction::SExt : Instruction::ZExt;
4870 if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
4871 X = Builder.CreateCast(CastOpcode, X, YTy);
4872 else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
4873 Y = Builder.CreateCast(CastOpcode, Y, XTy);
4874 else
4875 return nullptr;
4876 }
4877
4878 // (zext X) == (zext Y) --> X == Y
4879 // (sext X) == (sext Y) --> X == Y
4880 if (ICmp.isEquality())
4881 return new ICmpInst(ICmp.getPredicate(), X, Y);
4882
4883 // A signed comparison of sign extended values simplifies into a
4884 // signed comparison.
4885 if (IsSignedCmp && IsSignedExt)
4886 return new ICmpInst(ICmp.getPredicate(), X, Y);
4887
4888 // The other three cases all fold into an unsigned comparison.
4889 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
4890 }
4891
4892 // Below here, we are only folding a compare with constant.
4893 auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
4894 if (!C)
4895 return nullptr;
4896
4897   // Compute the constant that would result if we truncated to SrcTy then
4898 // re-extended to DestTy.
4899 Type *SrcTy = CastOp0->getSrcTy();
4900 Type *DestTy = CastOp0->getDestTy();
4901 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
4902 Constant *Res2 = ConstantExpr::getCast(CastOp0->getOpcode(), Res1, DestTy);
4903
4904 // If the re-extended constant didn't change...
4905 if (Res2 == C) {
4906 if (ICmp.isEquality())
4907 return new ICmpInst(ICmp.getPredicate(), X, Res1);
4908
4909 // A signed comparison of sign extended values simplifies into a
4910 // signed comparison.
4911 if (IsSignedExt && IsSignedCmp)
4912 return new ICmpInst(ICmp.getPredicate(), X, Res1);
4913
4914 // The other three cases all fold into an unsigned comparison.
4915 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res1);
4916 }
4917
4918 // The re-extended constant changed, partly changed (in the case of a vector),
4919 // or could not be determined to be equal (in the case of a constant
4920 // expression), so the constant cannot be represented in the shorter type.
4921 // All the cases that fold to true or false will have already been handled
4922 // by simplifyICmpInst, so only deal with the tricky case.
4923 if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
4924 return nullptr;
4925
4926 // Is source op positive?
4927 // icmp ult (sext X), C --> icmp sgt X, -1
4928 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
4929 return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy));
4930
4931 // Is source op negative?
4932 // icmp ugt (sext X), C --> icmp slt X, 0
4933 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
4934 return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy));
4935 }
4936
4937 /// Handle icmp (cast x), (cast or constant).
4938 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
4939   // If any operand of ICmp is an inttoptr roundtrip cast, then remove it,
4940   // since the icmp compares only the pointer's value.
4941 // icmp (inttoptr (ptrtoint p1)), p2 --> icmp p1, p2.
4942 Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(0));
4943 Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(1));
4944 if (SimplifiedOp0 || SimplifiedOp1)
4945 return new ICmpInst(ICmp.getPredicate(),
4946 SimplifiedOp0 ? SimplifiedOp0 : ICmp.getOperand(0),
4947 SimplifiedOp1 ? SimplifiedOp1 : ICmp.getOperand(1));
4948
4949 auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
4950 if (!CastOp0)
4951 return nullptr;
4952 if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
4953 return nullptr;
4954
4955 Value *Op0Src = CastOp0->getOperand(0);
4956 Type *SrcTy = CastOp0->getSrcTy();
4957 Type *DestTy = CastOp0->getDestTy();
4958
4959 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
4960 // integer type is the same size as the pointer type.
4961 auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) {
4962 if (isa<VectorType>(SrcTy)) {
4963 SrcTy = cast<VectorType>(SrcTy)->getElementType();
4964 DestTy = cast<VectorType>(DestTy)->getElementType();
4965 }
4966 return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth();
4967 };
4968 if (CastOp0->getOpcode() == Instruction::PtrToInt &&
4969 CompatibleSizes(SrcTy, DestTy)) {
4970 Value *NewOp1 = nullptr;
4971 if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
4972 Value *PtrSrc = PtrToIntOp1->getOperand(0);
4973 if (PtrSrc->getType()->getPointerAddressSpace() ==
4974 Op0Src->getType()->getPointerAddressSpace()) {
4975 NewOp1 = PtrToIntOp1->getOperand(0);
4976 // If the pointer types don't match, insert a bitcast.
4977 if (Op0Src->getType() != NewOp1->getType())
4978 NewOp1 = Builder.CreateBitCast(NewOp1, Op0Src->getType());
4979 }
4980 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
4981 NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
4982 }
4983
4984 if (NewOp1)
4985 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
4986 }
4987
4988 if (Instruction *R = foldICmpWithTrunc(ICmp, Builder))
4989 return R;
4990
4991 return foldICmpWithZextOrSext(ICmp);
4992 }
4993
4994 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS) {
4995 switch (BinaryOp) {
4996 default:
4997 llvm_unreachable("Unsupported binary op");
4998 case Instruction::Add:
4999 case Instruction::Sub:
5000 return match(RHS, m_Zero());
5001 case Instruction::Mul:
5002 return match(RHS, m_One());
5003 }
5004 }
5005
5006 OverflowResult
5007 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
5008 bool IsSigned, Value *LHS, Value *RHS,
5009 Instruction *CxtI) const {
5010 switch (BinaryOp) {
5011 default:
5012 llvm_unreachable("Unsupported binary op");
5013 case Instruction::Add:
5014 if (IsSigned)
5015 return computeOverflowForSignedAdd(LHS, RHS, CxtI);
5016 else
5017 return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
5018 case Instruction::Sub:
5019 if (IsSigned)
5020 return computeOverflowForSignedSub(LHS, RHS, CxtI);
5021 else
5022 return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
5023 case Instruction::Mul:
5024 if (IsSigned)
5025 return computeOverflowForSignedMul(LHS, RHS, CxtI);
5026 else
5027 return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
5028 }
5029 }
5030
5031 bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
5032 bool IsSigned, Value *LHS,
5033 Value *RHS, Instruction &OrigI,
5034 Value *&Result,
5035 Constant *&Overflow) {
5036 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
5037 std::swap(LHS, RHS);
5038
5039 // If the overflow check was an add followed by a compare, the insertion point
5040 // may be pointing to the compare. We want to insert the new instructions
5041 // before the add in case there are uses of the add between the add and the
5042 // compare.
5043 Builder.SetInsertPoint(&OrigI);
5044
5045 Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
5046 if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
5047 OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
5048
5049 if (isNeutralValue(BinaryOp, RHS)) {
5050 Result = LHS;
5051 Overflow = ConstantInt::getFalse(OverflowTy);
5052 return true;
5053 }
5054
5055 switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
5056 case OverflowResult::MayOverflow:
5057 return false;
5058 case OverflowResult::AlwaysOverflowsLow:
5059 case OverflowResult::AlwaysOverflowsHigh:
5060 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
5061 Result->takeName(&OrigI);
5062 Overflow = ConstantInt::getTrue(OverflowTy);
5063 return true;
5064 case OverflowResult::NeverOverflows:
5065 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
5066 Result->takeName(&OrigI);
5067 Overflow = ConstantInt::getFalse(OverflowTy);
5068 if (auto *Inst = dyn_cast<Instruction>(Result)) {
5069 if (IsSigned)
5070 Inst->setHasNoSignedWrap();
5071 else
5072 Inst->setHasNoUnsignedWrap();
5073 }
5074 return true;
5075 }
5076
5077 llvm_unreachable("Unexpected overflow result");
5078 }
5079
5080 /// Recognize and process an idiom involving a test for multiplication
5081 /// overflow.
5082 ///
5083 /// The caller has matched a pattern of the form:
5084 /// I = cmp u (mul(zext A, zext B)), V
5085 /// The function checks if this is a test for overflow and, if so, replaces the
5086 /// multiplication with a call to the 'mul.with.overflow' intrinsic.
5087 ///
5088 /// \param I Compare instruction.
5089 /// \param MulVal Result of 'mul' instruction. It is one of the arguments of
5090 /// the compare instruction. Must be of integer type.
5091 /// \param OtherVal The other argument of compare instruction.
5092 /// \returns Instruction which must replace the compare instruction, or NULL if
5093 /// no replacement is required.
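/// A typical instance of the idiom (illustrative IR, 32-bit operands):
///   %za  = zext i32 %a to i64
///   %zb  = zext i32 %b to i64
///   %mul = mul i64 %za, %zb
///   %cmp = icmp ugt i64 %mul, 4294967295   ; does the product exceed 32 bits?
/// which gets rewritten to call llvm.umul.with.overflow.i32 on %a and %b and
/// use its overflow bit.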
5094 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
5095 Value *OtherVal,
5096 InstCombinerImpl &IC) {
5097 // Don't bother doing this transformation for pointers, don't do it for
5098 // vectors.
5099 if (!isa<IntegerType>(MulVal->getType()))
5100 return nullptr;
5101
5102 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
5103 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
5104 auto *MulInstr = dyn_cast<Instruction>(MulVal);
5105 if (!MulInstr)
5106 return nullptr;
5107 assert(MulInstr->getOpcode() == Instruction::Mul);
5108
5109 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
5110 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
5111 assert(LHS->getOpcode() == Instruction::ZExt);
5112 assert(RHS->getOpcode() == Instruction::ZExt);
5113 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
5114
5115 // Calculate type and width of the result produced by mul.with.overflow.
5116 Type *TyA = A->getType(), *TyB = B->getType();
5117 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
5118 WidthB = TyB->getPrimitiveSizeInBits();
5119 unsigned MulWidth;
5120 Type *MulType;
5121 if (WidthB > WidthA) {
5122 MulWidth = WidthB;
5123 MulType = TyB;
5124 } else {
5125 MulWidth = WidthA;
5126 MulType = TyA;
5127 }
5128
5129 // In order to replace the original mul with a narrower mul.with.overflow,
5130   // all uses must ignore the upper bits of the product. The number of used
5131   // low bits must not be greater than the width of mul.with.overflow.
5132 if (MulVal->hasNUsesOrMore(2))
5133 for (User *U : MulVal->users()) {
5134 if (U == &I)
5135 continue;
5136 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
5137 // Check if truncation ignores bits above MulWidth.
5138 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
5139 if (TruncWidth > MulWidth)
5140 return nullptr;
5141 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
5142 // Check if AND ignores bits above MulWidth.
5143 if (BO->getOpcode() != Instruction::And)
5144 return nullptr;
5145 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
5146 const APInt &CVal = CI->getValue();
5147 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
5148 return nullptr;
5149 } else {
5150 // In this case we could have the operand of the binary operation
5151 // being defined in another block, and performing the replacement
5152 // could break the dominance relation.
5153 return nullptr;
5154 }
5155 } else {
5156 // Other uses prohibit this transformation.
5157 return nullptr;
5158 }
5159 }
5160
5161 // Recognize patterns
5162 switch (I.getPredicate()) {
5163 case ICmpInst::ICMP_EQ:
5164 case ICmpInst::ICMP_NE:
5165 // Recognize pattern:
5166 // mulval = mul(zext A, zext B)
5167 // cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits.
5168 ConstantInt *CI;
5169 Value *ValToMask;
5170 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
5171 if (ValToMask != MulVal)
5172 return nullptr;
5173 const APInt &CVal = CI->getValue() + 1;
5174 if (CVal.isPowerOf2()) {
5175 unsigned MaskWidth = CVal.logBase2();
5176 if (MaskWidth == MulWidth)
5177 break; // Recognized
5178 }
5179 }
5180 return nullptr;
5181
5182 case ICmpInst::ICMP_UGT:
5183 // Recognize pattern:
5184 // mulval = mul(zext A, zext B)
5185 // cmp ugt mulval, max
5186 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
5187 APInt MaxVal = APInt::getMaxValue(MulWidth);
5188 MaxVal = MaxVal.zext(CI->getBitWidth());
5189 if (MaxVal.eq(CI->getValue()))
5190 break; // Recognized
5191 }
5192 return nullptr;
5193
5194 case ICmpInst::ICMP_UGE:
5195 // Recognize pattern:
5196 // mulval = mul(zext A, zext B)
5197 // cmp uge mulval, max+1
5198 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
5199 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
5200 if (MaxVal.eq(CI->getValue()))
5201 break; // Recognized
5202 }
5203 return nullptr;
5204
5205 case ICmpInst::ICMP_ULE:
5206 // Recognize pattern:
5207 // mulval = mul(zext A, zext B)
5208 // cmp ule mulval, max
5209 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
5210 APInt MaxVal = APInt::getMaxValue(MulWidth);
5211 MaxVal = MaxVal.zext(CI->getBitWidth());
5212 if (MaxVal.eq(CI->getValue()))
5213 break; // Recognized
5214 }
5215 return nullptr;
5216
5217 case ICmpInst::ICMP_ULT:
5218 // Recognize pattern:
5219 // mulval = mul(zext A, zext B)
5220     // cmp ult mulval, max + 1
5221 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
5222 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
5223 if (MaxVal.eq(CI->getValue()))
5224 break; // Recognized
5225 }
5226 return nullptr;
5227
5228 default:
5229 return nullptr;
5230 }
5231
5232 InstCombiner::BuilderTy &Builder = IC.Builder;
5233 Builder.SetInsertPoint(MulInstr);
5234
5235 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
5236 Value *MulA = A, *MulB = B;
5237 if (WidthA < MulWidth)
5238 MulA = Builder.CreateZExt(A, MulType);
5239 if (WidthB < MulWidth)
5240 MulB = Builder.CreateZExt(B, MulType);
5241 Function *F = Intrinsic::getDeclaration(
5242 I.getModule(), Intrinsic::umul_with_overflow, MulType);
5243 CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
5244 IC.addToWorklist(MulInstr);
5245
5246 // If there are uses of mul result other than the comparison, we know that
5247   // they are truncation or binary AND. Change them to use the result of
5248   // mul.with.overflow and adjust the mask/size properly.
5249 if (MulVal->hasNUsesOrMore(2)) {
5250 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
5251 for (User *U : make_early_inc_range(MulVal->users())) {
5252 if (U == &I || U == OtherVal)
5253 continue;
5254 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
5255 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
5256 IC.replaceInstUsesWith(*TI, Mul);
5257 else
5258 TI->setOperand(0, Mul);
5259 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
5260 assert(BO->getOpcode() == Instruction::And);
5261 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
5262 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
5263 APInt ShortMask = CI->getValue().trunc(MulWidth);
5264 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
5265 Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
5266 IC.replaceInstUsesWith(*BO, Zext);
5267 } else {
5268 llvm_unreachable("Unexpected Binary operation");
5269 }
5270 IC.addToWorklist(cast<Instruction>(U));
5271 }
5272 }
5273 if (isa<Instruction>(OtherVal))
5274 IC.addToWorklist(cast<Instruction>(OtherVal));
5275
5276 // The original icmp gets replaced with the overflow value, maybe inverted
5277 // depending on predicate.
5278 bool Inverse = false;
5279 switch (I.getPredicate()) {
5280 case ICmpInst::ICMP_NE:
5281 break;
5282 case ICmpInst::ICMP_EQ:
5283 Inverse = true;
5284 break;
5285 case ICmpInst::ICMP_UGT:
5286 case ICmpInst::ICMP_UGE:
5287 if (I.getOperand(0) == MulVal)
5288 break;
5289 Inverse = true;
5290 break;
5291 case ICmpInst::ICMP_ULT:
5292 case ICmpInst::ICMP_ULE:
5293 if (I.getOperand(1) == MulVal)
5294 break;
5295 Inverse = true;
5296 break;
5297 default:
5298 llvm_unreachable("Unexpected predicate");
5299 }
5300 if (Inverse) {
5301 Value *Res = Builder.CreateExtractValue(Call, 1);
5302 return BinaryOperator::CreateNot(Res);
5303 }
5304
5305 return ExtractValueInst::Create(Call, 1);
5306 }
5307
5308 /// When performing a comparison against a constant, it is possible that not all
5309 /// the bits in the LHS are demanded. This helper method computes the mask that
5310 /// IS demanded.
5311 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
5312 const APInt *RHS;
5313 if (!match(I.getOperand(1), m_APInt(RHS)))
5314 return APInt::getAllOnes(BitWidth);
5315
5316 // If this is a normal comparison, it demands all bits. If it is a sign bit
5317 // comparison, it only demands the sign bit.
5318 bool UnusedBit;
5319 if (InstCombiner::isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
5320 return APInt::getSignMask(BitWidth);
5321
5322 switch (I.getPredicate()) {
5323 // For a UGT comparison, we don't care about any bits that
5324 // correspond to the trailing ones of the comparand. The value of these
5325 // bits doesn't impact the outcome of the comparison, because any value
5326 // greater than the RHS must differ in a bit higher than these due to carry.
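  // For example (illustrative): for "icmp ugt i8 %x, 7", only bits 3 and above
  // of %x are demanded (mask 0xF8); the low three bits cannot change the
  // result.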
5327 case ICmpInst::ICMP_UGT:
5328 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes());
5329
5330 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
5331 // Any value less than the RHS must differ in a higher bit because of carries.
5332 case ICmpInst::ICMP_ULT:
5333 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
5334
5335 default:
5336 return APInt::getAllOnes(BitWidth);
5337 }
5338 }
5339
5340 /// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst
5341 /// should be swapped.
5342 /// The decision is based on how many times these two operands are reused
5343 /// as subtract operands and their positions in those instructions.
5344 /// The rationale is that several architectures use the same instruction for
5345 /// both subtract and cmp. Thus, it is better if the order of those operands
5346 /// matches.
5347 /// \return true if Op0 and Op1 should be swapped.
5348 static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) {
5349 // Filter out pointer values as those cannot appear directly in subtract.
5350 // FIXME: we may want to go through inttoptrs or bitcasts.
5351 if (Op0->getType()->isPointerTy())
5352 return false;
5353 // If a subtract already has the same operands as a compare, swapping would be
5354 // bad. If a subtract has the same operands as a compare but in reverse order,
5355 // then swapping is good.
5356 int GoodToSwap = 0;
5357 for (const User *U : Op0->users()) {
5358 if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
5359 GoodToSwap++;
5360 else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
5361 GoodToSwap--;
5362 }
5363 return GoodToSwap > 0;
5364 }
5365
5366 /// Check that one use is in the same block as the definition and all
5367 /// other uses are in blocks dominated by a given block.
5368 ///
5369 /// \param DI Definition
5370 /// \param UI Use
5371 /// \param DB Block that must dominate all uses of \p DI outside
5372 /// the parent block
5373 /// \return true when \p UI is the only use of \p DI in the parent block
5374 /// and all other uses of \p DI are in blocks dominated by \p DB.
5375 ///
5376 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
5377 const Instruction *UI,
5378 const BasicBlock *DB) const {
5379 assert(DI && UI && "Instruction not defined\n");
5380 // Ignore incomplete definitions.
5381 if (!DI->getParent())
5382 return false;
5383 // DI and UI must be in the same block.
5384 if (DI->getParent() != UI->getParent())
5385 return false;
5386 // Protect from self-referencing blocks.
5387 if (DI->getParent() == DB)
5388 return false;
5389 for (const User *U : DI->users()) {
5390 auto *Usr = cast<Instruction>(U);
5391 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
5392 return false;
5393 }
5394 return true;
5395 }
5396
5397 /// Return true when the instruction sequence within a block is select-cmp-br.
5398 static bool isChainSelectCmpBranch(const SelectInst *SI) {
5399 const BasicBlock *BB = SI->getParent();
5400 if (!BB)
5401 return false;
5402 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
5403 if (!BI || BI->getNumSuccessors() != 2)
5404 return false;
5405 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
5406 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
5407 return false;
5408 return true;
5409 }
5410
5411 /// True when a select result is replaced by one of its operands
5412 /// in select-icmp sequence. This will eventually result in the elimination
5413 /// of the select.
5414 ///
5415 /// \param SI Select instruction
5416 /// \param Icmp Compare instruction
5417 /// \param SIOpd Operand that replaces the select
5418 ///
5419 /// Notes:
5420 /// - The replacement is global and requires dominator information
5421 /// - The caller is responsible for the actual replacement
5422 ///
5423 /// Example:
5424 ///
5425 /// entry:
5426 /// %4 = select i1 %3, %C* %0, %C* null
5427 /// %5 = icmp eq %C* %4, null
5428 /// br i1 %5, label %9, label %7
5429 /// ...
5430 /// ; <label>:7 ; preds = %entry
5431 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
5432 /// ...
5433 ///
5434 /// can be transformed to
5435 ///
5436 /// %5 = icmp eq %C* %0, null
5437 /// %6 = select i1 %3, i1 %5, i1 true
5438 /// br i1 %6, label %9, label %7
5439 /// ...
5440 /// ; <label>:7 ; preds = %entry
5441 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
5442 ///
5443 /// Similar when the first operand of the select is a constant or/and
5444 /// the compare is for not equal rather than equal.
5445 ///
5446 /// NOTE: The function is only called when the select and compare constants
5447 /// are equal, so the optimization can work only for EQ predicates. This is not a
5448 /// major restriction since a NE compare should be 'normalized' to an equal
5449 /// compare, which usually happens in the combiner and test case
5450 /// select-cmp-br.ll checks for it.
5451 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
5452 const ICmpInst *Icmp,
5453 const unsigned SIOpd) {
5454 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
5455 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
5456 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
5457     // The check for the single predecessor is not the best that can be
5458     // done. But it protects efficiently against cases like when SI's
5459     // home block has two successors, Succ and Succ1, and Succ1 is a
5460     // predecessor of Succ. Then SI can't be replaced by SIOpd because the
5461     // use that gets replaced can be reached on either path. So the
5462     // uniqueness check guarantees that the path containing all uses of SI
5463     // (outside SI's parent block) is disjoint from all other paths out of
5464     // SI. But that information is more expensive to compute, and the
5465     // trade-off here is in favor of compile time. It should also be noted
5466     // that we check for a single predecessor and not just uniqueness: this
5467     // handles the situation when Succ and Succ1 point to the same basic block.
5468 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
5469 NumSel++;
5470 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
5471 return true;
5472 }
5473 }
5474 return false;
5475 }
5476
5477 /// Try to fold the comparison based on range information we can get by checking
5478 /// whether bits are known to be zero or one in the inputs.
5479 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
5480 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5481 Type *Ty = Op0->getType();
5482 ICmpInst::Predicate Pred = I.getPredicate();
5483
5484 // Get scalar or pointer size.
5485 unsigned BitWidth = Ty->isIntOrIntVectorTy()
5486 ? Ty->getScalarSizeInBits()
5487 : DL.getPointerTypeSizeInBits(Ty->getScalarType());
5488
5489 if (!BitWidth)
5490 return nullptr;
5491
5492 KnownBits Op0Known(BitWidth);
5493 KnownBits Op1Known(BitWidth);
5494
5495 if (SimplifyDemandedBits(&I, 0,
5496 getDemandedBitsLHSMask(I, BitWidth),
5497 Op0Known, 0))
5498 return &I;
5499
5500 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known, 0))
5501 return &I;
5502
5503 // Given the known and unknown bits, compute a range that the LHS could be
5504 // in. Compute the Min, Max and RHS values based on the known bits. For the
5505 // EQ and NE we use unsigned values.
5506 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
5507 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
5508 if (I.isSigned()) {
5509 Op0Min = Op0Known.getSignedMinValue();
5510 Op0Max = Op0Known.getSignedMaxValue();
5511 Op1Min = Op1Known.getSignedMinValue();
5512 Op1Max = Op1Known.getSignedMaxValue();
5513 } else {
5514 Op0Min = Op0Known.getMinValue();
5515 Op0Max = Op0Known.getMaxValue();
5516 Op1Min = Op1Known.getMinValue();
5517 Op1Max = Op1Known.getMaxValue();
5518 }
5519
5520 // If Min and Max are known to be the same, then SimplifyDemandedBits figured
5521 // out that the LHS or RHS is a constant. Constant fold this now, so that
5522 // code below can assume that Min != Max.
5523 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
5524 return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1);
5525 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
5526 return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min));
5527
5528 // Don't break up a clamp pattern -- (min(max X, Y), Z) -- by replacing a
5529 // min/max canonical compare with some other compare. That could lead to
5530 // conflict with select canonicalization and infinite looping.
5531 // FIXME: This constraint may go away if min/max intrinsics are canonical.
5532 auto isMinMaxCmp = [&](Instruction &Cmp) {
5533 if (!Cmp.hasOneUse())
5534 return false;
5535 Value *A, *B;
5536 SelectPatternFlavor SPF = matchSelectPattern(Cmp.user_back(), A, B).Flavor;
5537 if (!SelectPatternResult::isMinOrMax(SPF))
5538 return false;
5539 return match(Op0, m_MaxOrMin(m_Value(), m_Value())) ||
5540 match(Op1, m_MaxOrMin(m_Value(), m_Value()));
5541 };
5542 if (!isMinMaxCmp(I)) {
5543 switch (Pred) {
5544 default:
5545 break;
5546 case ICmpInst::ICMP_ULT: {
5547 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
5548 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5549 const APInt *CmpC;
5550 if (match(Op1, m_APInt(CmpC))) {
5551 // A <u C -> A == C-1 if min(A)+1 == C
5552 if (*CmpC == Op0Min + 1)
5553 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5554 ConstantInt::get(Op1->getType(), *CmpC - 1));
5555 // X <u C --> X == 0, if the number of known trailing zero bits in X is
5556 // at least ceil(log2(C)).
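// For example, if X is known to be a multiple of 8 (low three bits zero),
// then X <u 5 can only hold when X == 0.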
5557 if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
5558 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5559 Constant::getNullValue(Op1->getType()));
5560 }
5561 break;
5562 }
5563 case ICmpInst::ICMP_UGT: {
5564 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
5565 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5566 const APInt *CmpC;
5567 if (match(Op1, m_APInt(CmpC))) {
5568 // A >u C -> A == C+1 if max(A)-1 == C
5569 if (*CmpC == Op0Max - 1)
5570 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5571 ConstantInt::get(Op1->getType(), *CmpC + 1));
5572 // X >u C --> X != 0, if the number of zero bits in the bottom of X
5573 // exceeds the log2 of C.
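// For example, if X is known to be a multiple of 8, then X >u 5 holds
// exactly when X != 0.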
5574 if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
5575 return new ICmpInst(ICmpInst::ICMP_NE, Op0,
5576 Constant::getNullValue(Op1->getType()));
5577 }
5578 break;
5579 }
5580 case ICmpInst::ICMP_SLT: {
5581 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
5582 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5583 const APInt *CmpC;
5584 if (match(Op1, m_APInt(CmpC))) {
5585 if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
5586 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5587 ConstantInt::get(Op1->getType(), *CmpC - 1));
5588 }
5589 break;
5590 }
5591 case ICmpInst::ICMP_SGT: {
5592 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
5593 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5594 const APInt *CmpC;
5595 if (match(Op1, m_APInt(CmpC))) {
5596 if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
5597 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5598 ConstantInt::get(Op1->getType(), *CmpC + 1));
5599 }
5600 break;
5601 }
5602 }
5603 }
5604
5605 // Based on the range information we know about the LHS, see if we can
5606 // simplify this comparison. For example, (x&4) < 8 is always true.
5607 switch (Pred) {
5608 default:
5609 llvm_unreachable("Unknown icmp opcode!");
5610 case ICmpInst::ICMP_EQ:
5611 case ICmpInst::ICMP_NE: {
5612 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
5613 return replaceInstUsesWith(
5614 I, ConstantInt::getBool(I.getType(), Pred == CmpInst::ICMP_NE));
5615
5616 // If all bits are known zero except for one, then we know at most one bit
5617 // is set. If the comparison is against zero, then this is a check to see if
5618 // *that* bit is set.
5619 APInt Op0KnownZeroInverted = ~Op0Known.Zero;
5620 if (Op1Known.isZero()) {
5621 // If the LHS is an AND with the same constant, look through it.
5622 Value *LHS = nullptr;
5623 const APInt *LHSC;
5624 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
5625 *LHSC != Op0KnownZeroInverted)
5626 LHS = Op0;
5627
5628 Value *X;
5629 const APInt *C1;
5630 if (match(LHS, m_Shl(m_Power2(C1), m_Value(X)))) {
5631 Type *XTy = X->getType();
5632 unsigned Log2C1 = C1->countTrailingZeros();
5633 APInt C2 = Op0KnownZeroInverted;
5634 APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
5635 if (C2Pow2.isPowerOf2()) {
5636 // iff C1 is a power of 2 and ((C2 & ~(C1-1)) + C1) is a power of 2:
5637 // ((C1 << X) & C2) == 0 -> X >= (Log2(C2+C1) - Log2(C1))
5638 // ((C1 << X) & C2) != 0 -> X < (Log2(C2+C1) - Log2(C1))
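// For example: ((1 << X) & 7) == 0 --> X u>= 3, and
//              ((1 << X) & 7) != 0 --> X u< 3.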
5639 unsigned Log2C2 = C2Pow2.countTrailingZeros();
5640 auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
5641 auto NewPred =
5642 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
5643 return new ICmpInst(NewPred, X, CmpC);
5644 }
5645 }
5646 }
5647 break;
5648 }
5649 case ICmpInst::ICMP_ULT: {
5650 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
5651 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5652 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
5653 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5654 break;
5655 }
5656 case ICmpInst::ICMP_UGT: {
5657 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
5658 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5659 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
5660 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5661 break;
5662 }
5663 case ICmpInst::ICMP_SLT: {
5664 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
5665 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5666 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
5667 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5668 break;
5669 }
5670 case ICmpInst::ICMP_SGT: {
5671 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
5672 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5673 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
5674 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5675 break;
5676 }
5677 case ICmpInst::ICMP_SGE:
5678 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
5679 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
5680 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5681 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
5682 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5683 if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
5684 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5685 break;
5686 case ICmpInst::ICMP_SLE:
5687 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
5688 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
5689 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5690 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
5691 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5692 if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
5693 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5694 break;
5695 case ICmpInst::ICMP_UGE:
5696 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
5697 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
5698 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5699 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
5700 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5701 if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
5702 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5703 break;
5704 case ICmpInst::ICMP_ULE:
5705 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
5706 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
5707 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5708 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
5709 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5710 if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
5711 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5712 break;
5713 }
5714
5715 // Turn a signed comparison into an unsigned one if both operands are known to
5716 // have the same sign.
5717 if (I.isSigned() &&
5718 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
5719 (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
5720 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
5721
5722 return nullptr;
5723 }
5724
5725 /// If one operand of an icmp is effectively a bool (value range of {0,1}),
5726 /// then try to reduce patterns based on that limit.
5727 static Instruction *foldICmpUsingBoolRange(ICmpInst &I,
5728 InstCombiner::BuilderTy &Builder) {
5729 Value *X, *Y;
5730 ICmpInst::Predicate Pred;
5731
5732 // X must be 0 and bool must be true for "ULT":
5733 // X <u (zext i1 Y) --> (X == 0) & Y
5734 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_ZExt(m_Value(Y))))) &&
5735 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULT)
5736 return BinaryOperator::CreateAnd(Builder.CreateIsNull(X), Y);
5737
5738 // X must be 0 or bool must be true for "ULE":
5739 // X <=u (sext i1 Y) --> (X == 0) | Y
5740 if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_SExt(m_Value(Y))))) &&
5741 Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULE)
5742 return BinaryOperator::CreateOr(Builder.CreateIsNull(X), Y);
5743
5744 return nullptr;
5745 }
5746
5747 llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
5748 InstCombiner::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
5749 Constant *C) {
5750 assert(ICmpInst::isRelational(Pred) && ICmpInst::isIntPredicate(Pred) &&
5751 "Only for relational integer predicates.");
5752
5753 Type *Type = C->getType();
5754 bool IsSigned = ICmpInst::isSigned(Pred);
5755
5756 CmpInst::Predicate UnsignedPred = ICmpInst::getUnsignedPredicate(Pred);
5757 bool WillIncrement =
5758 UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT;
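// For example, 'icmp sle X, 5' becomes 'icmp slt X, 6' (increment), while
// 'icmp sge X, 5' becomes 'icmp sgt X, 4' (decrement).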
5759
5760 // Check if the constant operand can be safely incremented/decremented
5761 // without overflowing/underflowing.
5762 auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
5763 return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
5764 };
5765
5766 Constant *SafeReplacementConstant = nullptr;
5767 if (auto *CI = dyn_cast<ConstantInt>(C)) {
5768 // Bail out if the constant can't be safely incremented/decremented.
5769 if (!ConstantIsOk(CI))
5770 return llvm::None;
5771 } else if (auto *FVTy = dyn_cast<FixedVectorType>(Type)) {
5772 unsigned NumElts = FVTy->getNumElements();
5773 for (unsigned i = 0; i != NumElts; ++i) {
5774 Constant *Elt = C->getAggregateElement(i);
5775 if (!Elt)
5776 return llvm::None;
5777
5778 if (isa<UndefValue>(Elt))
5779 continue;
5780
5781 // Bail out if we can't determine if this constant is min/max or if we
5782 // know that this constant is min/max.
5783 auto *CI = dyn_cast<ConstantInt>(Elt);
5784 if (!CI || !ConstantIsOk(CI))
5785 return llvm::None;
5786
5787 if (!SafeReplacementConstant)
5788 SafeReplacementConstant = CI;
5789 }
5790 } else {
5791 // ConstantExpr?
5792 return llvm::None;
5793 }
5794
5795 // It may not be safe to change a compare predicate in the presence of
5796 // undefined elements, so replace those elements with the first safe constant
5797 // that we found.
5798 // TODO: in case of poison, it is safe; let's replace undefs only.
5799 if (C->containsUndefOrPoisonElement()) {
5800 assert(SafeReplacementConstant && "Replacement constant not set");
5801 C = Constant::replaceUndefsWith(C, SafeReplacementConstant);
5802 }
5803
5804 CmpInst::Predicate NewPred = CmpInst::getFlippedStrictnessPredicate(Pred);
5805
5806 // Increment or decrement the constant.
5807 Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
5808 Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne);
5809
5810 return std::make_pair(NewPred, NewC);
5811 }
5812
5813 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
5814 /// it into the appropriate icmp lt or icmp gt instruction. This transform
5815 /// allows them to be folded in visitICmpInst.
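/// For example, 'icmp ule X, 42' becomes 'icmp ult X, 43'.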
5816 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
5817 ICmpInst::Predicate Pred = I.getPredicate();
5818 if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
5819 InstCombiner::isCanonicalPredicate(Pred))
5820 return nullptr;
5821
5822 Value *Op0 = I.getOperand(0);
5823 Value *Op1 = I.getOperand(1);
5824 auto *Op1C = dyn_cast<Constant>(Op1);
5825 if (!Op1C)
5826 return nullptr;
5827
5828 auto FlippedStrictness =
5829 InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
5830 if (!FlippedStrictness)
5831 return nullptr;
5832
5833 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
5834 }
5835
5836 /// If we have a comparison with a non-canonical predicate and we can freely
5837 /// invert all of its users, invert the predicate and adjust the users.
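/// For example, if every user of 'icmp sge X, Y' can freely absorb a 'not',
/// the compare is rewritten as its inverse 'icmp slt X, Y' and the users are
/// updated to use the inverted value.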
5838 CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
5839 // Is the predicate already canonical?
5840 CmpInst::Predicate Pred = I.getPredicate();
5841 if (InstCombiner::isCanonicalPredicate(Pred))
5842 return nullptr;
5843
5844 // Can all users be adjusted to predicate inversion?
5845 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
5846 return nullptr;
5847
5848 // Ok, we can canonicalize comparison!
5849 // Let's first invert the comparison's predicate.
5850 I.setPredicate(CmpInst::getInversePredicate(Pred));
5851 I.setName(I.getName() + ".not");
5852
5853 // And, adapt users.
5854 freelyInvertAllUsersOf(&I);
5855
5856 return &I;
5857 }
5858
5859 /// Integer compare with boolean values can always be turned into bitwise ops.
5860 static Instruction *canonicalizeICmpBool(ICmpInst &I,
5861 InstCombiner::BuilderTy &Builder) {
5862 Value *A = I.getOperand(0), *B = I.getOperand(1);
5863 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
5864
5865 // A boolean compared to true/false can be simplified to Op0/true/false in
5866 // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
5867 // Cases not handled by InstSimplify are always 'not' of Op0.
5868 if (match(B, m_Zero())) {
5869 switch (I.getPredicate()) {
5870 case CmpInst::ICMP_EQ: // A == 0 -> !A
5871 case CmpInst::ICMP_ULE: // A <=u 0 -> !A
5872 case CmpInst::ICMP_SGE: // A >=s 0 -> !A
5873 return BinaryOperator::CreateNot(A);
5874 default:
5875 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5876 }
5877 } else if (match(B, m_One())) {
5878 switch (I.getPredicate()) {
5879 case CmpInst::ICMP_NE: // A != 1 -> !A
5880 case CmpInst::ICMP_ULT: // A <u 1 -> !A
5881 case CmpInst::ICMP_SGT: // A >s -1 -> !A
5882 return BinaryOperator::CreateNot(A);
5883 default:
5884 llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5885 }
5886 }
5887
5888 switch (I.getPredicate()) {
5889 default:
5890 llvm_unreachable("Invalid icmp instruction!");
5891 case ICmpInst::ICMP_EQ:
5892 // icmp eq i1 A, B -> ~(A ^ B)
5893 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
5894
5895 case ICmpInst::ICMP_NE:
5896 // icmp ne i1 A, B -> A ^ B
5897 return BinaryOperator::CreateXor(A, B);
5898
5899 case ICmpInst::ICMP_UGT:
5900 // icmp ugt -> icmp ult
5901 std::swap(A, B);
5902 LLVM_FALLTHROUGH;
5903 case ICmpInst::ICMP_ULT:
5904 // icmp ult i1 A, B -> ~A & B
5905 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
5906
5907 case ICmpInst::ICMP_SGT:
5908 // icmp sgt -> icmp slt
5909 std::swap(A, B);
5910 LLVM_FALLTHROUGH;
5911 case ICmpInst::ICMP_SLT:
5912 // icmp slt i1 A, B -> A & ~B
5913 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
5914
5915 case ICmpInst::ICMP_UGE:
5916 // icmp uge -> icmp ule
5917 std::swap(A, B);
5918 LLVM_FALLTHROUGH;
5919 case ICmpInst::ICMP_ULE:
5920 // icmp ule i1 A, B -> ~A | B
5921 return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
5922
5923 case ICmpInst::ICMP_SGE:
5924 // icmp sge -> icmp sle
5925 std::swap(A, B);
5926 LLVM_FALLTHROUGH;
5927 case ICmpInst::ICMP_SLE:
5928 // icmp sle i1 A, B -> A | ~B
5929 return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
5930 }
5931 }
5932
5933 // Transform pattern like:
5934 // (1 << Y) u<= X or ~(-1 << Y) u< X or ((1 << Y)+(-1)) u< X
5935 // (1 << Y) u> X or ~(-1 << Y) u>= X or ((1 << Y)+(-1)) u>= X
5936 // Into:
5937 // (X l>> Y) != 0
5938 // (X l>> Y) == 0
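// The transform is valid because (1 << Y) u<= X holds exactly when X has a
// set bit at position Y or above, i.e. when (X l>> Y) is non-zero.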
5939 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
5940 InstCombiner::BuilderTy &Builder) {
5941 ICmpInst::Predicate Pred, NewPred;
5942 Value *X, *Y;
5943 if (match(&Cmp,
5944 m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
5945 switch (Pred) {
5946 case ICmpInst::ICMP_ULE:
5947 NewPred = ICmpInst::ICMP_NE;
5948 break;
5949 case ICmpInst::ICMP_UGT:
5950 NewPred = ICmpInst::ICMP_EQ;
5951 break;
5952 default:
5953 return nullptr;
5954 }
5955 } else if (match(&Cmp, m_c_ICmp(Pred,
5956 m_OneUse(m_CombineOr(
5957 m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
5958 m_Add(m_Shl(m_One(), m_Value(Y)),
5959 m_AllOnes()))),
5960 m_Value(X)))) {
5961 // The variant with 'add' is not canonical (the variant with 'not' is); we
5962 // only see it here because it has extra uses and could not be canonicalized.
5963
5964 switch (Pred) {
5965 case ICmpInst::ICMP_ULT:
5966 NewPred = ICmpInst::ICMP_NE;
5967 break;
5968 case ICmpInst::ICMP_UGE:
5969 NewPred = ICmpInst::ICMP_EQ;
5970 break;
5971 default:
5972 return nullptr;
5973 }
5974 } else
5975 return nullptr;
5976
5977 Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
5978 Constant *Zero = Constant::getNullValue(NewX->getType());
5979 return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
5980 }
5981
5982 static Instruction *foldVectorCmp(CmpInst &Cmp,
5983 InstCombiner::BuilderTy &Builder) {
5984 const CmpInst::Predicate Pred = Cmp.getPredicate();
5985 Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
5986 Value *V1, *V2;
5987 ArrayRef<int> M;
5988 if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
5989 return nullptr;
5990
5991 // If both arguments of the cmp are shuffles that use the same mask and
5992 // shuffle within a single vector, move the shuffle after the cmp:
5993 // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
5994 Type *V1Ty = V1->getType();
5995 if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
5996 V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
5997 Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
5998 return new ShuffleVectorInst(NewCmp, M);
5999 }
6000
6001 // Try to canonicalize compare with splatted operand and splat constant.
6002 // TODO: We could generalize this for more than splats. See/use the code in
6003 // InstCombiner::foldVectorBinop().
6004 Constant *C;
6005 if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
6006 return nullptr;
6007
6008 // Length-changing splats are ok, so adjust the constants as needed:
6009 // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
6010 Constant *ScalarC = C->getSplatValue(/* AllowUndefs */ true);
6011 int MaskSplatIndex;
6012 if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) {
6013 // We allow undefs in matching, but this transform removes those for safety.
6014 // Demanded elements analysis should be able to recover some/all of that.
6015 C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
6016 ScalarC);
6017 SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
6018 Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
6019 return new ShuffleVectorInst(NewCmp, NewM);
6020 }
6021
6022 return nullptr;
6023 }
6024
6025 // extract(uadd.with.overflow(A, B), 0) ult A
6026 // -> extract(uadd.with.overflow(A, B), 1)
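// The unsigned sum extract(uadd.with.overflow(A, B), 0) wraps around (and is
// therefore less than A) exactly when the addition overflows, so the compare
// can be replaced by the overflow bit.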
6027 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
6028 CmpInst::Predicate Pred = I.getPredicate();
6029 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6030
6031 Value *UAddOv;
6032 Value *A, *B;
6033 auto UAddOvResultPat = m_ExtractValue<0>(
6034 m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
6035 if (match(Op0, UAddOvResultPat) &&
6036 ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
6037 (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
6038 (match(A, m_One()) || match(B, m_One()))) ||
6039 (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
6040 (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
6041 // extract(uadd.with.overflow(A, B), 0) < A
6042 // extract(uadd.with.overflow(A, 1), 0) == 0
6043 // extract(uadd.with.overflow(A, -1), 0) != -1
6044 UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
6045 else if (match(Op1, UAddOvResultPat) &&
6046 Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B))
6047 // A > extract(uadd.with.overflow(A, B), 0)
6048 UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
6049 else
6050 return nullptr;
6051
6052 return ExtractValueInst::Create(UAddOv, 1);
6053 }
6054
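/// Fold a comparison of a launder.invariant.group/strip.invariant.group call
/// against null by looking through the intrinsic: when null is not a valid
/// pointer in this address space, the intrinsic's result is null exactly when
/// its pointer argument is null.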
6055 static Instruction *foldICmpInvariantGroup(ICmpInst &I) {
6056 if (!I.getOperand(0)->getType()->isPointerTy() ||
6057 NullPointerIsDefined(
6058 I.getParent()->getParent(),
6059 I.getOperand(0)->getType()->getPointerAddressSpace())) {
6060 return nullptr;
6061 }
6062 Instruction *Op;
6063 if (match(I.getOperand(0), m_Instruction(Op)) &&
6064 match(I.getOperand(1), m_Zero()) &&
6065 Op->isLaunderOrStripInvariantGroup()) {
6066 return ICmpInst::Create(Instruction::ICmp, I.getPredicate(),
6067 Op->getOperand(0), I.getOperand(1));
6068 }
6069 return nullptr;
6070 }
6071
6072 /// This function folds patterns produced by the lowering of reduce idioms,
6073 /// such as llvm.vector.reduce.and, which are lowered into instruction chains.
6074 /// It attempts to generate a scalar comparison instead of a vector comparison
6075 /// when possible.
6076 static Instruction *foldReductionIdiom(ICmpInst &I,
6077 InstCombiner::BuilderTy &Builder,
6078 const DataLayout &DL) {
6079 if (I.getType()->isVectorTy())
6080 return nullptr;
6081 ICmpInst::Predicate OuterPred, InnerPred;
6082 Value *LHS, *RHS;
6083
6084 // Match lowering of @llvm.vector.reduce.and. Turn
6085 /// %vec_ne = icmp ne <8 x i8> %lhs, %rhs
6086 /// %scalar_ne = bitcast <8 x i1> %vec_ne to i8
6087 /// %res = icmp <pred> i8 %scalar_ne, 0
6088 ///
6089 /// into
6090 ///
6091 /// %lhs.scalar = bitcast <8 x i8> %lhs to i64
6092 /// %rhs.scalar = bitcast <8 x i8> %rhs to i64
6093 /// %res = icmp <pred> i64 %lhs.scalar, %rhs.scalar
6094 ///
6095 /// for <pred> in {ne, eq}.
6096 if (!match(&I, m_ICmp(OuterPred,
6097 m_OneUse(m_BitCast(m_OneUse(
6098 m_ICmp(InnerPred, m_Value(LHS), m_Value(RHS))))),
6099 m_Zero())))
6100 return nullptr;
6101 auto *LHSTy = dyn_cast<FixedVectorType>(LHS->getType());
6102 if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
6103 return nullptr;
6104 unsigned NumBits =
6105 LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
6106 // TODO: Relax this to "not wider than max legal integer type"?
6107 if (!DL.isLegalInteger(NumBits))
6108 return nullptr;
6109
6110 if (ICmpInst::isEquality(OuterPred) && InnerPred == ICmpInst::ICMP_NE) {
6111 auto *ScalarTy = Builder.getIntNTy(NumBits);
6112 LHS = Builder.CreateBitCast(LHS, ScalarTy, LHS->getName() + ".scalar");
6113 RHS = Builder.CreateBitCast(RHS, ScalarTy, RHS->getName() + ".scalar");
6114 return ICmpInst::Create(Instruction::ICmp, OuterPred, LHS, RHS,
6115 I.getName());
6116 }
6117
6118 return nullptr;
6119 }
6120
6121 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
6122 bool Changed = false;
6123 const SimplifyQuery Q = SQ.getWithInstruction(&I);
6124 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6125 unsigned Op0Cplxity = getComplexity(Op0);
6126 unsigned Op1Cplxity = getComplexity(Op1);
6127
6128 /// Order the operands of the compare from most complex to least complex;
6129 /// binary operators rank above unary operators, which rank above constants,
6130 /// so constants end up on the RHS.
6131 if (Op0Cplxity < Op1Cplxity ||
6132 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
6133 I.swapOperands();
6134 std::swap(Op0, Op1);
6135 Changed = true;
6136 }
6137
6138 if (Value *V = simplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
6139 return replaceInstUsesWith(I, V);
6140
6141 // Checking whether -val or val is non-zero is the same as checking val
6142 // itself, i.e. abs(val) != 0 --> val != 0
6143 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
6144 Value *Cond, *SelectTrue, *SelectFalse;
6145 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
6146 m_Value(SelectFalse)))) {
6147 if (Value *V = dyn_castNegVal(SelectTrue)) {
6148 if (V == SelectFalse)
6149 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
6150 }
6151 else if (Value *V = dyn_castNegVal(SelectFalse)) {
6152 if (V == SelectTrue)
6153 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
6154 }
6155 }
6156 }
6157
6158 if (Op0->getType()->isIntOrIntVectorTy(1))
6159 if (Instruction *Res = canonicalizeICmpBool(I, Builder))
6160 return Res;
6161
6162 if (Instruction *Res = canonicalizeCmpWithConstant(I))
6163 return Res;
6164
6165 if (Instruction *Res = canonicalizeICmpPredicate(I))
6166 return Res;
6167
6168 if (Instruction *Res = foldICmpWithConstant(I))
6169 return Res;
6170
6171 if (Instruction *Res = foldICmpWithDominatingICmp(I))
6172 return Res;
6173
6174 if (Instruction *Res = foldICmpUsingBoolRange(I, Builder))
6175 return Res;
6176
6177 if (Instruction *Res = foldICmpUsingKnownBits(I))
6178 return Res;
6179
6180 // Test if the ICmpInst instruction is used exclusively by a select as
6181 // part of a minimum or maximum operation. If so, refrain from doing
6182 // any other folding. This helps out other analyses which understand
6183 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6184 // and CodeGen. And in this case, at least one of the comparison
6185 // operands has at least one user besides the compare (the select),
6186 // which would often largely negate the benefit of folding anyway.
6187 //
6188 // Do the same for the other patterns recognized by matchSelectPattern.
6189 if (I.hasOneUse())
6190 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
6191 Value *A, *B;
6192 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
6193 if (SPR.Flavor != SPF_UNKNOWN)
6194 return nullptr;
6195 }
6196
6197 // Do this after checking for min/max to prevent infinite looping.
6198 if (Instruction *Res = foldICmpWithZero(I))
6199 return Res;
6200
6201 // FIXME: We only do this after checking for min/max to prevent infinite
6202 // looping caused by a reverse canonicalization of these patterns for min/max.
6203 // FIXME: The organization of folds is a mess. These would naturally go into
6204 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
6205 // down here after the min/max restriction.
6206 ICmpInst::Predicate Pred = I.getPredicate();
6207 const APInt *C;
6208 if (match(Op1, m_APInt(C))) {
6209 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
6210 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
6211 Constant *Zero = Constant::getNullValue(Op0->getType());
6212 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
6213 }
6214
6215 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
6216 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
6217 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
6218 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
6219 }
6220 }
6221
6222 // The folds in here may rely on wrapping flags and special constants, so
6223 // they can break up min/max idioms in some cases but not seemingly similar
6224 // patterns.
6225 // FIXME: It may be possible to enhance select folding to make this
6226 // unnecessary. It may also be moot if we canonicalize to min/max
6227 // intrinsics.
6228 if (Instruction *Res = foldICmpBinOp(I, Q))
6229 return Res;
6230
6231 if (Instruction *Res = foldICmpInstWithConstant(I))
6232 return Res;
6233
6234 // Try to match the comparison as a sign bit test. Intentionally do this after
6235 // foldICmpInstWithConstant() to potentially let other folds happen first.
6236 if (Instruction *New = foldSignBitTest(I))
6237 return New;
6238
6239 if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
6240 return Res;
6241
6242 // Try to optimize 'icmp GEP, P' or 'icmp P, GEP'.
6243 if (auto *GEP = dyn_cast<GEPOperator>(Op0))
6244 if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
6245 return NI;
6246 if (auto *GEP = dyn_cast<GEPOperator>(Op1))
6247 if (Instruction *NI = foldGEPICmp(GEP, Op0, I.getSwappedPredicate(), I))
6248 return NI;
6249
6250 if (auto *SI = dyn_cast<SelectInst>(Op0))
6251 if (Instruction *NI = foldSelectICmp(I.getPredicate(), SI, Op1, I))
6252 return NI;
6253 if (auto *SI = dyn_cast<SelectInst>(Op1))
6254 if (Instruction *NI = foldSelectICmp(I.getSwappedPredicate(), SI, Op0, I))
6255 return NI;
6256
6257 // Try to optimize equality comparisons against alloca-based pointers.
6258 if (Op0->getType()->isPointerTy() && I.isEquality()) {
6259 assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
6260 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
6261 if (Instruction *New = foldAllocaCmp(I, Alloca))
6262 return New;
6263 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
6264 if (Instruction *New = foldAllocaCmp(I, Alloca))
6265 return New;
6266 }
6267
6268 if (Instruction *Res = foldICmpBitCast(I))
6269 return Res;
6270
6271 // TODO: Hoist this above the min/max bailout.
6272 if (Instruction *R = foldICmpWithCastOp(I))
6273 return R;
6274
6275 if (Instruction *Res = foldICmpWithMinMax(I))
6276 return Res;
6277
6278 {
6279 Value *A, *B;
6280 // Transform (A & ~B) == 0 --> (A & B) != 0
6281 // and (A & ~B) != 0 --> (A & B) == 0
6282 // if A is a power of 2.
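// When A is a power of 2, A & ~B is zero exactly when the single set bit
// of A also appears in B, i.e. when A & B is non-zero.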
6283 if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
6284 match(Op1, m_Zero()) &&
6285 isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
6286 return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
6287 Op1);
6288
6289 // ~X < ~Y --> Y < X
6290 // ~X < C --> X > ~C
6291 if (match(Op0, m_Not(m_Value(A)))) {
6292 if (match(Op1, m_Not(m_Value(B))))
6293 return new ICmpInst(I.getPredicate(), B, A);
6294
6295 const APInt *C;
6296 if (match(Op1, m_APInt(C)))
6297 return new ICmpInst(I.getSwappedPredicate(), A,
6298 ConstantInt::get(Op1->getType(), ~(*C)));
6299 }
6300
6301 Instruction *AddI = nullptr;
6302 if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
6303 m_Instruction(AddI))) &&
6304 isa<IntegerType>(A->getType())) {
6305 Value *Result;
6306 Constant *Overflow;
6307 // m_UAddWithOverflow can match patterns that do not include an explicit
6308 // "add" instruction, so check the opcode of the matched op.
6309 if (AddI->getOpcode() == Instruction::Add &&
6310 OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, A, B, *AddI,
6311 Result, Overflow)) {
6312 replaceInstUsesWith(*AddI, Result);
6313 eraseInstFromFunction(*AddI);
6314 return replaceInstUsesWith(I, Overflow);
6315 }
6316 }
6317
6318 // (zext a) * (zext b) --> llvm.umul.with.overflow.
6319 if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
6320 if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
6321 return R;
6322 }
6323 if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
6324 if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
6325 return R;
6326 }
6327 }
6328
6329 if (Instruction *Res = foldICmpEquality(I))
6330 return Res;
6331
6332 if (Instruction *Res = foldICmpOfUAddOv(I))
6333 return Res;
6334
6335 // The 'cmpxchg' instruction returns an aggregate containing the old value and
6336 // an i1 which indicates whether or not we successfully did the swap.
6337 //
6338 // Replace comparisons between the old value and the expected value with the
6339 // indicator that 'cmpxchg' returns.
6340 //
6341 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
6342 // spuriously fail. In those cases, the old value may equal the expected
6343 // value but it is possible for the swap to not occur.
6344 if (I.getPredicate() == ICmpInst::ICMP_EQ)
6345 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
6346 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
6347 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
6348 !ACXI->isWeak())
6349 return ExtractValueInst::Create(ACXI, 1);
6350
6351 {
6352 Value *X;
6353 const APInt *C;
6354 // icmp X+Cst, X
6355 if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
6356 return foldICmpAddOpConst(X, *C, I.getPredicate());
6357
6358 // icmp X, X+Cst
6359 if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X)
6360 return foldICmpAddOpConst(X, *C, I.getSwappedPredicate());
6361 }
6362
6363 if (Instruction *Res = foldICmpWithHighBitMask(I, Builder))
6364 return Res;
6365
6366 if (I.getType()->isVectorTy())
6367 if (Instruction *Res = foldVectorCmp(I, Builder))
6368 return Res;
6369
6370 if (Instruction *Res = foldICmpInvariantGroup(I))
6371 return Res;
6372
6373 if (Instruction *Res = foldReductionIdiom(I, Builder, DL))
6374 return Res;
6375
6376 return Changed ? &I : nullptr;
6377 }
6378
6379 /// Fold fcmp ([us]itofp x, cst) if possible.
6380 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
6381 Instruction *LHSI,
6382 Constant *RHSC) {
6383 if (!isa<ConstantFP>(RHSC)) return nullptr;
6384 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
6385
6386 // Get the width of the mantissa. We don't want to hack on conversions that
6387 // might lose information from the integer, e.g. "i64 -> float"
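// For example, an i32 converted to double (53-bit mantissa) is always exact,
// but an i64 is not.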
6388 int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
6389 if (MantissaWidth == -1) return nullptr; // Unknown.
6390
6391 IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
6392
6393 bool LHSUnsigned = isa<UIToFPInst>(LHSI);
6394
6395 if (I.isEquality()) {
6396 FCmpInst::Predicate P = I.getPredicate();
6397 bool IsExact = false;
6398 APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
6399 RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
6400
6401 // If the floating-point constant isn't an integer value, we already know
6402 // whether this comparison can ever be equal / not equal to it.
6403 if (!IsExact) {
6404 // TODO: Can never be -0.0 and other non-representable values
6405 APFloat RHSRoundInt(RHS);
6406 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
6407 if (RHS != RHSRoundInt) {
6408 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
6409 return replaceInstUsesWith(I, Builder.getFalse());
6410
6411 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
6412 return replaceInstUsesWith(I, Builder.getTrue());
6413 }
6414 }
6415
6416 // TODO: If the constant is exactly representable, is it always OK to do
6417 // equality compares as integer?
6418 }
6419
6420 // Check that the input is converted from an integer type that is small enough
6421 // to preserve all of its bits. TODO: check here for "known" sign bits. This
6422 // would allow us to handle (fptosi (x >>s 62) to float) when x is i64, for example.
6423 unsigned InputSize = IntTy->getScalarSizeInBits();
6424
6425 // Following test does NOT adjust InputSize downwards for signed inputs,
6426 // because the most negative value still requires all the mantissa bits
6427 // to distinguish it from one less than that value.
6428 if ((int)InputSize > MantissaWidth) {
6429 // Conversion would lose accuracy. Check if loss can impact comparison.
6430 int Exp = ilogb(RHS);
6431 if (Exp == APFloat::IEK_Inf) {
6432 int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
6433 if (MaxExponent < (int)InputSize - !LHSUnsigned)
6434 // Conversion could create infinity.
6435 return nullptr;
6436 } else {
6437 // Note that if RHS is zero or NaN, then Exp is negative
6438 // and first condition is trivially false.
6439 if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
6440 // Conversion could affect comparison.
6441 return nullptr;
6442 }
6443 }
6444
6445 // Otherwise, we can potentially simplify the comparison. We know that it
6446 // will always come through as an integer value and we know the constant is
6447 // not a NAN (it would have been previously simplified).
6448 assert(!RHS.isNaN() && "NaN comparison not already folded!");
6449
6450 ICmpInst::Predicate Pred;
6451 switch (I.getPredicate()) {
6452 default: llvm_unreachable("Unexpected predicate!");
6453 case FCmpInst::FCMP_UEQ:
6454 case FCmpInst::FCMP_OEQ:
6455 Pred = ICmpInst::ICMP_EQ;
6456 break;
6457 case FCmpInst::FCMP_UGT:
6458 case FCmpInst::FCMP_OGT:
6459 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
6460 break;
6461 case FCmpInst::FCMP_UGE:
6462 case FCmpInst::FCMP_OGE:
6463 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
6464 break;
6465 case FCmpInst::FCMP_ULT:
6466 case FCmpInst::FCMP_OLT:
6467 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
6468 break;
6469 case FCmpInst::FCMP_ULE:
6470 case FCmpInst::FCMP_OLE:
6471 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
6472 break;
6473 case FCmpInst::FCMP_UNE:
6474 case FCmpInst::FCMP_ONE:
6475 Pred = ICmpInst::ICMP_NE;
6476 break;
6477 case FCmpInst::FCMP_ORD:
6478 return replaceInstUsesWith(I, Builder.getTrue());
6479 case FCmpInst::FCMP_UNO:
6480 return replaceInstUsesWith(I, Builder.getFalse());
6481 }
6482
6483 // Now we know that the APFloat is a normal number, zero or inf.
6484
6485 // See if the FP constant is too large for the integer. For example,
6486 // comparing an i8 to 300.0.
6487 unsigned IntWidth = IntTy->getScalarSizeInBits();
6488
6489 if (!LHSUnsigned) {
6490 // If the RHS value is > SignedMax, fold the comparison. This handles +INF
6491 // and large values.
6492 APFloat SMax(RHS.getSemantics());
6493 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
6494 APFloat::rmNearestTiesToEven);
6495 if (SMax < RHS) { // smax < 13123.0
6496 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
6497 Pred == ICmpInst::ICMP_SLE)
6498 return replaceInstUsesWith(I, Builder.getTrue());
6499 return replaceInstUsesWith(I, Builder.getFalse());
6500 }
6501 } else {
6502 // If the RHS value is > UnsignedMax, fold the comparison. This handles
6503 // +INF and large values.
6504 APFloat UMax(RHS.getSemantics());
6505 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
6506 APFloat::rmNearestTiesToEven);
6507 if (UMax < RHS) { // umax < 13123.0
6508 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
6509 Pred == ICmpInst::ICMP_ULE)
6510 return replaceInstUsesWith(I, Builder.getTrue());
6511 return replaceInstUsesWith(I, Builder.getFalse());
6512 }
6513 }
6514
6515 if (!LHSUnsigned) {
6516 // See if the RHS value is < SignedMin.
6517 APFloat SMin(RHS.getSemantics());
6518 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
6519 APFloat::rmNearestTiesToEven);
6520 if (SMin > RHS) { // smin > 12312.0
6521 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
6522 Pred == ICmpInst::ICMP_SGE)
6523 return replaceInstUsesWith(I, Builder.getTrue());
6524 return replaceInstUsesWith(I, Builder.getFalse());
6525 }
6526 } else {
6527 // See if the RHS value is < UnsignedMin.
6528 APFloat UMin(RHS.getSemantics());
6529 UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
6530 APFloat::rmNearestTiesToEven);
6531 if (UMin > RHS) { // umin > 12312.0
6532 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
6533 Pred == ICmpInst::ICMP_UGE)
6534 return replaceInstUsesWith(I, Builder.getTrue());
6535 return replaceInstUsesWith(I, Builder.getFalse());
6536 }
6537 }
6538
6539 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
6540 // [0, UMAX], but it may still be fractional. See if it is fractional by
6541 // casting the FP value to the integer value and back, checking for equality.
6542 // Don't do this for zero, because -0.0 is not fractional.
6543 Constant *RHSInt = LHSUnsigned
6544 ? ConstantExpr::getFPToUI(RHSC, IntTy)
6545 : ConstantExpr::getFPToSI(RHSC, IntTy);
6546 if (!RHS.isZero()) {
6547 bool Equal = LHSUnsigned
6548 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
6549 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
6550 if (!Equal) {
6551 // If we had a comparison against a fractional value, we have to adjust
6552 // the compare predicate and sometimes the value. RHSC is rounded towards
6553 // zero at this point.
6554 switch (Pred) {
6555 default: llvm_unreachable("Unexpected integer comparison!");
6556 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
6557 return replaceInstUsesWith(I, Builder.getTrue());
6558 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
6559 return replaceInstUsesWith(I, Builder.getFalse());
6560 case ICmpInst::ICMP_ULE:
6561 // (float)int <= 4.4 --> int <= 4
6562 // (float)int <= -4.4 --> false
6563 if (RHS.isNegative())
6564 return replaceInstUsesWith(I, Builder.getFalse());
6565 break;
6566 case ICmpInst::ICMP_SLE:
6567 // (float)int <= 4.4 --> int <= 4
6568 // (float)int <= -4.4 --> int < -4
6569 if (RHS.isNegative())
6570 Pred = ICmpInst::ICMP_SLT;
6571 break;
6572 case ICmpInst::ICMP_ULT:
6573 // (float)int < -4.4 --> false
6574 // (float)int < 4.4 --> int <= 4
6575 if (RHS.isNegative())
6576 return replaceInstUsesWith(I, Builder.getFalse());
6577 Pred = ICmpInst::ICMP_ULE;
6578 break;
6579 case ICmpInst::ICMP_SLT:
6580 // (float)int < -4.4 --> int < -4
6581 // (float)int < 4.4 --> int <= 4
6582 if (!RHS.isNegative())
6583 Pred = ICmpInst::ICMP_SLE;
6584 break;
6585 case ICmpInst::ICMP_UGT:
6586 // (float)int > 4.4 --> int > 4
6587 // (float)int > -4.4 --> true
6588 if (RHS.isNegative())
6589 return replaceInstUsesWith(I, Builder.getTrue());
6590 break;
6591 case ICmpInst::ICMP_SGT:
6592 // (float)int > 4.4 --> int > 4
6593 // (float)int > -4.4 --> int >= -4
6594 if (RHS.isNegative())
6595 Pred = ICmpInst::ICMP_SGE;
6596 break;
6597 case ICmpInst::ICMP_UGE:
6598 // (float)int >= -4.4 --> true
6599 // (float)int >= 4.4 --> int > 4
6600 if (RHS.isNegative())
6601 return replaceInstUsesWith(I, Builder.getTrue());
6602 Pred = ICmpInst::ICMP_UGT;
6603 break;
6604 case ICmpInst::ICMP_SGE:
6605 // (float)int >= -4.4 --> int >= -4
6606 // (float)int >= 4.4 --> int > 4
6607 if (!RHS.isNegative())
6608 Pred = ICmpInst::ICMP_SGT;
6609 break;
6610 }
6611 }
6612 }
6613
6614 // Lower this FP comparison into an appropriate integer version of the
6615 // comparison.
6616 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
6617 }
6618
6619 /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
6620 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
6621 Constant *RHSC) {
6622 // When C is not 0.0 and infinities are not allowed:
6623 // (C / X) < 0.0 is a sign-bit test of X
6624 // (C / X) < 0.0 --> X < 0.0 (if C is positive)
6625 // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
6626 //
6627 // Proof:
6628 // Multiply (C / X) < 0.0 by X * X / C.
6629 // - X is non-zero; if it were zero, the 'ninf' flag would be violated.
6630 // - C determines the sign of X * X / C, and thus whether to swap
6631 // the predicate. C is also non-zero by definition.
6632 //
6633 // Thus X * X / C is non zero and the transformation is valid. [qed]
6634
6635 FCmpInst::Predicate Pred = I.getPredicate();
6636
6637 // Check that predicates are valid.
6638 if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
6639 (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
6640 return nullptr;
6641
6642 // Check that RHS operand is zero.
6643 if (!match(RHSC, m_AnyZeroFP()))
6644 return nullptr;
6645
6646 // Check fastmath flags ('ninf').
6647 if (!LHSI->hasNoInfs() || !I.hasNoInfs())
6648 return nullptr;
6649
6650 // Check the properties of the dividend. It must not be zero to avoid a
6651 // division by zero (see Proof).
6652 const APFloat *C;
6653 if (!match(LHSI->getOperand(0), m_APFloat(C)))
6654 return nullptr;
6655
6656 if (C->isZero())
6657 return nullptr;
6658
6659 // Get swapped predicate if necessary.
6660 if (C->isNegative())
6661 Pred = I.getSwappedPredicate();
6662
6663 return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
6664 }
6665
6666 /// Optimize fabs(X) compared with zero.
6667 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
6668 Value *X;
6669 if (!match(I.getOperand(0), m_FAbs(m_Value(X))) ||
6670 !match(I.getOperand(1), m_PosZeroFP()))
6671 return nullptr;
6672
6673 auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
6674 I->setPredicate(P);
6675 return IC.replaceOperand(*I, 0, X);
6676 };
6677
6678 switch (I.getPredicate()) {
6679 case FCmpInst::FCMP_UGE:
6680 case FCmpInst::FCMP_OLT:
6681 // fabs(X) >= 0.0 --> true
6682 // fabs(X) < 0.0 --> false
6683 llvm_unreachable("fcmp should have simplified");
6684
6685 case FCmpInst::FCMP_OGT:
6686 // fabs(X) > 0.0 --> X != 0.0
6687 return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
6688
6689 case FCmpInst::FCMP_UGT:
6690 // fabs(X) u> 0.0 --> X u!= 0.0
6691 return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
6692
6693 case FCmpInst::FCMP_OLE:
6694 // fabs(X) <= 0.0 --> X == 0.0
6695 return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
6696
6697 case FCmpInst::FCMP_ULE:
6698 // fabs(X) u<= 0.0 --> X u== 0.0
6699 return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
6700
6701 case FCmpInst::FCMP_OGE:
6702 // fabs(X) >= 0.0 --> !isnan(X)
6703 assert(!I.hasNoNaNs() && "fcmp should have simplified");
6704 return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
6705
6706 case FCmpInst::FCMP_ULT:
6707 // fabs(X) u< 0.0 --> isnan(X)
6708 assert(!I.hasNoNaNs() && "fcmp should have simplified");
6709 return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
6710
6711 case FCmpInst::FCMP_OEQ:
6712 case FCmpInst::FCMP_UEQ:
6713 case FCmpInst::FCMP_ONE:
6714 case FCmpInst::FCMP_UNE:
6715 case FCmpInst::FCMP_ORD:
6716 case FCmpInst::FCMP_UNO:
6717 // Look through the fabs() because it doesn't change anything but the sign.
6718 // fabs(X) == 0.0 --> X == 0.0,
6719 // fabs(X) != 0.0 --> X != 0.0
6720 // isnan(fabs(X)) --> isnan(X)
6721 // !isnan(fabs(X)) --> !isnan(X)
6722 return replacePredAndOp0(&I, I.getPredicate(), X);
6723
6724 default:
6725 return nullptr;
6726 }
6727 }
6728
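/// Fold an fcmp of a value against its own negation, e.g.
/// 'fcmp olt X, (fneg X)', by comparing X against zero instead.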
6729 static Instruction *foldFCmpFNegCommonOp(FCmpInst &I) {
6730 CmpInst::Predicate Pred = I.getPredicate();
6731 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6732
6733 // Canonicalize fneg as Op1.
6734 if (match(Op0, m_FNeg(m_Value())) && !match(Op1, m_FNeg(m_Value()))) {
6735 std::swap(Op0, Op1);
6736 Pred = I.getSwappedPredicate();
6737 }
6738
6739 if (!match(Op1, m_FNeg(m_Specific(Op0))))
6740 return nullptr;
6741
6742 // Replace the negated operand with 0.0:
6743 // fcmp Pred Op0, -Op0 --> fcmp Pred Op0, 0.0
6744 Constant *Zero = ConstantFP::getNullValue(Op0->getType());
6745 return new FCmpInst(Pred, Op0, Zero, "", &I);
6746 }
6747
6748 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
6749 bool Changed = false;
6750
6751 /// Order the operands of the compare from most complex to least complex;
6752 /// binary operators rank above unary operators, which rank above constants,
6753 /// so constants end up on the RHS.
6754 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6755 I.swapOperands();
6756 Changed = true;
6757 }
6758
6759 const CmpInst::Predicate Pred = I.getPredicate();
6760 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6761 if (Value *V = simplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
6762 SQ.getWithInstruction(&I)))
6763 return replaceInstUsesWith(I, V);
6764
6765 // Simplify 'fcmp pred X, X'
6766 Type *OpType = Op0->getType();
6767 assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
6768 if (Op0 == Op1) {
6769 switch (Pred) {
6770 default: break;
6771 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
6772 case FCmpInst::FCMP_ULT: // True if unordered or less than
6773 case FCmpInst::FCMP_UGT: // True if unordered or greater than
6774 case FCmpInst::FCMP_UNE: // True if unordered or not equal
6775 // Canonicalize these to be 'fcmp uno %X, 0.0'.
6776 I.setPredicate(FCmpInst::FCMP_UNO);
6777 I.setOperand(1, Constant::getNullValue(OpType));
6778 return &I;
6779
6780 case FCmpInst::FCMP_ORD: // True if ordered (no nans)
6781 case FCmpInst::FCMP_OEQ: // True if ordered and equal
6782 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
6783 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
6784 // Canonicalize these to be 'fcmp ord %X, 0.0'.
6785 I.setPredicate(FCmpInst::FCMP_ORD);
6786 I.setOperand(1, Constant::getNullValue(OpType));
6787 return &I;
6788 }
6789 }
6790
6791 // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
6792 // then canonicalize the operand to 0.0.
6793 if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
6794 if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI))
6795 return replaceOperand(I, 0, ConstantFP::getNullValue(OpType));
6796
6797 if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI))
6798 return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6799 }
6800
6801 // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
6802 Value *X, *Y;
6803 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
6804 return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
6805
6806 if (Instruction *R = foldFCmpFNegCommonOp(I))
6807 return R;
6808
6809 // Test if the FCmpInst instruction is used exclusively by a select as
6810 // part of a minimum or maximum operation. If so, refrain from doing
6811 // any other folding. This helps out other analyses which understand
6812 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6813 // and CodeGen. And in this case, at least one of the comparison
6814 // operands has at least one user besides the compare (the select),
6815 // which would often largely negate the benefit of folding anyway.
6816 if (I.hasOneUse())
6817 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
6818 Value *A, *B;
6819 SelectPatternResult SPR = matchSelectPattern(SI, A, B);
6820 if (SPR.Flavor != SPF_UNKNOWN)
6821 return nullptr;
6822 }
6823
6824 // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
6825 // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
6826 if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
6827 return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6828
6829 // Handle fcmp with instruction LHS and constant RHS.
6830 Instruction *LHSI;
6831 Constant *RHSC;
6832 if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
6833 switch (LHSI->getOpcode()) {
6834 case Instruction::PHI:
6835 // Only fold fcmp into the PHI if the phi and fcmp are in the same
6836 // block. If in the same block, we're encouraging jump threading. If
6837 // not, we are just pessimizing the code by making an i1 phi.
6838 if (LHSI->getParent() == I.getParent())
6839 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
6840 return NV;
6841 break;
6842 case Instruction::SIToFP:
6843 case Instruction::UIToFP:
6844 if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
6845 return NV;
6846 break;
6847 case Instruction::FDiv:
6848 if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
6849 return NV;
6850 break;
6851 case Instruction::Load:
6852 if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
6853 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6854 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(
6855 cast<LoadInst>(LHSI), GEP, GV, I))
6856 return Res;
6857 break;
6858 }
6859 }
6860
6861 if (Instruction *R = foldFabsWithFcmpZero(I, *this))
6862 return R;
6863
6864 if (match(Op0, m_FNeg(m_Value(X)))) {
6865 // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
6866 Constant *C;
6867 if (match(Op1, m_Constant(C))) {
6868 Constant *NegC = ConstantExpr::getFNeg(C);
6869 return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
6870 }
6871 }
6872
6873 if (match(Op0, m_FPExt(m_Value(X)))) {
6874 // fcmp (fpext X), (fpext Y) -> fcmp X, Y
6875 if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
6876 return new FCmpInst(Pred, X, Y, "", &I);
6877
6878 const APFloat *C;
6879 if (match(Op1, m_APFloat(C))) {
6880 const fltSemantics &FPSem =
6881 X->getType()->getScalarType()->getFltSemantics();
6882 bool Lossy;
6883 APFloat TruncC = *C;
6884 TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
6885
6886 if (Lossy) {
6887 // X can't possibly equal the higher-precision constant, so reduce any
6888 // equality comparison.
6889 // TODO: Other predicates can be handled via getFCmpCode().
6890 switch (Pred) {
6891 case FCmpInst::FCMP_OEQ:
6892 // X is ordered and equal to an impossible constant --> false
6893 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
6894 case FCmpInst::FCMP_ONE:
6895 // X is ordered and not equal to an impossible constant --> ordered
6896 return new FCmpInst(FCmpInst::FCMP_ORD, X,
6897 ConstantFP::getNullValue(X->getType()));
6898 case FCmpInst::FCMP_UEQ:
6899 // X is unordered or equal to an impossible constant --> unordered
6900 return new FCmpInst(FCmpInst::FCMP_UNO, X,
6901 ConstantFP::getNullValue(X->getType()));
6902 case FCmpInst::FCMP_UNE:
6903 // X is unordered or not equal to an impossible constant --> true
6904 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
6905 default:
6906 break;
6907 }
6908 }
6909
6910 // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
6911 // Avoid lossy conversions and denormals.
6912 // Zero is a special case that's OK to convert.
6913 APFloat Fabs = TruncC;
6914 Fabs.clearSign();
6915 if (!Lossy &&
6916 (!(Fabs < APFloat::getSmallestNormalized(FPSem)) || Fabs.isZero())) {
6917 Constant *NewC = ConstantFP::get(X->getType(), TruncC);
6918 return new FCmpInst(Pred, X, NewC, "", &I);
6919 }
6920 }
6921 }
6922
6923 // Convert a sign-bit test of an FP value into a cast and integer compare.
6924 // TODO: Simplify if the copysign constant is 0.0 or NaN.
6925 // TODO: Handle non-zero compare constants.
6926 // TODO: Handle other predicates.
6927 const APFloat *C;
6928 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::copysign>(m_APFloat(C),
6929 m_Value(X)))) &&
6930 match(Op1, m_AnyZeroFP()) && !C->isZero() && !C->isNaN()) {
6931 Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits());
6932 if (auto *VecTy = dyn_cast<VectorType>(OpType))
6933 IntType = VectorType::get(IntType, VecTy->getElementCount());
6934
6935 // copysign(non-zero constant, X) < 0.0 --> (bitcast X) < 0
6936 if (Pred == FCmpInst::FCMP_OLT) {
6937 Value *IntX = Builder.CreateBitCast(X, IntType);
6938 return new ICmpInst(ICmpInst::ICMP_SLT, IntX,
6939 ConstantInt::getNullValue(IntType));
6940 }
6941 }
6942
6943 if (I.getType()->isVectorTy())
6944 if (Instruction *Res = foldVectorCmp(I, Builder))
6945 return Res;
6946
6947 return Changed ? &I : nullptr;
6948 }
6949