//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");

/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);

  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);

  return Overflow;
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS.isNullValue();
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isSignMask();
  default:
    return false;
  }
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
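/// For example (illustrative, per the cases handled below): 'icmp slt X, 1'
/// becomes the sign test 'icmp sle X, 0', and 'icmp sgt X, -1' becomes
/// 'icmp sge X, 0'.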
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = Known.One;
  Max = Known.One|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}

/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One|UnknownBits;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        CmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
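  // An illustrative (hypothetical) instance of the simple form handled here:
  //   %gep = getelementptr inbounds [4 x i8], [4 x i8]* @foo, i64 0, i64 %i
  //   %val = load i8, i8* %gep
  //   %cmp = icmp eq i8 %val, 0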
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index. Collect the indices. This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr;  // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr;  // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i;  // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i % 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly. We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder.CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
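    // (With the "foo" example from the header comment, FirstTrueElement is 3,
    // the index of the nul terminator, so this emits 'icmp eq i, 3'.)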
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(), SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to IntPtrTy in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
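  // (For example, with 32-bit pointers IntPtrWidth is 32, so PtrSizeMask
  // below is 0xFFFFFFFF.)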
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
                                           true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}

/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = dyn_cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
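    // (Incoming values of the PHIs found in this pass seed the worklist for
    // the next iteration of the outer loop.)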
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {

    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      NewInsts[CI] = NewInsts[CI->getOperand(0)];
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
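      // (CreateSExtOrTrunc below widens or narrows Index to the width of the
      // accumulated offset it will be added to.)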
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEP's pointer and index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getIndexTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
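    // (Illustratively: 'icmp eq (gep %a, i64 %i), (gep %b, i64 %i)' can become
    // 'icmp eq %a, %b' when the operand lists match up as checked below.)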
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try to convert this to an indexed compare by looking
      // through PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    if (GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
          Builder.getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts
  // as a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
        // These intrinsics don't escape or compare the pointer. Memset is safe
        // because we don't allow ptrtoint. Memcpy and memmove are safe because
        // we don't allow stores, so src cannot point to V.
        case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
        case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
          continue;
        default:
          return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}

/// Fold "icmp pred (X+C), X".
Instruction *InstCombiner::foldICmpAddOpConst(Value *X, const APInt &C,
                                              ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // operators.
  assert(!!C && "C should not be zero!");

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X >u 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Constant *R = ConstantInt::get(X->getType(),
                                   APInt::getMaxValue(C.getBitWidth()) - C);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X,
                        ConstantInt::get(X->getType(), -C));

  APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());

  // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X,
                        ConstantInt::get(X->getType(), SMax - C));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  return new ICmpInst(ICmpInst::ICMP_SLT, X,
                      ConstantInt::get(X->getType(), SMax - (C - 1)));
}

/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow.
1145 if (AP2.isNullValue()) 1146 return nullptr; 1147 1148 bool IsAShr = isa<AShrOperator>(I.getOperand(0)); 1149 if (IsAShr) { 1150 if (AP2.isAllOnesValue()) 1151 return nullptr; 1152 if (AP2.isNegative() != AP1.isNegative()) 1153 return nullptr; 1154 if (AP2.sgt(AP1)) 1155 return nullptr; 1156 } 1157 1158 if (!AP1) 1159 // 'A' must be large enough to shift out the highest set bit. 1160 return getICmp(I.ICMP_UGT, A, 1161 ConstantInt::get(A->getType(), AP2.logBase2())); 1162 1163 if (AP1 == AP2) 1164 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType())); 1165 1166 int Shift; 1167 if (IsAShr && AP1.isNegative()) 1168 Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes(); 1169 else 1170 Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros(); 1171 1172 if (Shift > 0) { 1173 if (IsAShr && AP1 == AP2.ashr(Shift)) { 1174 // There are multiple solutions if we are comparing against -1 and the LHS 1175 // of the ashr is not a power of two. 1176 if (AP1.isAllOnesValue() && !AP2.isPowerOf2()) 1177 return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift)); 1178 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift)); 1179 } else if (AP1 == AP2.lshr(Shift)) { 1180 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift)); 1181 } 1182 } 1183 1184 // Shifting const2 will never be equal to const1. 1185 // FIXME: This should always be handled by InstSimplify? 1186 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE); 1187 return replaceInstUsesWith(I, TorF); 1188 } 1189 1190 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" -> 1191 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)). 1192 Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A, 1193 const APInt &AP1, 1194 const APInt &AP2) { 1195 assert(I.isEquality() && "Cannot fold icmp gt/lt"); 1196 1197 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) { 1198 if (I.getPredicate() == I.ICMP_NE) 1199 Pred = CmpInst::getInversePredicate(Pred); 1200 return new ICmpInst(Pred, LHS, RHS); 1201 }; 1202 1203 // Don't bother doing any work for cases which InstSimplify handles. 1204 if (AP2.isNullValue()) 1205 return nullptr; 1206 1207 unsigned AP2TrailingZeros = AP2.countTrailingZeros(); 1208 1209 if (!AP1 && AP2TrailingZeros != 0) 1210 return getICmp( 1211 I.ICMP_UGE, A, 1212 ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros)); 1213 1214 if (AP1 == AP2) 1215 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType())); 1216 1217 // Get the distance between the lowest bits that are set. 1218 int Shift = AP1.countTrailingZeros() - AP2TrailingZeros; 1219 1220 if (Shift > 0 && AP2.shl(Shift) == AP1) 1221 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift)); 1222 1223 // Shifting const2 will never be equal to const1. 1224 // FIXME: This should always be handled by InstSimplify? 1225 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE); 1226 return replaceInstUsesWith(I, TorF); 1227 } 1228 1229 /// The caller has matched a pattern of the form: 1230 /// I = icmp ugt (add (add A, B), CI2), CI1 1231 /// If this is of the form: 1232 /// sum = a + b 1233 /// if (sum+128 >u 255) 1234 /// Then replace it with llvm.sadd.with.overflow.i8. 1235 /// 1236 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, 1237 ConstantInt *CI2, ConstantInt *CI1, 1238 InstCombiner &IC) { 1239 // The transformation we're trying to do here is to transform this into an 1240 // llvm.sadd.with.overflow. 
To do this, we have to replace the original add 1241 // with a narrower add, and discard the add-with-constant that is part of the 1242 // range check (if we can't eliminate it, this isn't profitable). 1243 1244 // In order to eliminate the add-with-constant, the compare can be its only 1245 // use. 1246 Instruction *AddWithCst = cast<Instruction>(I.getOperand(0)); 1247 if (!AddWithCst->hasOneUse()) 1248 return nullptr; 1249 1250 // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow. 1251 if (!CI2->getValue().isPowerOf2()) 1252 return nullptr; 1253 unsigned NewWidth = CI2->getValue().countTrailingZeros(); 1254 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) 1255 return nullptr; 1256 1257 // The width of the new add formed is 1 more than the bias. 1258 ++NewWidth; 1259 1260 // Check to see that CI1 is an all-ones value with NewWidth bits. 1261 if (CI1->getBitWidth() == NewWidth || 1262 CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth)) 1263 return nullptr; 1264 1265 // This is only really a signed overflow check if the inputs have been 1266 // sign-extended; check for that condition. For example, if CI2 is 2^31 and 1267 // the operands of the add are 64 bits wide, we need at least 33 sign bits. 1268 unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1; 1269 if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits || 1270 IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits) 1271 return nullptr; 1272 1273 // In order to replace the original add with a narrower 1274 // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant 1275 // and truncates that discard the high bits of the add. Verify that this is 1276 // the case. 1277 Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0)); 1278 for (User *U : OrigAdd->users()) { 1279 if (U == AddWithCst) 1280 continue; 1281 1282 // Only accept truncates for now. We would really like a nice recursive 1283 // predicate like SimplifyDemandedBits, but which goes downwards the use-def 1284 // chain to see which bits of a value are actually demanded. If the 1285 // original add had another add which was then immediately truncated, we 1286 // could still do the transformation. 1287 TruncInst *TI = dyn_cast<TruncInst>(U); 1288 if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth) 1289 return nullptr; 1290 } 1291 1292 // If the pattern matches, truncate the inputs to the narrower type and 1293 // use the sadd_with_overflow intrinsic to efficiently compute both the 1294 // result and the overflow bit. 1295 Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth); 1296 Value *F = Intrinsic::getDeclaration(I.getModule(), 1297 Intrinsic::sadd_with_overflow, NewType); 1298 1299 InstCombiner::BuilderTy &Builder = IC.Builder; 1300 1301 // Put the new code above the original add, in case there are any uses of the 1302 // add between the add and the compare. 1303 Builder.SetInsertPoint(OrigAdd); 1304 1305 Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc"); 1306 Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc"); 1307 CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd"); 1308 Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result"); 1309 Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType()); 1310 1311 // The inner add was the result of the narrow add, zero extended to the 1312 // wider type. Replace it with the result computed by the intrinsic. 
1313 IC.replaceInstUsesWith(*OrigAdd, ZExt); 1314 1315 // The original icmp gets replaced with the overflow value. 1316 return ExtractValueInst::Create(Call, 1, "sadd.overflow"); 1317 } 1318 1319 // Handle (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0) 1320 Instruction *InstCombiner::foldICmpWithZero(ICmpInst &Cmp) { 1321 CmpInst::Predicate Pred = Cmp.getPredicate(); 1322 Value *X = Cmp.getOperand(0); 1323 1324 if (match(Cmp.getOperand(1), m_Zero()) && Pred == ICmpInst::ICMP_SGT) { 1325 Value *A, *B; 1326 SelectPatternResult SPR = matchSelectPattern(X, A, B); 1327 if (SPR.Flavor == SPF_SMIN) { 1328 if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT)) 1329 return new ICmpInst(Pred, B, Cmp.getOperand(1)); 1330 if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT)) 1331 return new ICmpInst(Pred, A, Cmp.getOperand(1)); 1332 } 1333 } 1334 return nullptr; 1335 } 1336 1337 // Fold icmp Pred X, C. 1338 Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) { 1339 CmpInst::Predicate Pred = Cmp.getPredicate(); 1340 Value *X = Cmp.getOperand(0); 1341 1342 const APInt *C; 1343 if (!match(Cmp.getOperand(1), m_APInt(C))) 1344 return nullptr; 1345 1346 Value *A = nullptr, *B = nullptr; 1347 1348 // Match the following pattern, which is a common idiom when writing 1349 // overflow-safe integer arithmetic functions. The source performs an addition 1350 // in wider type and explicitly checks for overflow using comparisons against 1351 // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic. 1352 // 1353 // TODO: This could probably be generalized to handle other overflow-safe 1354 // operations if we worked out the formulas to compute the appropriate magic 1355 // constants. 1356 // 1357 // sum = a + b 1358 // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8 1359 { 1360 ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI 1361 if (Pred == ICmpInst::ICMP_UGT && 1362 match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2)))) 1363 if (Instruction *Res = processUGT_ADDCST_ADD( 1364 Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this)) 1365 return Res; 1366 } 1367 1368 // FIXME: Use m_APInt to allow folds for splat constants. 1369 ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1)); 1370 if (!CI) 1371 return nullptr; 1372 1373 // Canonicalize icmp instructions based on dominating conditions. 1374 BasicBlock *Parent = Cmp.getParent(); 1375 BasicBlock *Dom = Parent->getSinglePredecessor(); 1376 auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr; 1377 ICmpInst::Predicate Pred2; 1378 BasicBlock *TrueBB, *FalseBB; 1379 ConstantInt *CI2; 1380 if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)), 1381 TrueBB, FalseBB)) && 1382 TrueBB != FalseBB) { 1383 ConstantRange CR = 1384 ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue()); 1385 ConstantRange DominatingCR = 1386 (Parent == TrueBB) 1387 ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue()) 1388 : ConstantRange::makeExactICmpRegion( 1389 CmpInst::getInversePredicate(Pred2), CI2->getValue()); 1390 ConstantRange Intersection = DominatingCR.intersectWith(CR); 1391 ConstantRange Difference = DominatingCR.difference(CR); 1392 if (Intersection.isEmptySet()) 1393 return replaceInstUsesWith(Cmp, Builder.getFalse()); 1394 if (Difference.isEmptySet()) 1395 return replaceInstUsesWith(Cmp, Builder.getTrue()); 1396 1397 // If this is a normal comparison, it demands all bits. If it is a sign 1398 // bit comparison, it only demands the sign bit. 
1399 bool UnusedBit; 1400 bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit); 1401 1402 // Canonicalizing a sign bit comparison that gets used in a branch, 1403 // pessimizes codegen by generating branch on zero instruction instead 1404 // of a test and branch. So we avoid canonicalizing in such situations 1405 // because test and branch instruction has better branch displacement 1406 // than compare and branch instruction. 1407 if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp))) 1408 return nullptr; 1409 1410 if (auto *AI = Intersection.getSingleElement()) 1411 return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*AI)); 1412 if (auto *AD = Difference.getSingleElement()) 1413 return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*AD)); 1414 } 1415 1416 return nullptr; 1417 } 1418 1419 /// Fold icmp (trunc X, Y), C. 1420 Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp, 1421 TruncInst *Trunc, 1422 const APInt &C) { 1423 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1424 Value *X = Trunc->getOperand(0); 1425 if (C.isOneValue() && C.getBitWidth() > 1) { 1426 // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1 1427 Value *V = nullptr; 1428 if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V)))) 1429 return new ICmpInst(ICmpInst::ICMP_SLT, V, 1430 ConstantInt::get(V->getType(), 1)); 1431 } 1432 1433 if (Cmp.isEquality() && Trunc->hasOneUse()) { 1434 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all 1435 // of the high bits truncated out of x are known. 1436 unsigned DstBits = Trunc->getType()->getScalarSizeInBits(), 1437 SrcBits = X->getType()->getScalarSizeInBits(); 1438 KnownBits Known = computeKnownBits(X, 0, &Cmp); 1439 1440 // If all the high bits are known, we can do this xform. 1441 if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) { 1442 // Pull in the high bits from known-ones set. 1443 APInt NewRHS = C.zext(SrcBits); 1444 NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits); 1445 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS)); 1446 } 1447 } 1448 1449 return nullptr; 1450 } 1451 1452 /// Fold icmp (xor X, Y), C. 1453 Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp, 1454 BinaryOperator *Xor, 1455 const APInt &C) { 1456 Value *X = Xor->getOperand(0); 1457 Value *Y = Xor->getOperand(1); 1458 const APInt *XorC; 1459 if (!match(Y, m_APInt(XorC))) 1460 return nullptr; 1461 1462 // If this is a comparison that tests the signbit (X < 0) or (x > -1), 1463 // fold the xor. 1464 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1465 bool TrueIfSigned = false; 1466 if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) { 1467 1468 // If the sign bit of the XorCst is not set, there is no change to 1469 // the operation, just stop using the Xor. 1470 if (!XorC->isNegative()) { 1471 Cmp.setOperand(0, X); 1472 Worklist.Add(Xor); 1473 return &Cmp; 1474 } 1475 1476 // Emit the opposite comparison. 1477 if (TrueIfSigned) 1478 return new ICmpInst(ICmpInst::ICMP_SGT, X, 1479 ConstantInt::getAllOnesValue(X->getType())); 1480 else 1481 return new ICmpInst(ICmpInst::ICMP_SLT, X, 1482 ConstantInt::getNullValue(X->getType())); 1483 } 1484 1485 if (Xor->hasOneUse()) { 1486 // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask)) 1487 if (!Cmp.isEquality() && XorC->isSignMask()) { 1488 Pred = Cmp.isSigned() ? 
Cmp.getUnsignedPredicate() 1489 : Cmp.getSignedPredicate(); 1490 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC)); 1491 } 1492 1493 // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask)) 1494 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) { 1495 Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate() 1496 : Cmp.getSignedPredicate(); 1497 Pred = Cmp.getSwappedPredicate(Pred); 1498 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC)); 1499 } 1500 } 1501 1502 // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C) 1503 // iff -C is a power of 2 1504 if (Pred == ICmpInst::ICMP_UGT && *XorC == ~C && (C + 1).isPowerOf2()) 1505 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 1506 1507 // (icmp ult (xor X, C), -C) -> (icmp uge X, C) 1508 // iff -C is a power of 2 1509 if (Pred == ICmpInst::ICMP_ULT && *XorC == -C && C.isPowerOf2()) 1510 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 1511 1512 return nullptr; 1513 } 1514 1515 /// Fold icmp (and (sh X, Y), C2), C1. 1516 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, 1517 const APInt &C1, const APInt &C2) { 1518 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0)); 1519 if (!Shift || !Shift->isShift()) 1520 return nullptr; 1521 1522 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could 1523 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in 1524 // code produced by the clang front-end, for bitfield access. 1525 // This seemingly simple opportunity to fold away a shift turns out to be 1526 // rather complicated. See PR17827 for details. 1527 unsigned ShiftOpcode = Shift->getOpcode(); 1528 bool IsShl = ShiftOpcode == Instruction::Shl; 1529 const APInt *C3; 1530 if (match(Shift->getOperand(1), m_APInt(C3))) { 1531 bool CanFold = false; 1532 if (ShiftOpcode == Instruction::Shl) { 1533 // For a left shift, we can fold if the comparison is not signed. We can 1534 // also fold a signed comparison if the mask value and comparison value 1535 // are not negative. These constraints may not be obvious, but we can 1536 // prove that they are correct using an SMT solver. 1537 if (!Cmp.isSigned() || (!C2.isNegative() && !C1.isNegative())) 1538 CanFold = true; 1539 } else { 1540 bool IsAshr = ShiftOpcode == Instruction::AShr; 1541 // For a logical right shift, we can fold if the comparison is not signed. 1542 // We can also fold a signed comparison if the shifted mask value and the 1543 // shifted comparison value are not negative. These constraints may not be 1544 // obvious, but we can prove that they are correct using an SMT solver. 1545 // For an arithmetic shift right we can do the same, if we ensure 1546 // the And doesn't use any bits being shifted in. Normally these would 1547 // be turned into lshr by SimplifyDemandedBits, but not if there is an 1548 // additional user. 1549 if (!IsAshr || (C2.shl(*C3).lshr(*C3) == C2)) { 1550 if (!Cmp.isSigned() || 1551 (!C2.shl(*C3).isNegative() && !C1.shl(*C3).isNegative())) 1552 CanFold = true; 1553 } 1554 } 1555 1556 if (CanFold) { 1557 APInt NewCst = IsShl ? C1.lshr(*C3) : C1.shl(*C3); 1558 APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3); 1559 // Check to see if we are shifting out any of the bits being compared. 1560 if (SameAsC1 != C1) { 1561 // If we shifted bits out, the fold is not going to work out. As a 1562 // special case, check to see if this means that the result is always 1563 // true or false now. 
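        // Worked example (illustrative): with i8 operands,
        // ((X l>> 4) & 0x0F) == 0x1F can never hold, because 0x1F << 4
        // truncates to 0xF0 in i8, and shifting that back down yields
        // 0x0F != 0x1F; the icmp eq therefore folds to false below.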
1564 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ) 1565 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType())); 1566 if (Cmp.getPredicate() == ICmpInst::ICMP_NE) 1567 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType())); 1568 } else { 1569 Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst)); 1570 APInt NewAndCst = IsShl ? C2.lshr(*C3) : C2.shl(*C3); 1571 And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst)); 1572 And->setOperand(0, Shift->getOperand(0)); 1573 Worklist.Add(Shift); // Shift is dead. 1574 return &Cmp; 1575 } 1576 } 1577 } 1578 1579 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is 1580 // preferable because it allows the C2 << Y expression to be hoisted out of a 1581 // loop if Y is invariant and X is not. 1582 if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() && 1583 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) { 1584 // Compute C2 << Y. 1585 Value *NewShift = 1586 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1)) 1587 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1)); 1588 1589 // Compute X & (C2 << Y). 1590 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift); 1591 Cmp.setOperand(0, NewAnd); 1592 return &Cmp; 1593 } 1594 1595 return nullptr; 1596 } 1597 1598 /// Fold icmp (and X, C2), C1. 1599 Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp, 1600 BinaryOperator *And, 1601 const APInt &C1) { 1602 const APInt *C2; 1603 if (!match(And->getOperand(1), m_APInt(C2))) 1604 return nullptr; 1605 1606 if (!And->hasOneUse()) 1607 return nullptr; 1608 1609 // If the LHS is an 'and' of a truncate and we can widen the and/compare to 1610 // the input width without changing the value produced, eliminate the cast: 1611 // 1612 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1' 1613 // 1614 // We can do this transformation if the constants do not have their sign bits 1615 // set or if it is an equality comparison. Extending a relational comparison 1616 // when we're checking the sign bit would not work. 1617 Value *W; 1618 if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) && 1619 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) { 1620 // TODO: Is this a good transform for vectors? Wider types may reduce 1621 // throughput. Should this transform be limited (even for scalars) by using 1622 // shouldChangeType()? 
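    // Worked example of the widening (illustrative, scalar case):
    //   icmp ult (and (trunc i32 %w to i8), 15), 7
    //     --> icmp ult (and i32 %w, 15), 7
    // Both constants are zero-extended to the wide type; this is safe here
    // because neither constant has its sign bit set (or the compare is an
    // equality).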
1623     if (!Cmp.getType()->isVectorTy()) {
1624       Type *WideType = W->getType();
1625       unsigned WideScalarBits = WideType->getScalarSizeInBits();
1626       Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1627       Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1628       Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1629       return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1630     }
1631   }
1632 
1633   if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1634     return I;
1635 
1636   // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1637   // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1638   //
1639   // iff pred isn't signed
1640   if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() &&
1641       match(And->getOperand(1), m_One())) {
1642     Constant *One = cast<Constant>(And->getOperand(1));
1643     Value *Or = And->getOperand(0);
1644     Value *A, *B, *LShr;
1645     if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1646         match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1647       unsigned UsesRemoved = 0;
1648       if (And->hasOneUse())
1649         ++UsesRemoved;
1650       if (Or->hasOneUse())
1651         ++UsesRemoved;
1652       if (LShr->hasOneUse())
1653         ++UsesRemoved;
1654 
1655       // Compute A & ((1 << B) | 1)
1656       Value *NewOr = nullptr;
1657       if (auto *C = dyn_cast<Constant>(B)) {
1658         if (UsesRemoved >= 1)
1659           NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1660       } else {
1661         if (UsesRemoved >= 3)
1662           NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1663                                                      /*HasNUW=*/true),
1664                                    One, Or->getName());
1665       }
1666       if (NewOr) {
1667         Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1668         Cmp.setOperand(0, NewAnd);
1669         return &Cmp;
1670       }
1671     }
1672   }
1673 
1674   return nullptr;
1675 }
1676 
1677 /// Fold icmp (and X, Y), C.
1678 Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
1679                                                BinaryOperator *And,
1680                                                const APInt &C) {
1681   if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1682     return I;
1683 
1684   // TODO: These all require that Y is constant too, so refactor with the above.
1685 
1686   // Try to optimize things like "A[i] & 42 == 0" to index computations.
1687   Value *X = And->getOperand(0);
1688   Value *Y = And->getOperand(1);
1689   if (auto *LI = dyn_cast<LoadInst>(X))
1690     if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1691       if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1692         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1693             !LI->isVolatile() && isa<ConstantInt>(Y)) {
1694           ConstantInt *C2 = cast<ConstantInt>(Y);
1695           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1696             return Res;
1697         }
1698 
1699   if (!Cmp.isEquality())
1700     return nullptr;
1701 
1702   // X & -C == -C -> X u> ~C
1703   // X & -C != -C -> X u<= ~C
1704   // iff C is a power of 2
1705   if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) {
1706     auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1707                                                           : CmpInst::ICMP_ULE;
1708     return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1709   }
1710 
1711   // (X & C2) == 0 -> (trunc X) >= 0
1712   // (X & C2) != 0 -> (trunc X) < 0
1713   // iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
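  // e.g. with i32 X: (X & 128) == 0 --> (trunc X to i8) s>= 0, since bit 7 is
  // the sign bit of i8 (an illustrative instance of the fold below).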
1714 const APInt *C2; 1715 if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) { 1716 int32_t ExactLogBase2 = C2->exactLogBase2(); 1717 if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) { 1718 Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); 1719 if (And->getType()->isVectorTy()) 1720 NTy = VectorType::get(NTy, And->getType()->getVectorNumElements()); 1721 Value *Trunc = Builder.CreateTrunc(X, NTy); 1722 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE 1723 : CmpInst::ICMP_SLT; 1724 return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy)); 1725 } 1726 } 1727 1728 return nullptr; 1729 } 1730 1731 /// Fold icmp (or X, Y), C. 1732 Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, 1733 const APInt &C) { 1734 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1735 if (C.isOneValue()) { 1736 // icmp slt signum(V) 1 --> icmp slt V, 1 1737 Value *V = nullptr; 1738 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V)))) 1739 return new ICmpInst(ICmpInst::ICMP_SLT, V, 1740 ConstantInt::get(V->getType(), 1)); 1741 } 1742 1743 // X | C == C --> X <=u C 1744 // X | C != C --> X >u C 1745 // iff C+1 is a power of 2 (C is a bitmask of the low bits) 1746 if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) && 1747 (C + 1).isPowerOf2()) { 1748 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT; 1749 return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1)); 1750 } 1751 1752 if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse()) 1753 return nullptr; 1754 1755 Value *P, *Q; 1756 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) { 1757 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0 1758 // -> and (icmp eq P, null), (icmp eq Q, null). 1759 Value *CmpP = 1760 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType())); 1761 Value *CmpQ = 1762 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType())); 1763 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1764 return BinaryOperator::Create(BOpc, CmpP, CmpQ); 1765 } 1766 1767 // Are we using xors to bitwise check for a pair of (in)equalities? Convert to 1768 // a shorter form that has more potential to be folded even further. 1769 Value *X1, *X2, *X3, *X4; 1770 if (match(Or->getOperand(0), m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) && 1771 match(Or->getOperand(1), m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) { 1772 // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4) 1773 // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4) 1774 Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2); 1775 Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4); 1776 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1777 return BinaryOperator::Create(BOpc, Cmp12, Cmp34); 1778 } 1779 1780 return nullptr; 1781 } 1782 1783 /// Fold icmp (mul X, Y), C. 1784 Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp, 1785 BinaryOperator *Mul, 1786 const APInt &C) { 1787 const APInt *MulC; 1788 if (!match(Mul->getOperand(1), m_APInt(MulC))) 1789 return nullptr; 1790 1791 // If this is a test of the sign bit and the multiply is sign-preserving with 1792 // a constant operand, use the multiply LHS operand instead. 
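  // e.g. (mul nsw X, 5) s> 0 --> X s> 0, while (mul nsw X, -5) s> 0 --> X s< 0;
  // the predicate swap below accounts for a negative multiplier (illustrative).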
1793 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1794 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) { 1795 if (MulC->isNegative()) 1796 Pred = ICmpInst::getSwappedPredicate(Pred); 1797 return new ICmpInst(Pred, Mul->getOperand(0), 1798 Constant::getNullValue(Mul->getType())); 1799 } 1800 1801 return nullptr; 1802 } 1803 1804 /// Fold icmp (shl 1, Y), C. 1805 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl, 1806 const APInt &C) { 1807 Value *Y; 1808 if (!match(Shl, m_Shl(m_One(), m_Value(Y)))) 1809 return nullptr; 1810 1811 Type *ShiftType = Shl->getType(); 1812 unsigned TypeBits = C.getBitWidth(); 1813 bool CIsPowerOf2 = C.isPowerOf2(); 1814 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1815 if (Cmp.isUnsigned()) { 1816 // (1 << Y) pred C -> Y pred Log2(C) 1817 if (!CIsPowerOf2) { 1818 // (1 << Y) < 30 -> Y <= 4 1819 // (1 << Y) <= 30 -> Y <= 4 1820 // (1 << Y) >= 30 -> Y > 4 1821 // (1 << Y) > 30 -> Y > 4 1822 if (Pred == ICmpInst::ICMP_ULT) 1823 Pred = ICmpInst::ICMP_ULE; 1824 else if (Pred == ICmpInst::ICMP_UGE) 1825 Pred = ICmpInst::ICMP_UGT; 1826 } 1827 1828 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31 1829 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31 1830 unsigned CLog2 = C.logBase2(); 1831 if (CLog2 == TypeBits - 1) { 1832 if (Pred == ICmpInst::ICMP_UGE) 1833 Pred = ICmpInst::ICMP_EQ; 1834 else if (Pred == ICmpInst::ICMP_ULT) 1835 Pred = ICmpInst::ICMP_NE; 1836 } 1837 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2)); 1838 } else if (Cmp.isSigned()) { 1839 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1); 1840 if (C.isAllOnesValue()) { 1841 // (1 << Y) <= -1 -> Y == 31 1842 if (Pred == ICmpInst::ICMP_SLE) 1843 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 1844 1845 // (1 << Y) > -1 -> Y != 31 1846 if (Pred == ICmpInst::ICMP_SGT) 1847 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 1848 } else if (!C) { 1849 // (1 << Y) < 0 -> Y == 31 1850 // (1 << Y) <= 0 -> Y == 31 1851 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 1852 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 1853 1854 // (1 << Y) >= 0 -> Y != 31 1855 // (1 << Y) > 0 -> Y != 31 1856 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 1857 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 1858 } 1859 } else if (Cmp.isEquality() && CIsPowerOf2) { 1860 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2())); 1861 } 1862 1863 return nullptr; 1864 } 1865 1866 /// Fold icmp (shl X, Y), C. 1867 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp, 1868 BinaryOperator *Shl, 1869 const APInt &C) { 1870 const APInt *ShiftVal; 1871 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal))) 1872 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal); 1873 1874 const APInt *ShiftAmt; 1875 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt))) 1876 return foldICmpShlOne(Cmp, Shl, C); 1877 1878 // Check that the shift amount is in range. If not, don't perform undefined 1879 // shifts. When the shift is visited, it will be simplified. 1880 unsigned TypeBits = C.getBitWidth(); 1881 if (ShiftAmt->uge(TypeBits)) 1882 return nullptr; 1883 1884 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1885 Value *X = Shl->getOperand(0); 1886 Type *ShType = Shl->getType(); 1887 1888 // NSW guarantees that we are only shifting out sign bits from the high bits, 1889 // so we can ASHR the compare constant without needing a mask and eliminate 1890 // the shift. 
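  // e.g. icmp sgt (shl nsw X, 2), 20 --> icmp sgt X, 5, since 20 a>> 2 == 5
  // and nsw guarantees that 4*X s> 20 holds exactly when X s> 5 (illustrative).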
1891 if (Shl->hasNoSignedWrap()) { 1892 if (Pred == ICmpInst::ICMP_SGT) { 1893 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt) 1894 APInt ShiftedC = C.ashr(*ShiftAmt); 1895 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1896 } 1897 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) && 1898 C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) { 1899 APInt ShiftedC = C.ashr(*ShiftAmt); 1900 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1901 } 1902 if (Pred == ICmpInst::ICMP_SLT) { 1903 // SLE is the same as above, but SLE is canonicalized to SLT, so convert: 1904 // (X << S) <=s C is equiv to X <=s (C >> S) for all C 1905 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX 1906 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN 1907 assert(!C.isMinSignedValue() && "Unexpected icmp slt"); 1908 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1; 1909 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1910 } 1911 // If this is a signed comparison to 0 and the shift is sign preserving, 1912 // use the shift LHS operand instead; isSignTest may change 'Pred', so only 1913 // do that if we're sure to not continue on in this function. 1914 if (isSignTest(Pred, C)) 1915 return new ICmpInst(Pred, X, Constant::getNullValue(ShType)); 1916 } 1917 1918 // NUW guarantees that we are only shifting out zero bits from the high bits, 1919 // so we can LSHR the compare constant without needing a mask and eliminate 1920 // the shift. 1921 if (Shl->hasNoUnsignedWrap()) { 1922 if (Pred == ICmpInst::ICMP_UGT) { 1923 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt) 1924 APInt ShiftedC = C.lshr(*ShiftAmt); 1925 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1926 } 1927 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) && 1928 C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) { 1929 APInt ShiftedC = C.lshr(*ShiftAmt); 1930 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1931 } 1932 if (Pred == ICmpInst::ICMP_ULT) { 1933 // ULE is the same as above, but ULE is canonicalized to ULT, so convert: 1934 // (X << S) <=u C is equiv to X <=u (C >> S) for all C 1935 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u 1936 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0 1937 assert(C.ugt(0) && "ult 0 should have been eliminated"); 1938 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1; 1939 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1940 } 1941 } 1942 1943 if (Cmp.isEquality() && Shl->hasOneUse()) { 1944 // Strength-reduce the shift into an 'and'. 1945 Constant *Mask = ConstantInt::get( 1946 ShType, 1947 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue())); 1948 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask"); 1949 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt)); 1950 return new ICmpInst(Pred, And, LShrC); 1951 } 1952 1953 // Otherwise, if this is a comparison of the sign bit, simplify to and/test. 1954 bool TrueIfSigned = false; 1955 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) { 1956 // (X << 31) <s 0 --> (X & 1) != 0 1957 Constant *Mask = ConstantInt::get( 1958 ShType, 1959 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1)); 1960 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask"); 1961 return new ICmpInst(TrueIfSigned ? 
                        ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
1962                         And, Constant::getNullValue(ShType));
1963   }
1964 
1965   // Transform (icmp pred iM (shl iM %v, N), C)
1966   // -> (icmp pred i(M-N) (trunc iM %v to i(M-N)), (trunc (C>>N)))
1967   // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N is a
1968   // legal integer width. This enables us to get rid of the shift in favor of
1969   // a trunc that may be free on the target. It has the additional benefit of
1970   // comparing to a smaller constant that may be more target-friendly.
1971   unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
1972   if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
1973       DL.isLegalInteger(TypeBits - Amt)) {
1974     Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
1975     if (ShType->isVectorTy())
1976       TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
1977     Constant *NewC =
1978         ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
1979     return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
1980   }
1981 
1982   return nullptr;
1983 }
1984 
1985 /// Fold icmp ({al}shr X, Y), C.
1986 Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
1987                                                BinaryOperator *Shr,
1988                                                const APInt &C) {
1989   // An exact shr only shifts out zero bits, so:
1990   // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
1991   Value *X = Shr->getOperand(0);
1992   CmpInst::Predicate Pred = Cmp.getPredicate();
1993   if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
1994       C.isNullValue())
1995     return new ICmpInst(Pred, X, Cmp.getOperand(1));
1996 
1997   const APInt *ShiftVal;
1998   if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
1999     return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);
2000 
2001   const APInt *ShiftAmt;
2002   if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2003     return nullptr;
2004 
2005   // Check that the shift amount is in range. If not, don't perform undefined
2006   // shifts. When the shift is visited it will be simplified.
2007   unsigned TypeBits = C.getBitWidth();
2008   unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2009   if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2010     return nullptr;
2011 
2012   bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2013   bool IsExact = Shr->isExact();
2014   Type *ShrTy = Shr->getType();
2015   // TODO: If we could guarantee that InstSimplify would handle all of the
2016   // constant-value-based preconditions in the folds below, then we could assert
2017   // those conditions rather than checking them. This is difficult because of
2018   // undef/poison (PR34838).
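  // Worked examples (illustrative):
  //   icmp slt (ashr X, 2), 10 --> icmp slt X, 40  (40 a>> 2 round-trips to 10)
  //   icmp ult (lshr X, 2), 10 --> icmp ult X, 40  (40 l>> 2 round-trips to 10)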
2019 if (IsAShr) { 2020 if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) { 2021 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC) 2022 // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC) 2023 APInt ShiftedC = C.shl(ShAmtVal); 2024 if (ShiftedC.ashr(ShAmtVal) == C) 2025 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2026 } 2027 if (Pred == CmpInst::ICMP_SGT) { 2028 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1 2029 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1; 2030 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() && 2031 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1)) 2032 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2033 } 2034 } else { 2035 if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) { 2036 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC) 2037 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC) 2038 APInt ShiftedC = C.shl(ShAmtVal); 2039 if (ShiftedC.lshr(ShAmtVal) == C) 2040 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2041 } 2042 if (Pred == CmpInst::ICMP_UGT) { 2043 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1 2044 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1; 2045 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1)) 2046 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2047 } 2048 } 2049 2050 if (!Cmp.isEquality()) 2051 return nullptr; 2052 2053 // Handle equality comparisons of shift-by-constant. 2054 2055 // If the comparison constant changes with the shift, the comparison cannot 2056 // succeed (bits of the comparison constant cannot match the shifted value). 2057 // This should be known by InstSimplify and already be folded to true/false. 2058 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) || 2059 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) && 2060 "Expected icmp+shr simplify did not occur."); 2061 2062 // If the bits shifted out are known zero, compare the unshifted value: 2063 // (X & 4) >> 1 == 2 --> (X & 4) == 4. 2064 if (Shr->isExact()) 2065 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal)); 2066 2067 if (Shr->hasOneUse()) { 2068 // Canonicalize the shift into an 'and': 2069 // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt) 2070 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal)); 2071 Constant *Mask = ConstantInt::get(ShrTy, Val); 2072 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask"); 2073 return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal)); 2074 } 2075 2076 return nullptr; 2077 } 2078 2079 /// Fold icmp (udiv X, Y), C. 
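/// e.g. icmp ugt (udiv 100, Y), 4 --> icmp ule Y, 20, because 100 /u Y u> 4
/// holds exactly when Y u<= 100/(4+1) (an illustrative instance).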
2080 Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp, 2081 BinaryOperator *UDiv, 2082 const APInt &C) { 2083 const APInt *C2; 2084 if (!match(UDiv->getOperand(0), m_APInt(C2))) 2085 return nullptr; 2086 2087 assert(*C2 != 0 && "udiv 0, X should have been simplified already."); 2088 2089 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1)) 2090 Value *Y = UDiv->getOperand(1); 2091 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) { 2092 assert(!C.isMaxValue() && 2093 "icmp ugt X, UINT_MAX should have been simplified already."); 2094 return new ICmpInst(ICmpInst::ICMP_ULE, Y, 2095 ConstantInt::get(Y->getType(), C2->udiv(C + 1))); 2096 } 2097 2098 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C) 2099 if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) { 2100 assert(C != 0 && "icmp ult X, 0 should have been simplified already."); 2101 return new ICmpInst(ICmpInst::ICMP_UGT, Y, 2102 ConstantInt::get(Y->getType(), C2->udiv(C))); 2103 } 2104 2105 return nullptr; 2106 } 2107 2108 /// Fold icmp ({su}div X, Y), C. 2109 Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp, 2110 BinaryOperator *Div, 2111 const APInt &C) { 2112 // Fold: icmp pred ([us]div X, C2), C -> range test 2113 // Fold this div into the comparison, producing a range check. 2114 // Determine, based on the divide type, what the range is being 2115 // checked. If there is an overflow on the low or high side, remember 2116 // it, otherwise compute the range [low, hi) bounding the new value. 2117 // See: InsertRangeTest above for the kinds of replacements possible. 2118 const APInt *C2; 2119 if (!match(Div->getOperand(1), m_APInt(C2))) 2120 return nullptr; 2121 2122 // FIXME: If the operand types don't match the type of the divide 2123 // then don't attempt this transform. The code below doesn't have the 2124 // logic to deal with a signed divide and an unsigned compare (and 2125 // vice versa). This is because (x /s C2) <s C produces different 2126 // results than (x /s C2) <u C or (x /u C2) <s C or even 2127 // (x /u C2) <u C. Simply casting the operands and result won't 2128 // work. :( The if statement below tests that condition and bails 2129 // if it finds it. 2130 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv; 2131 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned()) 2132 return nullptr; 2133 2134 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with 2135 // INT_MIN will also fail if the divisor is 1. Although folds of all these 2136 // division-by-constant cases should be present, we can not assert that they 2137 // have happened before we reach this icmp instruction. 2138 if (C2->isNullValue() || C2->isOneValue() || 2139 (DivIsSigned && C2->isAllOnesValue())) 2140 return nullptr; 2141 2142 // Compute Prod = C * C2. We are essentially solving an equation of 2143 // form X / C2 = C. We solve for X by multiplying C2 and C. 2144 // By solving for X, we can turn this into a range check instead of computing 2145 // a divide. 2146 APInt Prod = C * *C2; 2147 2148 // Determine if the product overflows by seeing if the product is not equal to 2149 // the divide. Make sure we do the same kind of divide as in the LHS 2150 // instruction that we're folding. 2151 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C; 2152 2153 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2154 2155 // If the division is known to be exact, then there is no remainder from the 2156 // divide, so the covered range size is unit, otherwise it is the divisor. 
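  // e.g. for an exact udiv, "X /u 5 == 3" checks X in [15, 16), i.e. X == 15,
  // while the non-exact form checks X in [15, 20) (illustrative).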
2157   APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2158 
2159   // Figure out the interval that is being checked. For example, a comparison
2160   // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2161   // Compute this interval based on the constants involved and the signedness of
2162   // the compare/divide. This computes a half-open interval, keeping track of
2163   // whether either value in the interval overflows. After analysis, each
2164   // overflow variable is set to 0 if its corresponding bound variable is valid,
2165   // -1 if it overflowed off the bottom end, or +1 if it overflowed off the top end.
2166   int LoOverflow = 0, HiOverflow = 0;
2167   APInt LoBound, HiBound;
2168 
2169   if (!DivIsSigned) {  // udiv
2170     // e.g. X/5 op 3  --> [15, 20)
2171     LoBound = Prod;
2172     HiOverflow = LoOverflow = ProdOV;
2173     if (!HiOverflow) {
2174       // If this is not an exact divide, then many values in the range collapse
2175       // to the same result value.
2176       HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2177     }
2178   } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2179     if (C.isNullValue()) {       // (X / pos) op 0
2180       // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
2181       LoBound = -(RangeSize - 1);
2182       HiBound = RangeSize;
2183     } else if (C.isStrictlyPositive()) {   // (X / pos) op pos
2184       LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
2185       HiOverflow = LoOverflow = ProdOV;
2186       if (!HiOverflow)
2187         HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2188     } else {                       // (X / pos) op neg
2189       // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
2190       HiBound = Prod + 1;
2191       LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2192       if (!LoOverflow) {
2193         APInt DivNeg = -RangeSize;
2194         LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2195       }
2196     }
2197   } else if (C2->isNegative()) { // Divisor is < 0.
2198     if (Div->isExact())
2199       RangeSize.negate();
2200     if (C.isNullValue()) { // (X / neg) op 0
2201       // e.g. X/-5 op 0  --> [-4, 5)
2202       LoBound = RangeSize + 1;
2203       HiBound = -RangeSize;
2204       if (HiBound == *C2) {        // -INTMIN = INTMIN
2205         HiOverflow = 1;            // [INTMIN+1, overflow)
2206         HiBound = APInt();         // e.g. X/INTMIN = 0 --> X > INTMIN
2207       }
2208     } else if (C.isStrictlyPositive()) {   // (X / neg) op pos
2209       // e.g. X/-5 op 3  --> [-19, -14)
2210       HiBound = Prod + 1;
2211       HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2212       if (!LoOverflow)
2213         LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2214     } else {                       // (X / neg) op neg
2215       LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
2216       LoOverflow = HiOverflow = ProdOV;
2217       if (!HiOverflow)
2218         HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2219     }
2220 
2221     // Dividing by a negative swaps the condition.  LT <-> GT
2222     Pred = ICmpInst::getSwappedPredicate(Pred);
2223   }
2224 
2225   Value *X = Div->getOperand(0);
2226   switch (Pred) {
2227     default: llvm_unreachable("Unhandled icmp opcode!");
2228     case ICmpInst::ICMP_EQ:
2229       if (LoOverflow && HiOverflow)
2230         return replaceInstUsesWith(Cmp, Builder.getFalse());
2231       if (HiOverflow)
2232         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2233                                           ICmpInst::ICMP_UGE, X,
2234                             ConstantInt::get(Div->getType(), LoBound));
2235       if (LoOverflow)
2236         return new ICmpInst(DivIsSigned ?
ICmpInst::ICMP_SLT : 2237 ICmpInst::ICMP_ULT, X, 2238 ConstantInt::get(Div->getType(), HiBound)); 2239 return replaceInstUsesWith( 2240 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true)); 2241 case ICmpInst::ICMP_NE: 2242 if (LoOverflow && HiOverflow) 2243 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2244 if (HiOverflow) 2245 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : 2246 ICmpInst::ICMP_ULT, X, 2247 ConstantInt::get(Div->getType(), LoBound)); 2248 if (LoOverflow) 2249 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : 2250 ICmpInst::ICMP_UGE, X, 2251 ConstantInt::get(Div->getType(), HiBound)); 2252 return replaceInstUsesWith(Cmp, 2253 insertRangeTest(X, LoBound, HiBound, 2254 DivIsSigned, false)); 2255 case ICmpInst::ICMP_ULT: 2256 case ICmpInst::ICMP_SLT: 2257 if (LoOverflow == +1) // Low bound is greater than input range. 2258 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2259 if (LoOverflow == -1) // Low bound is less than input range. 2260 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2261 return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound)); 2262 case ICmpInst::ICMP_UGT: 2263 case ICmpInst::ICMP_SGT: 2264 if (HiOverflow == +1) // High bound greater than input range. 2265 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2266 if (HiOverflow == -1) // High bound less than input range. 2267 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2268 if (Pred == ICmpInst::ICMP_UGT) 2269 return new ICmpInst(ICmpInst::ICMP_UGE, X, 2270 ConstantInt::get(Div->getType(), HiBound)); 2271 return new ICmpInst(ICmpInst::ICMP_SGE, X, 2272 ConstantInt::get(Div->getType(), HiBound)); 2273 } 2274 2275 return nullptr; 2276 } 2277 2278 /// Fold icmp (sub X, Y), C. 2279 Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp, 2280 BinaryOperator *Sub, 2281 const APInt &C) { 2282 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1); 2283 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2284 2285 // The following transforms are only worth it if the only user of the subtract 2286 // is the icmp. 2287 if (!Sub->hasOneUse()) 2288 return nullptr; 2289 2290 if (Sub->hasNoSignedWrap()) { 2291 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y) 2292 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue()) 2293 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 2294 2295 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y) 2296 if (Pred == ICmpInst::ICMP_SGT && C.isNullValue()) 2297 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 2298 2299 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y) 2300 if (Pred == ICmpInst::ICMP_SLT && C.isNullValue()) 2301 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 2302 2303 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y) 2304 if (Pred == ICmpInst::ICMP_SLT && C.isOneValue()) 2305 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 2306 } 2307 2308 const APInt *C2; 2309 if (!match(X, m_APInt(C2))) 2310 return nullptr; 2311 2312 // C2 - Y <u C -> (Y | (C - 1)) == C2 2313 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2 2314 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && 2315 (*C2 & (C - 1)) == (C - 1)) 2316 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X); 2317 2318 // C2 - Y >u C -> (Y | C) != C2 2319 // iff C2 & C == C and C + 1 is a power of 2 2320 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C) 2321 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X); 2322 2323 return nullptr; 2324 } 2325 2326 /// Fold icmp (add X, Y), C. 
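/// e.g. icmp ult (add X, 5), 5 --> icmp uge X, -5: the exact unsigned region
/// [0, 5) is shifted down by 5 and wraps to [-5, 0) (an illustrative instance
/// of the ConstantRange logic below).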
2327 Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp, 2328 BinaryOperator *Add, 2329 const APInt &C) { 2330 Value *Y = Add->getOperand(1); 2331 const APInt *C2; 2332 if (Cmp.isEquality() || !match(Y, m_APInt(C2))) 2333 return nullptr; 2334 2335 // Fold icmp pred (add X, C2), C. 2336 Value *X = Add->getOperand(0); 2337 Type *Ty = Add->getType(); 2338 CmpInst::Predicate Pred = Cmp.getPredicate(); 2339 2340 // If the add does not wrap, we can always adjust the compare by subtracting 2341 // the constants. Equality comparisons are handled elsewhere. SGE/SLE are 2342 // canonicalized to SGT/SLT. 2343 if (Add->hasNoSignedWrap() && 2344 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) { 2345 bool Overflow; 2346 APInt NewC = C.ssub_ov(*C2, Overflow); 2347 // If there is overflow, the result must be true or false. 2348 // TODO: Can we assert there is no overflow because InstSimplify always 2349 // handles those cases? 2350 if (!Overflow) 2351 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2) 2352 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC)); 2353 } 2354 2355 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2); 2356 const APInt &Upper = CR.getUpper(); 2357 const APInt &Lower = CR.getLower(); 2358 if (Cmp.isSigned()) { 2359 if (Lower.isSignMask()) 2360 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper)); 2361 if (Upper.isSignMask()) 2362 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower)); 2363 } else { 2364 if (Lower.isMinValue()) 2365 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper)); 2366 if (Upper.isMinValue()) 2367 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower)); 2368 } 2369 2370 if (!Add->hasOneUse()) 2371 return nullptr; 2372 2373 // X+C <u C2 -> (X & -C2) == C 2374 // iff C & (C2-1) == 0 2375 // C2 is a power of 2 2376 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0) 2377 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C), 2378 ConstantExpr::getNeg(cast<Constant>(Y))); 2379 2380 // X+C >u C2 -> (X & ~C2) != C 2381 // iff C & C2 == 0 2382 // C2+1 is a power of 2 2383 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0) 2384 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C), 2385 ConstantExpr::getNeg(cast<Constant>(Y))); 2386 2387 return nullptr; 2388 } 2389 2390 bool InstCombiner::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, 2391 Value *&RHS, ConstantInt *&Less, 2392 ConstantInt *&Equal, 2393 ConstantInt *&Greater) { 2394 // TODO: Generalize this to work with other comparison idioms or ensure 2395 // they get canonicalized into this form. 2396 2397 // select i1 (a == b), i32 Equal, i32 (select i1 (a < b), i32 Less, i32 2398 // Greater), where Equal, Less and Greater are placeholders for any three 2399 // constants. 
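  // An illustrative IR instance of that shape (names hypothetical):
  //   %eq  = icmp eq i32 %a, %b
  //   %lt  = icmp slt i32 %a, %b
  //   %lg  = select i1 %lt, i32 Less, i32 Greater
  //   %sel = select i1 %eq, i32 Equal, i32 %lg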
2400 ICmpInst::Predicate PredA, PredB; 2401 if (match(SI->getTrueValue(), m_ConstantInt(Equal)) && 2402 match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) && 2403 PredA == ICmpInst::ICMP_EQ && 2404 match(SI->getFalseValue(), 2405 m_Select(m_ICmp(PredB, m_Specific(LHS), m_Specific(RHS)), 2406 m_ConstantInt(Less), m_ConstantInt(Greater))) && 2407 PredB == ICmpInst::ICMP_SLT) { 2408 return true; 2409 } 2410 return false; 2411 } 2412 2413 Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp, 2414 SelectInst *Select, 2415 ConstantInt *C) { 2416 2417 assert(C && "Cmp RHS should be a constant int!"); 2418 // If we're testing a constant value against the result of a three way 2419 // comparison, the result can be expressed directly in terms of the 2420 // original values being compared. Note: We could possibly be more 2421 // aggressive here and remove the hasOneUse test. The original select is 2422 // really likely to simplify or sink when we remove a test of the result. 2423 Value *OrigLHS, *OrigRHS; 2424 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan; 2425 if (Cmp.hasOneUse() && 2426 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal, 2427 C3GreaterThan)) { 2428 assert(C1LessThan && C2Equal && C3GreaterThan); 2429 2430 bool TrueWhenLessThan = 2431 ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C) 2432 ->isAllOnesValue(); 2433 bool TrueWhenEqual = 2434 ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C) 2435 ->isAllOnesValue(); 2436 bool TrueWhenGreaterThan = 2437 ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C) 2438 ->isAllOnesValue(); 2439 2440 // This generates the new instruction that will replace the original Cmp 2441 // Instruction. Instead of enumerating the various combinations when 2442 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus 2443 // false, we rely on chaining of ORs and future passes of InstCombine to 2444 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b). 2445 2446 // When none of the three constants satisfy the predicate for the RHS (C), 2447 // the entire original Cmp can be simplified to a false. 
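  // Conversely, e.g. with {Less, Equal, Greater} = {-1, 0, 1}, testing
  // "%sel == 0" is satisfied only by the Equal constant, so the OR chain built
  // below reduces to (icmp eq a, b) (illustrative).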
2448 Value *Cond = Builder.getFalse(); 2449 if (TrueWhenLessThan) 2450 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS)); 2451 if (TrueWhenEqual) 2452 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS)); 2453 if (TrueWhenGreaterThan) 2454 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS)); 2455 2456 return replaceInstUsesWith(Cmp, Cond); 2457 } 2458 return nullptr; 2459 } 2460 2461 Instruction *InstCombiner::foldICmpBitCastConstant(ICmpInst &Cmp, 2462 BitCastInst *Bitcast, 2463 const APInt &C) { 2464 // Folding: icmp <pred> iN X, C 2465 // where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC)) to iN 2466 // and C is a splat of a K-bit pattern 2467 // and SC is a constant vector = <C', C', C', ..., C'> 2468 // Into: 2469 // %E = extractelement <M x iK> %vec, i32 C' 2470 // icmp <pred> iK %E, trunc(C) 2471 if (!Bitcast->getType()->isIntegerTy() || 2472 !Bitcast->getSrcTy()->isIntOrIntVectorTy()) 2473 return nullptr; 2474 2475 Value *BCIOp = Bitcast->getOperand(0); 2476 Value *Vec = nullptr; // 1st vector arg of the shufflevector 2477 Constant *Mask = nullptr; // Mask arg of the shufflevector 2478 if (match(BCIOp, 2479 m_ShuffleVector(m_Value(Vec), m_Undef(), m_Constant(Mask)))) { 2480 // Check whether every element of Mask is the same constant 2481 if (auto *Elem = dyn_cast_or_null<ConstantInt>(Mask->getSplatValue())) { 2482 auto *VecTy = cast<VectorType>(BCIOp->getType()); 2483 auto *EltTy = cast<IntegerType>(VecTy->getElementType()); 2484 auto Pred = Cmp.getPredicate(); 2485 if (C.isSplat(EltTy->getBitWidth())) { 2486 // Fold the icmp based on the value of C 2487 // If C is M copies of an iK sized bit pattern, 2488 // then: 2489 // => %E = extractelement <N x iK> %vec, i32 Elem 2490 // icmp <pred> iK %SplatVal, <pattern> 2491 Value *Extract = Builder.CreateExtractElement(Vec, Elem); 2492 Value *NewC = ConstantInt::get(EltTy, C.trunc(EltTy->getBitWidth())); 2493 return new ICmpInst(Pred, Extract, NewC); 2494 } 2495 } 2496 } 2497 return nullptr; 2498 } 2499 2500 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C 2501 /// where X is some kind of instruction. 
2502 Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) { 2503 const APInt *C; 2504 if (!match(Cmp.getOperand(1), m_APInt(C))) 2505 return nullptr; 2506 2507 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) { 2508 switch (BO->getOpcode()) { 2509 case Instruction::Xor: 2510 if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C)) 2511 return I; 2512 break; 2513 case Instruction::And: 2514 if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C)) 2515 return I; 2516 break; 2517 case Instruction::Or: 2518 if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C)) 2519 return I; 2520 break; 2521 case Instruction::Mul: 2522 if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C)) 2523 return I; 2524 break; 2525 case Instruction::Shl: 2526 if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C)) 2527 return I; 2528 break; 2529 case Instruction::LShr: 2530 case Instruction::AShr: 2531 if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C)) 2532 return I; 2533 break; 2534 case Instruction::UDiv: 2535 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C)) 2536 return I; 2537 LLVM_FALLTHROUGH; 2538 case Instruction::SDiv: 2539 if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C)) 2540 return I; 2541 break; 2542 case Instruction::Sub: 2543 if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C)) 2544 return I; 2545 break; 2546 case Instruction::Add: 2547 if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C)) 2548 return I; 2549 break; 2550 default: 2551 break; 2552 } 2553 // TODO: These folds could be refactored to be part of the above calls. 2554 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C)) 2555 return I; 2556 } 2557 2558 // Match against CmpInst LHS being instructions other than binary operators. 2559 2560 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) { 2561 // For now, we only support constant integers while folding the 2562 // ICMP(SELECT)) pattern. We can extend this to support vector of integers 2563 // similar to the cases handled by binary ops above. 2564 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1))) 2565 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS)) 2566 return I; 2567 } 2568 2569 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) { 2570 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C)) 2571 return I; 2572 } 2573 2574 if (auto *BCI = dyn_cast<BitCastInst>(Cmp.getOperand(0))) { 2575 if (Instruction *I = foldICmpBitCastConstant(Cmp, BCI, *C)) 2576 return I; 2577 } 2578 2579 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, *C)) 2580 return I; 2581 2582 return nullptr; 2583 } 2584 2585 /// Fold an icmp equality instruction with binary operator LHS and constant RHS: 2586 /// icmp eq/ne BO, C. 2587 Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, 2588 BinaryOperator *BO, 2589 const APInt &C) { 2590 // TODO: Some of these folds could work with arbitrary constants, but this 2591 // function is limited to scalar and vector splat constants. 2592 if (!Cmp.isEquality()) 2593 return nullptr; 2594 2595 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2596 bool isICMP_NE = Pred == ICmpInst::ICMP_NE; 2597 Constant *RHS = cast<Constant>(Cmp.getOperand(1)); 2598 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1); 2599 2600 switch (BO->getOpcode()) { 2601 case Instruction::SRem: 2602 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one. 
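    // e.g. (srem X, 8) == 0 --> (urem X, 8) == 0: a zero remainder does not
    // depend on the sign of X, and the unsigned form can fold further to a
    // low-bit mask test (illustrative).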
2603 if (C.isNullValue() && BO->hasOneUse()) { 2604 const APInt *BOC; 2605 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) { 2606 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName()); 2607 return new ICmpInst(Pred, NewRem, 2608 Constant::getNullValue(BO->getType())); 2609 } 2610 } 2611 break; 2612 case Instruction::Add: { 2613 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants. 2614 const APInt *BOC; 2615 if (match(BOp1, m_APInt(BOC))) { 2616 if (BO->hasOneUse()) { 2617 Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1)); 2618 return new ICmpInst(Pred, BOp0, SubC); 2619 } 2620 } else if (C.isNullValue()) { 2621 // Replace ((add A, B) != 0) with (A != -B) if A or B is 2622 // efficiently invertible, or if the add has just this one use. 2623 if (Value *NegVal = dyn_castNegVal(BOp1)) 2624 return new ICmpInst(Pred, BOp0, NegVal); 2625 if (Value *NegVal = dyn_castNegVal(BOp0)) 2626 return new ICmpInst(Pred, NegVal, BOp1); 2627 if (BO->hasOneUse()) { 2628 Value *Neg = Builder.CreateNeg(BOp1); 2629 Neg->takeName(BO); 2630 return new ICmpInst(Pred, BOp0, Neg); 2631 } 2632 } 2633 break; 2634 } 2635 case Instruction::Xor: 2636 if (BO->hasOneUse()) { 2637 if (Constant *BOC = dyn_cast<Constant>(BOp1)) { 2638 // For the xor case, we can xor two constants together, eliminating 2639 // the explicit xor. 2640 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC)); 2641 } else if (C.isNullValue()) { 2642 // Replace ((xor A, B) != 0) with (A != B) 2643 return new ICmpInst(Pred, BOp0, BOp1); 2644 } 2645 } 2646 break; 2647 case Instruction::Sub: 2648 if (BO->hasOneUse()) { 2649 const APInt *BOC; 2650 if (match(BOp0, m_APInt(BOC))) { 2651 // Replace ((sub BOC, B) != C) with (B != BOC-C). 2652 Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS); 2653 return new ICmpInst(Pred, BOp1, SubC); 2654 } else if (C.isNullValue()) { 2655 // Replace ((sub A, B) != 0) with (A != B). 2656 return new ICmpInst(Pred, BOp0, BOp1); 2657 } 2658 } 2659 break; 2660 case Instruction::Or: { 2661 const APInt *BOC; 2662 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) { 2663 // Comparing if all bits outside of a constant mask are set? 2664 // Replace (X | C) == -1 with (X & ~C) == ~C. 2665 // This removes the -1 constant. 2666 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1)); 2667 Value *And = Builder.CreateAnd(BOp0, NotBOC); 2668 return new ICmpInst(Pred, And, NotBOC); 2669 } 2670 break; 2671 } 2672 case Instruction::And: { 2673 const APInt *BOC; 2674 if (match(BOp1, m_APInt(BOC))) { 2675 // If we have ((X & C) == C), turn it into ((X & C) != 0). 2676 if (C == *BOC && C.isPowerOf2()) 2677 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, 2678 BO, Constant::getNullValue(RHS->getType())); 2679 2680 // Don't perform the following transforms if the AND has multiple uses 2681 if (!BO->hasOneUse()) 2682 break; 2683 2684 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0 2685 if (BOC->isSignMask()) { 2686 Constant *Zero = Constant::getNullValue(BOp0->getType()); 2687 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE; 2688 return new ICmpInst(NewPred, BOp0, Zero); 2689 } 2690 2691 // ((X & ~7) == 0) --> X < 8 2692 if (C.isNullValue() && (~(*BOC) + 1).isPowerOf2()) { 2693 Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1)); 2694 auto NewPred = isICMP_NE ? 
ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 2695 return new ICmpInst(NewPred, BOp0, NegBOC); 2696 } 2697 } 2698 break; 2699 } 2700 case Instruction::Mul: 2701 if (C.isNullValue() && BO->hasNoSignedWrap()) { 2702 const APInt *BOC; 2703 if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) { 2704 // The trivial case (mul X, 0) is handled by InstSimplify. 2705 // General case : (mul X, C) != 0 iff X != 0 2706 // (mul X, C) == 0 iff X == 0 2707 return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType())); 2708 } 2709 } 2710 break; 2711 case Instruction::UDiv: 2712 if (C.isNullValue()) { 2713 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A) 2714 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT; 2715 return new ICmpInst(NewPred, BOp1, BOp0); 2716 } 2717 break; 2718 default: 2719 break; 2720 } 2721 return nullptr; 2722 } 2723 2724 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C. 2725 Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp, 2726 const APInt &C) { 2727 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)); 2728 if (!II || !Cmp.isEquality()) 2729 return nullptr; 2730 2731 // Handle icmp {eq|ne} <intrinsic>, Constant. 2732 Type *Ty = II->getType(); 2733 switch (II->getIntrinsicID()) { 2734 case Intrinsic::bswap: 2735 Worklist.Add(II); 2736 Cmp.setOperand(0, II->getArgOperand(0)); 2737 Cmp.setOperand(1, ConstantInt::get(Ty, C.byteSwap())); 2738 return &Cmp; 2739 2740 case Intrinsic::ctlz: 2741 case Intrinsic::cttz: 2742 // ctz(A) == bitwidth(A) -> A == 0 and likewise for != 2743 if (C == C.getBitWidth()) { 2744 Worklist.Add(II); 2745 Cmp.setOperand(0, II->getArgOperand(0)); 2746 Cmp.setOperand(1, ConstantInt::getNullValue(Ty)); 2747 return &Cmp; 2748 } 2749 break; 2750 2751 case Intrinsic::ctpop: { 2752 // popcount(A) == 0 -> A == 0 and likewise for != 2753 // popcount(A) == bitwidth(A) -> A == -1 and likewise for != 2754 bool IsZero = C.isNullValue(); 2755 if (IsZero || C == C.getBitWidth()) { 2756 Worklist.Add(II); 2757 Cmp.setOperand(0, II->getArgOperand(0)); 2758 auto *NewOp = 2759 IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty); 2760 Cmp.setOperand(1, NewOp); 2761 return &Cmp; 2762 } 2763 break; 2764 } 2765 default: 2766 break; 2767 } 2768 2769 return nullptr; 2770 } 2771 2772 /// Handle icmp with constant (but not simple integer constant) RHS. 2773 Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) { 2774 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2775 Constant *RHSC = dyn_cast<Constant>(Op1); 2776 Instruction *LHSI = dyn_cast<Instruction>(Op0); 2777 if (!RHSC || !LHSI) 2778 return nullptr; 2779 2780 switch (LHSI->getOpcode()) { 2781 case Instruction::GetElementPtr: 2782 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null 2783 if (RHSC->isNullValue() && 2784 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices()) 2785 return new ICmpInst( 2786 I.getPredicate(), LHSI->getOperand(0), 2787 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2788 break; 2789 case Instruction::PHI: 2790 // Only fold icmp into the PHI if the phi and icmp are in the same 2791 // block. If in the same block, we're encouraging jump threading. If 2792 // not, we are just pessimizing the code by making an i1 phi. 
2793 if (LHSI->getParent() == I.getParent()) 2794 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI))) 2795 return NV; 2796 break; 2797 case Instruction::Select: { 2798 // If either operand of the select is a constant, we can fold the 2799 // comparison into the select arms, which will cause one to be 2800 // constant folded and the select turned into a bitwise or. 2801 Value *Op1 = nullptr, *Op2 = nullptr; 2802 ConstantInt *CI = nullptr; 2803 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) { 2804 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2805 CI = dyn_cast<ConstantInt>(Op1); 2806 } 2807 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) { 2808 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2809 CI = dyn_cast<ConstantInt>(Op2); 2810 } 2811 2812 // We only want to perform this transformation if it will not lead to 2813 // additional code. This is true if either both sides of the select 2814 // fold to a constant (in which case the icmp is replaced with a select 2815 // which will usually simplify) or this is the only user of the 2816 // select (in which case we are trading a select+icmp for a simpler 2817 // select+icmp) or all uses of the select can be replaced based on 2818 // dominance information ("Global cases"). 2819 bool Transform = false; 2820 if (Op1 && Op2) 2821 Transform = true; 2822 else if (Op1 || Op2) { 2823 // Local case 2824 if (LHSI->hasOneUse()) 2825 Transform = true; 2826 // Global cases 2827 else if (CI && !CI->isZero()) 2828 // When Op1 is constant try replacing select with second operand. 2829 // Otherwise Op2 is constant and try replacing select with first 2830 // operand. 2831 Transform = 2832 replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1); 2833 } 2834 if (Transform) { 2835 if (!Op1) 2836 Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC, 2837 I.getName()); 2838 if (!Op2) 2839 Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC, 2840 I.getName()); 2841 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); 2842 } 2843 break; 2844 } 2845 case Instruction::IntToPtr: 2846 // icmp pred inttoptr(X), null -> icmp pred X, 0 2847 if (RHSC->isNullValue() && 2848 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType()) 2849 return new ICmpInst( 2850 I.getPredicate(), LHSI->getOperand(0), 2851 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2852 break; 2853 2854 case Instruction::Load: 2855 // Try to optimize things like "A[i] > 4" to index computations. 2856 if (GetElementPtrInst *GEP = 2857 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) { 2858 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 2859 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 2860 !cast<LoadInst>(LHSI)->isVolatile()) 2861 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I)) 2862 return Res; 2863 } 2864 break; 2865 } 2866 2867 return nullptr; 2868 } 2869 2870 /// Some comparisons can be simplified. 2871 /// In this case, we are looking for comparisons that look like 2872 /// a check for a lossy truncation. 2873 /// Folds: 2874 /// x & (-1 >> y) SrcPred x to x DstPred (-1 >> y) 2875 /// The Mask can be a constant, too. 2876 /// For some predicates, the operands are commutative. 2877 /// For others, x can only be on a specific side. 
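/// e.g. (x & 15) == x --> x u<= 15, and x u> (x & 15) --> x u> 15
/// (illustrative instances with a constant low-bit mask).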
2878 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I, 2879 InstCombiner::BuilderTy &Builder) { 2880 ICmpInst::Predicate SrcPred; 2881 Value *X, *M; 2882 auto m_Mask = m_CombineOr(m_LShr(m_AllOnes(), m_Value()), m_LowBitMask()); 2883 if (!match(&I, m_c_ICmp(SrcPred, 2884 m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)), 2885 m_Deferred(X)))) 2886 return nullptr; 2887 2888 ICmpInst::Predicate DstPred; 2889 switch (SrcPred) { 2890 case ICmpInst::Predicate::ICMP_EQ: 2891 // x & (-1 >> y) == x -> x u<= (-1 >> y) 2892 DstPred = ICmpInst::Predicate::ICMP_ULE; 2893 break; 2894 case ICmpInst::Predicate::ICMP_NE: 2895 // x & (-1 >> y) != x -> x u> (-1 >> y) 2896 DstPred = ICmpInst::Predicate::ICMP_UGT; 2897 break; 2898 case ICmpInst::Predicate::ICMP_UGT: 2899 // x u> x & (-1 >> y) -> x u> (-1 >> y) 2900 assert(X == I.getOperand(0) && "instsimplify took care of commut. variant"); 2901 DstPred = ICmpInst::Predicate::ICMP_UGT; 2902 break; 2903 case ICmpInst::Predicate::ICMP_UGE: 2904 // x & (-1 >> y) u>= x -> x u<= (-1 >> y) 2905 assert(X == I.getOperand(1) && "instsimplify took care of commut. variant"); 2906 DstPred = ICmpInst::Predicate::ICMP_ULE; 2907 break; 2908 case ICmpInst::Predicate::ICMP_ULT: 2909 // x & (-1 >> y) u< x -> x u> (-1 >> y) 2910 assert(X == I.getOperand(1) && "instsimplify took care of commut. variant"); 2911 DstPred = ICmpInst::Predicate::ICMP_UGT; 2912 break; 2913 case ICmpInst::Predicate::ICMP_ULE: 2914 // x u<= x & (-1 >> y) -> x u<= (-1 >> y) 2915 assert(X == I.getOperand(0) && "instsimplify took care of commut. variant"); 2916 DstPred = ICmpInst::Predicate::ICMP_ULE; 2917 break; 2918 case ICmpInst::Predicate::ICMP_SGT: 2919 // x s> x & (-1 >> y) -> x s> (-1 >> y) 2920 if (X != I.getOperand(0)) // X must be on LHS of comparison! 2921 return nullptr; // Ignore the other case. 2922 DstPred = ICmpInst::Predicate::ICMP_SGT; 2923 break; 2924 case ICmpInst::Predicate::ICMP_SGE: 2925 // x & (-1 >> y) s>= x -> x s<= (-1 >> y) 2926 if (X != I.getOperand(1)) // X must be on RHS of comparison! 2927 return nullptr; // Ignore the other case. 2928 DstPred = ICmpInst::Predicate::ICMP_SLE; 2929 break; 2930 case ICmpInst::Predicate::ICMP_SLT: 2931 // x & (-1 >> y) s< x -> x s> (-1 >> y) 2932 if (X != I.getOperand(1)) // X must be on RHS of comparison! 2933 return nullptr; // Ignore the other case. 2934 DstPred = ICmpInst::Predicate::ICMP_SGT; 2935 break; 2936 case ICmpInst::Predicate::ICMP_SLE: 2937 // x s<= x & (-1 >> y) -> x s<= (-1 >> y) 2938 if (X != I.getOperand(0)) // X must be on LHS of comparison! 2939 return nullptr; // Ignore the other case. 2940 DstPred = ICmpInst::Predicate::ICMP_SLE; 2941 break; 2942 default: 2943 llvm_unreachable("All possible folds are handled."); 2944 } 2945 2946 return Builder.CreateICmp(DstPred, X, M); 2947 } 2948 2949 /// Some comparisons can be simplified. 2950 /// In this case, we are looking for comparisons that look like 2951 /// a check for a lossy signed truncation. 2952 /// Folds: (MaskedBits is a constant.) 2953 /// ((%x << MaskedBits) a>> MaskedBits) SrcPred %x 2954 /// Into: 2955 /// (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits) 2956 /// Where KeptBits = bitwidth(%x) - MaskedBits 2957 static Value * 2958 foldICmpWithTruncSignExtendedVal(ICmpInst &I, 2959 InstCombiner::BuilderTy &Builder) { 2960 ICmpInst::Predicate SrcPred; 2961 Value *X; 2962 const APInt *C0, *C1; // FIXME: non-splats, potentially with undef. 2963 // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use. 
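  // Worked i8 example with MaskedBits == 4 (illustrative):
  // ((x << 4) a>> 4) == x holds exactly when x fits in 4 signed bits,
  // i.e. x in [-8, 8), which the fold below tests as (x + 8) u< 16.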
2964 if (!match(&I, m_c_ICmp(SrcPred, 2965 m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)), 2966 m_APInt(C1))), 2967 m_Deferred(X)))) 2968 return nullptr; 2969 2970 // Potential handling of non-splats: for each element: 2971 // * if both are undef, replace with constant 0. 2972 // Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0. 2973 // * if both are not undef, and are different, bailout. 2974 // * else, only one is undef, then pick the non-undef one. 2975 2976 // The shift amount must be equal. 2977 if (*C0 != *C1) 2978 return nullptr; 2979 const APInt &MaskedBits = *C0; 2980 assert(MaskedBits != 0 && "shift by zero should be folded away already."); 2981 2982 ICmpInst::Predicate DstPred; 2983 switch (SrcPred) { 2984 case ICmpInst::Predicate::ICMP_EQ: 2985 // ((%x << MaskedBits) a>> MaskedBits) == %x 2986 // => 2987 // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits) 2988 DstPred = ICmpInst::Predicate::ICMP_ULT; 2989 break; 2990 case ICmpInst::Predicate::ICMP_NE: 2991 // ((%x << MaskedBits) a>> MaskedBits) != %x 2992 // => 2993 // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits) 2994 DstPred = ICmpInst::Predicate::ICMP_UGE; 2995 break; 2996 // FIXME: are more folds possible? 2997 default: 2998 return nullptr; 2999 } 3000 3001 auto *XType = X->getType(); 3002 const unsigned XBitWidth = XType->getScalarSizeInBits(); 3003 const APInt BitWidth = APInt(XBitWidth, XBitWidth); 3004 assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched"); 3005 3006 // KeptBits = bitwidth(%x) - MaskedBits 3007 const APInt KeptBits = BitWidth - MaskedBits; 3008 assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable"); 3009 // ICmpCst = (1 << KeptBits) 3010 const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits); 3011 assert(ICmpCst.isPowerOf2()); 3012 // AddCst = (1 << (KeptBits-1)) 3013 const APInt AddCst = ICmpCst.lshr(1); 3014 assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2()); 3015 3016 // T0 = add %x, AddCst 3017 Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst)); 3018 // T1 = T0 DstPred ICmpCst 3019 Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst)); 3020 3021 return T1; 3022 } 3023 3024 /// Try to fold icmp (binop), X or icmp X, (binop). 3025 /// TODO: A large part of this logic is duplicated in InstSimplify's 3026 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code 3027 /// duplication. 3028 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) { 3029 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3030 3031 // Special logic for binary operators. 3032 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0); 3033 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1); 3034 if (!BO0 && !BO1) 3035 return nullptr; 3036 3037 const CmpInst::Predicate Pred = I.getPredicate(); 3038 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false; 3039 if (BO0 && isa<OverflowingBinaryOperator>(BO0)) 3040 NoOp0WrapProblem = 3041 ICmpInst::isEquality(Pred) || 3042 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) || 3043 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap()); 3044 if (BO1 && isa<OverflowingBinaryOperator>(BO1)) 3045 NoOp1WrapProblem = 3046 ICmpInst::isEquality(Pred) || 3047 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) || 3048 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap()); 3049 3050 // Analyze the case when either Op0 or Op1 is an add instruction. 3051 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null). 
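  // E.g. for "icmp eq (add nsw %x, %y), %x" the code below sets A = %x and
  // B = %y, and the first fold produces "icmp eq %y, 0".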
3052 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 3053 if (BO0 && BO0->getOpcode() == Instruction::Add) { 3054 A = BO0->getOperand(0); 3055 B = BO0->getOperand(1); 3056 } 3057 if (BO1 && BO1->getOpcode() == Instruction::Add) { 3058 C = BO1->getOperand(0); 3059 D = BO1->getOperand(1); 3060 } 3061 3062 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 3063 if ((A == Op1 || B == Op1) && NoOp0WrapProblem) 3064 return new ICmpInst(Pred, A == Op1 ? B : A, 3065 Constant::getNullValue(Op1->getType())); 3066 3067 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 3068 if ((C == Op0 || D == Op0) && NoOp1WrapProblem) 3069 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()), 3070 C == Op0 ? D : C); 3071 3072 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow. 3073 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem && 3074 NoOp1WrapProblem && 3075 // Try not to increase register pressure. 3076 BO0->hasOneUse() && BO1->hasOneUse()) { 3077 // Determine Y and Z in the form icmp (X+Y), (X+Z). 3078 Value *Y, *Z; 3079 if (A == C) { 3080 // C + B == C + D -> B == D 3081 Y = B; 3082 Z = D; 3083 } else if (A == D) { 3084 // D + B == C + D -> B == C 3085 Y = B; 3086 Z = C; 3087 } else if (B == C) { 3088 // A + C == C + D -> A == D 3089 Y = A; 3090 Z = D; 3091 } else { 3092 assert(B == D); 3093 // A + D == C + D -> A == C 3094 Y = A; 3095 Z = C; 3096 } 3097 return new ICmpInst(Pred, Y, Z); 3098 } 3099 3100 // icmp slt (X + -1), Y -> icmp sle X, Y 3101 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT && 3102 match(B, m_AllOnes())) 3103 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1); 3104 3105 // icmp sge (X + -1), Y -> icmp sgt X, Y 3106 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE && 3107 match(B, m_AllOnes())) 3108 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1); 3109 3110 // icmp sle (X + 1), Y -> icmp slt X, Y 3111 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One())) 3112 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1); 3113 3114 // icmp sgt (X + 1), Y -> icmp sge X, Y 3115 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One())) 3116 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1); 3117 3118 // icmp sgt X, (Y + -1) -> icmp sge X, Y 3119 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT && 3120 match(D, m_AllOnes())) 3121 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C); 3122 3123 // icmp sle X, (Y + -1) -> icmp slt X, Y 3124 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE && 3125 match(D, m_AllOnes())) 3126 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C); 3127 3128 // icmp sge X, (Y + 1) -> icmp sgt X, Y 3129 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One())) 3130 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C); 3131 3132 // icmp slt X, (Y + 1) -> icmp sle X, Y 3133 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One())) 3134 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C); 3135 3136 // TODO: The subtraction-related identities shown below also hold, but 3137 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations 3138 // wouldn't happen even if they were implemented. 
3139 // 3140 // icmp ult (X - 1), Y -> icmp ule X, Y 3141 // icmp uge (X - 1), Y -> icmp ugt X, Y 3142 // icmp ugt X, (Y - 1) -> icmp uge X, Y 3143 // icmp ule X, (Y - 1) -> icmp ult X, Y 3144 3145 // icmp ule (X + 1), Y -> icmp ult X, Y 3146 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One())) 3147 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1); 3148 3149 // icmp ugt (X + 1), Y -> icmp uge X, Y 3150 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One())) 3151 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1); 3152 3153 // icmp uge X, (Y + 1) -> icmp ugt X, Y 3154 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One())) 3155 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C); 3156 3157 // icmp ult X, (Y + 1) -> icmp ule X, Y 3158 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One())) 3159 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C); 3160 3161 // if C1 has greater magnitude than C2: 3162 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y 3163 // s.t. C3 = C1 - C2 3164 // 3165 // if C2 has greater magnitude than C1: 3166 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3) 3167 // s.t. C3 = C2 - C1 3168 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem && 3169 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) 3170 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B)) 3171 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) { 3172 const APInt &AP1 = C1->getValue(); 3173 const APInt &AP2 = C2->getValue(); 3174 if (AP1.isNegative() == AP2.isNegative()) { 3175 APInt AP1Abs = C1->getValue().abs(); 3176 APInt AP2Abs = C2->getValue().abs(); 3177 if (AP1Abs.uge(AP2Abs)) { 3178 ConstantInt *C3 = Builder.getInt(AP1 - AP2); 3179 Value *NewAdd = Builder.CreateNSWAdd(A, C3); 3180 return new ICmpInst(Pred, NewAdd, C); 3181 } else { 3182 ConstantInt *C3 = Builder.getInt(AP2 - AP1); 3183 Value *NewAdd = Builder.CreateNSWAdd(C, C3); 3184 return new ICmpInst(Pred, A, NewAdd); 3185 } 3186 } 3187 } 3188 3189 // Analyze the case when either Op0 or Op1 is a sub instruction. 3190 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null). 3191 A = nullptr; 3192 B = nullptr; 3193 C = nullptr; 3194 D = nullptr; 3195 if (BO0 && BO0->getOpcode() == Instruction::Sub) { 3196 A = BO0->getOperand(0); 3197 B = BO0->getOperand(1); 3198 } 3199 if (BO1 && BO1->getOpcode() == Instruction::Sub) { 3200 C = BO1->getOperand(0); 3201 D = BO1->getOperand(1); 3202 } 3203 3204 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow. 3205 if (A == Op1 && NoOp0WrapProblem) 3206 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B); 3207 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow. 3208 if (C == Op0 && NoOp1WrapProblem) 3209 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType())); 3210 3211 // (A - B) >u A --> A <u B 3212 if (A == Op1 && Pred == ICmpInst::ICMP_UGT) 3213 return new ICmpInst(ICmpInst::ICMP_ULT, A, B); 3214 // C <u (C - D) --> C <u D 3215 if (C == Op0 && Pred == ICmpInst::ICMP_ULT) 3216 return new ICmpInst(ICmpInst::ICMP_ULT, C, D); 3217 3218 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow. 3219 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem && 3220 // Try not to increase register pressure. 3221 BO0->hasOneUse() && BO1->hasOneUse()) 3222 return new ICmpInst(Pred, A, C); 3223 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow. 
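  // (The operands swap but the predicate is kept: X - Y pred X - Z is
  // equivalent to Z pred Y when neither subtraction wraps, since subtracting
  // from the common value X reverses the ordering.)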
3224 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem && 3225 // Try not to increase register pressure. 3226 BO0->hasOneUse() && BO1->hasOneUse()) 3227 return new ICmpInst(Pred, D, B); 3228 3229 // icmp (0-X) < cst --> x > -cst 3230 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) { 3231 Value *X; 3232 if (match(BO0, m_Neg(m_Value(X)))) 3233 if (Constant *RHSC = dyn_cast<Constant>(Op1)) 3234 if (RHSC->isNotMinSignedValue()) 3235 return new ICmpInst(I.getSwappedPredicate(), X, 3236 ConstantExpr::getNeg(RHSC)); 3237 } 3238 3239 BinaryOperator *SRem = nullptr; 3240 // icmp (srem X, Y), Y 3241 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1)) 3242 SRem = BO0; 3243 // icmp Y, (srem X, Y) 3244 else if (BO1 && BO1->getOpcode() == Instruction::SRem && 3245 Op0 == BO1->getOperand(1)) 3246 SRem = BO1; 3247 if (SRem) { 3248 // We don't check hasOneUse to avoid increasing register pressure because 3249 // the value we use is the same value this instruction was already using. 3250 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) { 3251 default: 3252 break; 3253 case ICmpInst::ICMP_EQ: 3254 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 3255 case ICmpInst::ICMP_NE: 3256 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 3257 case ICmpInst::ICMP_SGT: 3258 case ICmpInst::ICMP_SGE: 3259 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1), 3260 Constant::getAllOnesValue(SRem->getType())); 3261 case ICmpInst::ICMP_SLT: 3262 case ICmpInst::ICMP_SLE: 3263 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1), 3264 Constant::getNullValue(SRem->getType())); 3265 } 3266 } 3267 3268 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() && 3269 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) { 3270 switch (BO0->getOpcode()) { 3271 default: 3272 break; 3273 case Instruction::Add: 3274 case Instruction::Sub: 3275 case Instruction::Xor: { 3276 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b 3277 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3278 3279 const APInt *C; 3280 if (match(BO0->getOperand(1), m_APInt(C))) { 3281 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b 3282 if (C->isSignMask()) { 3283 ICmpInst::Predicate NewPred = 3284 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3285 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3286 } 3287 3288 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b 3289 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) { 3290 ICmpInst::Predicate NewPred = 3291 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3292 NewPred = I.getSwappedPredicate(NewPred); 3293 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3294 } 3295 } 3296 break; 3297 } 3298 case Instruction::Mul: { 3299 if (!I.isEquality()) 3300 break; 3301 3302 const APInt *C; 3303 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() && 3304 !C->isOneValue()) { 3305 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask) 3306 // Mask = -1 >> count-trailing-zeros(C). 
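        // E.g. for i8: "icmp eq (mul %x, 12), (mul %y, 12)" becomes
        // "icmp eq (and %x, 63), (and %y, 63)", since 12 has two trailing
        // zero bits and 63 == -1 u>> 2.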
3307 if (unsigned TZs = C->countTrailingZeros()) { 3308 Constant *Mask = ConstantInt::get( 3309 BO0->getType(), 3310 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs)); 3311 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask); 3312 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask); 3313 return new ICmpInst(Pred, And1, And2); 3314 } 3315 // If there are no trailing zeros in the multiplier, just eliminate 3316 // the multiplies (no masking is needed): 3317 // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y 3318 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3319 } 3320 break; 3321 } 3322 case Instruction::UDiv: 3323 case Instruction::LShr: 3324 if (I.isSigned() || !BO0->isExact() || !BO1->isExact()) 3325 break; 3326 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3327 3328 case Instruction::SDiv: 3329 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact()) 3330 break; 3331 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3332 3333 case Instruction::AShr: 3334 if (!BO0->isExact() || !BO1->isExact()) 3335 break; 3336 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3337 3338 case Instruction::Shl: { 3339 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap(); 3340 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap(); 3341 if (!NUW && !NSW) 3342 break; 3343 if (!NSW && I.isSigned()) 3344 break; 3345 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3346 } 3347 } 3348 } 3349 3350 if (BO0) { 3351 // Transform A & (L - 1) `ult` L --> L != 0 3352 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes()); 3353 auto BitwiseAnd = m_c_And(m_Value(), LSubOne); 3354 3355 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) { 3356 auto *Zero = Constant::getNullValue(BO0->getType()); 3357 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero); 3358 } 3359 } 3360 3361 if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder)) 3362 return replaceInstUsesWith(I, V); 3363 3364 if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder)) 3365 return replaceInstUsesWith(I, V); 3366 3367 return nullptr; 3368 } 3369 3370 /// Fold icmp Pred min|max(X, Y), X. 3371 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) { 3372 ICmpInst::Predicate Pred = Cmp.getPredicate(); 3373 Value *Op0 = Cmp.getOperand(0); 3374 Value *X = Cmp.getOperand(1); 3375 3376 // Canonicalize minimum or maximum operand to LHS of the icmp. 
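  // E.g. "icmp sgt %x, (smin %x, %y)" becomes "icmp slt (smin %x, %y), %x",
  // so the code below only needs to handle min/max on the LHS.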
3377 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) || 3378 match(X, m_c_SMax(m_Specific(Op0), m_Value())) || 3379 match(X, m_c_UMin(m_Specific(Op0), m_Value())) || 3380 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) { 3381 std::swap(Op0, X); 3382 Pred = Cmp.getSwappedPredicate(); 3383 } 3384 3385 Value *Y; 3386 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) { 3387 // smin(X, Y) == X --> X s<= Y 3388 // smin(X, Y) s>= X --> X s<= Y 3389 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE) 3390 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 3391 3392 // smin(X, Y) != X --> X s> Y 3393 // smin(X, Y) s< X --> X s> Y 3394 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT) 3395 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 3396 3397 // These cases should be handled in InstSimplify: 3398 // smin(X, Y) s<= X --> true 3399 // smin(X, Y) s> X --> false 3400 return nullptr; 3401 } 3402 3403 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) { 3404 // smax(X, Y) == X --> X s>= Y 3405 // smax(X, Y) s<= X --> X s>= Y 3406 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE) 3407 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 3408 3409 // smax(X, Y) != X --> X s< Y 3410 // smax(X, Y) s> X --> X s< Y 3411 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT) 3412 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 3413 3414 // These cases should be handled in InstSimplify: 3415 // smax(X, Y) s>= X --> true 3416 // smax(X, Y) s< X --> false 3417 return nullptr; 3418 } 3419 3420 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) { 3421 // umin(X, Y) == X --> X u<= Y 3422 // umin(X, Y) u>= X --> X u<= Y 3423 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE) 3424 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y); 3425 3426 // umin(X, Y) != X --> X u> Y 3427 // umin(X, Y) u< X --> X u> Y 3428 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT) 3429 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y); 3430 3431 // These cases should be handled in InstSimplify: 3432 // umin(X, Y) u<= X --> true 3433 // umin(X, Y) u> X --> false 3434 return nullptr; 3435 } 3436 3437 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) { 3438 // umax(X, Y) == X --> X u>= Y 3439 // umax(X, Y) u<= X --> X u>= Y 3440 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE) 3441 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 3442 3443 // umax(X, Y) != X --> X u< Y 3444 // umax(X, Y) u> X --> X u< Y 3445 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT) 3446 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 3447 3448 // These cases should be handled in InstSimplify: 3449 // umax(X, Y) u>= X --> true 3450 // umax(X, Y) u< X --> false 3451 return nullptr; 3452 } 3453 3454 return nullptr; 3455 } 3456 3457 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) { 3458 if (!I.isEquality()) 3459 return nullptr; 3460 3461 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3462 const CmpInst::Predicate Pred = I.getPredicate(); 3463 Value *A, *B, *C, *D; 3464 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) { 3465 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0 3466 Value *OtherVal = A == Op1 ? 
B : A; 3467 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 3468 } 3469 3470 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) { 3471 // A^c1 == C^c2 --> A == C^(c1^c2) 3472 ConstantInt *C1, *C2; 3473 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) && 3474 Op1->hasOneUse()) { 3475 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue()); 3476 Value *Xor = Builder.CreateXor(C, NC); 3477 return new ICmpInst(Pred, A, Xor); 3478 } 3479 3480 // A^B == A^D -> B == D 3481 if (A == C) 3482 return new ICmpInst(Pred, B, D); 3483 if (A == D) 3484 return new ICmpInst(Pred, B, C); 3485 if (B == C) 3486 return new ICmpInst(Pred, A, D); 3487 if (B == D) 3488 return new ICmpInst(Pred, A, C); 3489 } 3490 } 3491 3492 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) { 3493 // A == (A^B) -> B == 0 3494 Value *OtherVal = A == Op0 ? B : A; 3495 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 3496 } 3497 3498 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0 3499 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) && 3500 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) { 3501 Value *X = nullptr, *Y = nullptr, *Z = nullptr; 3502 3503 if (A == C) { 3504 X = B; 3505 Y = D; 3506 Z = A; 3507 } else if (A == D) { 3508 X = B; 3509 Y = C; 3510 Z = A; 3511 } else if (B == C) { 3512 X = A; 3513 Y = D; 3514 Z = B; 3515 } else if (B == D) { 3516 X = A; 3517 Y = C; 3518 Z = B; 3519 } 3520 3521 if (X) { // Build (X^Y) & Z 3522 Op1 = Builder.CreateXor(X, Y); 3523 Op1 = Builder.CreateAnd(Op1, Z); 3524 I.setOperand(0, Op1); 3525 I.setOperand(1, Constant::getNullValue(Op1->getType())); 3526 return &I; 3527 } 3528 } 3529 3530 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B) 3531 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B) 3532 ConstantInt *Cst1; 3533 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) && 3534 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) || 3535 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) && 3536 match(Op1, m_ZExt(m_Value(A))))) { 3537 APInt Pow2 = Cst1->getValue() + 1; 3538 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) && 3539 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth()) 3540 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType())); 3541 } 3542 3543 // (A >> C) == (B >> C) --> (A^B) u< (1 << C) 3544 // For lshr and ashr pairs. 3545 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) && 3546 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) || 3547 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) && 3548 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) { 3549 unsigned TypeBits = Cst1->getBitWidth(); 3550 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3551 if (ShAmt < TypeBits && ShAmt != 0) { 3552 ICmpInst::Predicate NewPred = 3553 Pred == ICmpInst::ICMP_NE ? 
ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 3554 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 3555 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt); 3556 return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal)); 3557 } 3558 } 3559 3560 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0 3561 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) && 3562 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) { 3563 unsigned TypeBits = Cst1->getBitWidth(); 3564 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3565 if (ShAmt < TypeBits && ShAmt != 0) { 3566 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 3567 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt); 3568 Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal), 3569 I.getName() + ".mask"); 3570 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType())); 3571 } 3572 } 3573 3574 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to 3575 // "icmp (and X, mask), cst" 3576 uint64_t ShAmt = 0; 3577 if (Op0->hasOneUse() && 3578 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) && 3579 match(Op1, m_ConstantInt(Cst1)) && 3580 // Only do this when A has multiple uses. This is most important to do 3581 // when it exposes other optimizations. 3582 !A->hasOneUse()) { 3583 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits(); 3584 3585 if (ShAmt < ASize) { 3586 APInt MaskV = 3587 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits()); 3588 MaskV <<= ShAmt; 3589 3590 APInt CmpV = Cst1->getValue().zext(ASize); 3591 CmpV <<= ShAmt; 3592 3593 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV)); 3594 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV)); 3595 } 3596 } 3597 3598 // If both operands are byte-swapped or bit-reversed, just compare the 3599 // original values. 3600 // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant() 3601 // and handle more intrinsics. 3602 if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) || 3603 (match(Op0, m_BitReverse(m_Value(A))) && 3604 match(Op1, m_BitReverse(m_Value(B))))) 3605 return new ICmpInst(Pred, A, B); 3606 3607 return nullptr; 3608 } 3609 3610 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so 3611 /// far. 3612 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) { 3613 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0)); 3614 Value *LHSCIOp = LHSCI->getOperand(0); 3615 Type *SrcTy = LHSCIOp->getType(); 3616 Type *DestTy = LHSCI->getType(); 3617 Value *RHSCIOp; 3618 3619 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the 3620 // integer type is the same size as the pointer type. 3621 const auto& CompatibleSizes = [&](Type* SrcTy, Type* DestTy) -> bool { 3622 if (isa<VectorType>(SrcTy)) { 3623 SrcTy = cast<VectorType>(SrcTy)->getElementType(); 3624 DestTy = cast<VectorType>(DestTy)->getElementType(); 3625 } 3626 return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth(); 3627 }; 3628 if (LHSCI->getOpcode() == Instruction::PtrToInt && 3629 CompatibleSizes(SrcTy, DestTy)) { 3630 Value *RHSOp = nullptr; 3631 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) { 3632 Value *RHSCIOp = RHSC->getOperand(0); 3633 if (RHSCIOp->getType()->getPointerAddressSpace() == 3634 LHSCIOp->getType()->getPointerAddressSpace()) { 3635 RHSOp = RHSC->getOperand(0); 3636 // If the pointer types don't match, insert a bitcast. 
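        // (E.g. an i8* compared against an i32* in the same address space;
        // the bitcast only changes the pointee type and is free at runtime.)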
3637 if (LHSCIOp->getType() != RHSOp->getType()) 3638 RHSOp = Builder.CreateBitCast(RHSOp, LHSCIOp->getType()); 3639 } 3640 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) { 3641 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy); 3642 } 3643 3644 if (RHSOp) 3645 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp); 3646 } 3647 3648 // The code below only handles extension cast instructions, so far. 3649 // Enforce this. 3650 if (LHSCI->getOpcode() != Instruction::ZExt && 3651 LHSCI->getOpcode() != Instruction::SExt) 3652 return nullptr; 3653 3654 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt; 3655 bool isSignedCmp = ICmp.isSigned(); 3656 3657 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) { 3658 // Not an extension from the same type? 3659 RHSCIOp = CI->getOperand(0); 3660 if (RHSCIOp->getType() != LHSCIOp->getType()) 3661 return nullptr; 3662 3663 // If the signedness of the two casts doesn't agree (i.e. one is a sext 3664 // and the other is a zext), then we can't handle this. 3665 if (CI->getOpcode() != LHSCI->getOpcode()) 3666 return nullptr; 3667 3668 // Deal with equality cases early. 3669 if (ICmp.isEquality()) 3670 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3671 3672 // A signed comparison of sign extended values simplifies into a 3673 // signed comparison. 3674 if (isSignedCmp && isSignedExt) 3675 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3676 3677 // The other three cases all fold into an unsigned comparison. 3678 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp); 3679 } 3680 3681 // If we aren't dealing with a constant on the RHS, exit early. 3682 auto *C = dyn_cast<Constant>(ICmp.getOperand(1)); 3683 if (!C) 3684 return nullptr; 3685 3686 // Compute the constant that would happen if we truncated to SrcTy then 3687 // re-extended to DestTy. 3688 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy); 3689 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy); 3690 3691 // If the re-extended constant didn't change... 3692 if (Res2 == C) { 3693 // Deal with equality cases early. 3694 if (ICmp.isEquality()) 3695 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3696 3697 // A signed comparison of sign extended values simplifies into a 3698 // signed comparison. 3699 if (isSignedExt && isSignedCmp) 3700 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3701 3702 // The other three cases all fold into an unsigned comparison. 3703 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1); 3704 } 3705 3706 // The re-extended constant changed, partly changed (in the case of a vector), 3707 // or could not be determined to be equal (in the case of a constant 3708 // expression), so the constant cannot be represented in the shorter type. 3709 // Consequently, we cannot emit a simple comparison. 3710 // All the cases that fold to true or false will have already been handled 3711 // by SimplifyICmpInst, so only deal with the tricky case. 3712 3713 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C)) 3714 return nullptr; 3715 3716 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases 3717 // should have been folded away previously and not enter in here. 3718 3719 // We're performing an unsigned comp with a sign extended value. 3720 // This is true if the input is >= 0. 
[aka >s -1] 3721 Constant *NegOne = Constant::getAllOnesValue(SrcTy); 3722 Value *Result = Builder.CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName()); 3723 3724 // Finally, return the value computed. 3725 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT) 3726 return replaceInstUsesWith(ICmp, Result); 3727 3728 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!"); 3729 return BinaryOperator::CreateNot(Result); 3730 } 3731 3732 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, 3733 Value *RHS, Instruction &OrigI, 3734 Value *&Result, Constant *&Overflow) { 3735 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS)) 3736 std::swap(LHS, RHS); 3737 3738 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) { 3739 Result = OpResult; 3740 Overflow = OverflowVal; 3741 if (ReuseName) 3742 Result->takeName(&OrigI); 3743 return true; 3744 }; 3745 3746 // If the overflow check was an add followed by a compare, the insertion point 3747 // may be pointing to the compare. We want to insert the new instructions 3748 // before the add in case there are uses of the add between the add and the 3749 // compare. 3750 Builder.SetInsertPoint(&OrigI); 3751 3752 switch (OCF) { 3753 case OCF_INVALID: 3754 llvm_unreachable("bad overflow check kind!"); 3755 3756 case OCF_UNSIGNED_ADD: { 3757 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI); 3758 if (OR == OverflowResult::NeverOverflows) 3759 return SetResult(Builder.CreateNUWAdd(LHS, RHS), Builder.getFalse(), 3760 true); 3761 3762 if (OR == OverflowResult::AlwaysOverflows) 3763 return SetResult(Builder.CreateAdd(LHS, RHS), Builder.getTrue(), true); 3764 3765 // Fall through uadd into sadd 3766 LLVM_FALLTHROUGH; 3767 } 3768 case OCF_SIGNED_ADD: { 3769 // X + 0 -> {X, false} 3770 if (match(RHS, m_Zero())) 3771 return SetResult(LHS, Builder.getFalse(), false); 3772 3773 // We can strength reduce this signed add into a regular add if we can prove 3774 // that it will never overflow. 
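    // (OCF is re-checked here because OCF_UNSIGNED_ADD falls through into
    // this case above; the nsw strength reduction applies only to the
    // signed flavor.)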
    if (OCF == OCF_SIGNED_ADD)
      if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
        return SetResult(Builder.CreateNSWAdd(LHS, RHS), Builder.getFalse(),
                         true);
    break;
  }

  case OCF_UNSIGNED_SUB:
  case OCF_SIGNED_SUB: {
    // X - 0 -> {X, false}
    if (match(RHS, m_Zero()))
      return SetResult(LHS, Builder.getFalse(), false);

    if (OCF == OCF_SIGNED_SUB) {
      if (willNotOverflowSignedSub(LHS, RHS, OrigI))
        return SetResult(Builder.CreateNSWSub(LHS, RHS), Builder.getFalse(),
                         true);
    } else {
      if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
        return SetResult(Builder.CreateNUWSub(LHS, RHS), Builder.getFalse(),
                         true);
    }
    break;
  }

  case OCF_UNSIGNED_MUL: {
    OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
    if (OR == OverflowResult::NeverOverflows)
      return SetResult(Builder.CreateNUWMul(LHS, RHS), Builder.getFalse(),
                       true);
    if (OR == OverflowResult::AlwaysOverflows)
      return SetResult(Builder.CreateMul(LHS, RHS), Builder.getTrue(), true);
    LLVM_FALLTHROUGH;
  }
  case OCF_SIGNED_MUL:
    // X * undef -> undef
    if (isa<UndefValue>(RHS))
      return SetResult(RHS, UndefValue::get(Builder.getInt1Ty()), false);

    // X * 0 -> {0, false}
    if (match(RHS, m_Zero()))
      return SetResult(RHS, Builder.getFalse(), false);

    // X * 1 -> {X, false}
    if (match(RHS, m_One()))
      return SetResult(LHS, Builder.getFalse(), false);

    if (OCF == OCF_SIGNED_MUL)
      if (willNotOverflowSignedMul(LHS, RHS, OrigI))
        return SetResult(Builder.CreateNSWMul(LHS, RHS), Builder.getFalse(),
                         true);
    break;
  }

  return false;
}

/// Recognize and process idiom involving test for multiplication
/// overflow.
///
/// The caller has matched a pattern of the form:
///   I = cmp u (mul(zext A, zext B)), V
/// The function checks if this is a test for overflow and if so replaces
/// multiplication with call to 'mul.with.overflow' intrinsic.
///
/// \param I Compare instruction.
/// \param MulVal Result of the 'mul' instruction. It is one of the arguments
///               of the compare instruction. Must be of integer type.
/// \param OtherVal The other argument of the compare instruction.
/// \returns Instruction which must replace the compare instruction, null if
///          no replacement is required.
static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
                                         Value *OtherVal, InstCombiner &IC) {
  // Don't bother doing this transformation for pointers, don't do it for
  // vectors.
  if (!isa<IntegerType>(MulVal->getType()))
    return nullptr;

  assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
  assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
  auto *MulInstr = dyn_cast<Instruction>(MulVal);
  if (!MulInstr)
    return nullptr;
  assert(MulInstr->getOpcode() == Instruction::Mul);

  auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
       *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
  assert(LHS->getOpcode() == Instruction::ZExt);
  assert(RHS->getOpcode() == Instruction::ZExt);
  Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);

  // Calculate type and width of the result produced by mul.with.overflow.
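  // E.g. for mul(zext i8 %a to i32, zext i16 %b to i32) this picks
  // MulWidth == 16 and MulType == i16, the wider of the two source types.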
  Type *TyA = A->getType(), *TyB = B->getType();
  unsigned WidthA = TyA->getPrimitiveSizeInBits(),
           WidthB = TyB->getPrimitiveSizeInBits();
  unsigned MulWidth;
  Type *MulType;
  if (WidthB > WidthA) {
    MulWidth = WidthB;
    MulType = TyB;
  } else {
    MulWidth = WidthA;
    MulType = TyA;
  }

  // In order to replace the original mul with a narrower mul.with.overflow,
  // all uses must ignore the upper bits of the product. The number of used
  // low bits must not be greater than the width of mul.with.overflow.
  if (MulVal->hasNUsesOrMore(2))
    for (User *U : MulVal->users()) {
      if (U == &I)
        continue;
      if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
        // Check if truncation ignores bits above MulWidth.
        unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
        if (TruncWidth > MulWidth)
          return nullptr;
      } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
        // Check if AND ignores bits above MulWidth.
        if (BO->getOpcode() != Instruction::And)
          return nullptr;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
          const APInt &CVal = CI->getValue();
          if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
            return nullptr;
        } else {
          // In this case we could have the operand of the binary operation
          // being defined in another block, and performing the replacement
          // could break the dominance relation.
          return nullptr;
        }
      } else {
        // Other uses prohibit this transformation.
        return nullptr;
      }
    }

  // Recognize patterns
  switch (I.getPredicate()) {
  case ICmpInst::ICMP_EQ:
  case ICmpInst::ICMP_NE:
    // Recognize pattern:
    //   mulval = mul(zext A, zext B)
    //   cmp eq/ne mulval, zext trunc mulval
    if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
      if (Zext->hasOneUse()) {
        Value *ZextArg = Zext->getOperand(0);
        if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
          if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
            break; // Recognized
      }

    // Recognize pattern:
    //   mulval = mul(zext A, zext B)
    //   cmp eq/ne mulval, and(mulval, mask), mask selects low MulWidth bits.
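    // E.g. with MulWidth == 16 the mask must be 65535:
    //   %r = icmp ne i32 %mulval, (and i32 %mulval, 65535)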
3930 ConstantInt *CI; 3931 Value *ValToMask; 3932 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) { 3933 if (ValToMask != MulVal) 3934 return nullptr; 3935 const APInt &CVal = CI->getValue() + 1; 3936 if (CVal.isPowerOf2()) { 3937 unsigned MaskWidth = CVal.logBase2(); 3938 if (MaskWidth == MulWidth) 3939 break; // Recognized 3940 } 3941 } 3942 return nullptr; 3943 3944 case ICmpInst::ICMP_UGT: 3945 // Recognize pattern: 3946 // mulval = mul(zext A, zext B) 3947 // cmp ugt mulval, max 3948 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3949 APInt MaxVal = APInt::getMaxValue(MulWidth); 3950 MaxVal = MaxVal.zext(CI->getBitWidth()); 3951 if (MaxVal.eq(CI->getValue())) 3952 break; // Recognized 3953 } 3954 return nullptr; 3955 3956 case ICmpInst::ICMP_UGE: 3957 // Recognize pattern: 3958 // mulval = mul(zext A, zext B) 3959 // cmp uge mulval, max+1 3960 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3961 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth); 3962 if (MaxVal.eq(CI->getValue())) 3963 break; // Recognized 3964 } 3965 return nullptr; 3966 3967 case ICmpInst::ICMP_ULE: 3968 // Recognize pattern: 3969 // mulval = mul(zext A, zext B) 3970 // cmp ule mulval, max 3971 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3972 APInt MaxVal = APInt::getMaxValue(MulWidth); 3973 MaxVal = MaxVal.zext(CI->getBitWidth()); 3974 if (MaxVal.eq(CI->getValue())) 3975 break; // Recognized 3976 } 3977 return nullptr; 3978 3979 case ICmpInst::ICMP_ULT: 3980 // Recognize pattern: 3981 // mulval = mul(zext A, zext B) 3982 // cmp ule mulval, max + 1 3983 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3984 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth); 3985 if (MaxVal.eq(CI->getValue())) 3986 break; // Recognized 3987 } 3988 return nullptr; 3989 3990 default: 3991 return nullptr; 3992 } 3993 3994 InstCombiner::BuilderTy &Builder = IC.Builder; 3995 Builder.SetInsertPoint(MulInstr); 3996 3997 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B) 3998 Value *MulA = A, *MulB = B; 3999 if (WidthA < MulWidth) 4000 MulA = Builder.CreateZExt(A, MulType); 4001 if (WidthB < MulWidth) 4002 MulB = Builder.CreateZExt(B, MulType); 4003 Value *F = Intrinsic::getDeclaration(I.getModule(), 4004 Intrinsic::umul_with_overflow, MulType); 4005 CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul"); 4006 IC.Worklist.Add(MulInstr); 4007 4008 // If there are uses of mul result other than the comparison, we know that 4009 // they are truncation or binary AND. Change them to use result of 4010 // mul.with.overflow and adjust properly mask/size. 
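  // (The use-list screening earlier in this function bailed out on any other
  // kind of user, so only those two forms can appear here.)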
4011 if (MulVal->hasNUsesOrMore(2)) { 4012 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value"); 4013 for (auto UI = MulVal->user_begin(), UE = MulVal->user_end(); UI != UE;) { 4014 User *U = *UI++; 4015 if (U == &I || U == OtherVal) 4016 continue; 4017 if (TruncInst *TI = dyn_cast<TruncInst>(U)) { 4018 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth) 4019 IC.replaceInstUsesWith(*TI, Mul); 4020 else 4021 TI->setOperand(0, Mul); 4022 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) { 4023 assert(BO->getOpcode() == Instruction::And); 4024 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask) 4025 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1)); 4026 APInt ShortMask = CI->getValue().trunc(MulWidth); 4027 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask); 4028 Instruction *Zext = 4029 cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType())); 4030 IC.Worklist.Add(Zext); 4031 IC.replaceInstUsesWith(*BO, Zext); 4032 } else { 4033 llvm_unreachable("Unexpected Binary operation"); 4034 } 4035 IC.Worklist.Add(cast<Instruction>(U)); 4036 } 4037 } 4038 if (isa<Instruction>(OtherVal)) 4039 IC.Worklist.Add(cast<Instruction>(OtherVal)); 4040 4041 // The original icmp gets replaced with the overflow value, maybe inverted 4042 // depending on predicate. 4043 bool Inverse = false; 4044 switch (I.getPredicate()) { 4045 case ICmpInst::ICMP_NE: 4046 break; 4047 case ICmpInst::ICMP_EQ: 4048 Inverse = true; 4049 break; 4050 case ICmpInst::ICMP_UGT: 4051 case ICmpInst::ICMP_UGE: 4052 if (I.getOperand(0) == MulVal) 4053 break; 4054 Inverse = true; 4055 break; 4056 case ICmpInst::ICMP_ULT: 4057 case ICmpInst::ICMP_ULE: 4058 if (I.getOperand(1) == MulVal) 4059 break; 4060 Inverse = true; 4061 break; 4062 default: 4063 llvm_unreachable("Unexpected predicate"); 4064 } 4065 if (Inverse) { 4066 Value *Res = Builder.CreateExtractValue(Call, 1); 4067 return BinaryOperator::CreateNot(Res); 4068 } 4069 4070 return ExtractValueInst::Create(Call, 1); 4071 } 4072 4073 /// When performing a comparison against a constant, it is possible that not all 4074 /// the bits in the LHS are demanded. This helper method computes the mask that 4075 /// IS demanded. 4076 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) { 4077 const APInt *RHS; 4078 if (!match(I.getOperand(1), m_APInt(RHS))) 4079 return APInt::getAllOnesValue(BitWidth); 4080 4081 // If this is a normal comparison, it demands all bits. If it is a sign bit 4082 // comparison, it only demands the sign bit. 4083 bool UnusedBit; 4084 if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit)) 4085 return APInt::getSignMask(BitWidth); 4086 4087 switch (I.getPredicate()) { 4088 // For a UGT comparison, we don't care about any bits that 4089 // correspond to the trailing ones of the comparand. The value of these 4090 // bits doesn't impact the outcome of the comparison, because any value 4091 // greater than the RHS must differ in a bit higher than these due to carry. 4092 case ICmpInst::ICMP_UGT: 4093 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes()); 4094 4095 // Similarly, for a ULT comparison, we don't care about the trailing zeros. 4096 // Any value less than the RHS must differ in a higher bit because of carries. 
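  // E.g. "X u< 8" depends only on the bits of X from position 3 upward: it
  // holds exactly when those bits are all zero.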
4097 case ICmpInst::ICMP_ULT: 4098 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros()); 4099 4100 default: 4101 return APInt::getAllOnesValue(BitWidth); 4102 } 4103 } 4104 4105 /// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst 4106 /// should be swapped. 4107 /// The decision is based on how many times these two operands are reused 4108 /// as subtract operands and their positions in those instructions. 4109 /// The rationale is that several architectures use the same instruction for 4110 /// both subtract and cmp. Thus, it is better if the order of those operands 4111 /// match. 4112 /// \return true if Op0 and Op1 should be swapped. 4113 static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) { 4114 // Filter out pointer values as those cannot appear directly in subtract. 4115 // FIXME: we may want to go through inttoptrs or bitcasts. 4116 if (Op0->getType()->isPointerTy()) 4117 return false; 4118 // If a subtract already has the same operands as a compare, swapping would be 4119 // bad. If a subtract has the same operands as a compare but in reverse order, 4120 // then swapping is good. 4121 int GoodToSwap = 0; 4122 for (const User *U : Op0->users()) { 4123 if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0)))) 4124 GoodToSwap++; 4125 else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1)))) 4126 GoodToSwap--; 4127 } 4128 return GoodToSwap > 0; 4129 } 4130 4131 /// Check that one use is in the same block as the definition and all 4132 /// other uses are in blocks dominated by a given block. 4133 /// 4134 /// \param DI Definition 4135 /// \param UI Use 4136 /// \param DB Block that must dominate all uses of \p DI outside 4137 /// the parent block 4138 /// \return true when \p UI is the only use of \p DI in the parent block 4139 /// and all other uses of \p DI are in blocks dominated by \p DB. 4140 /// 4141 bool InstCombiner::dominatesAllUses(const Instruction *DI, 4142 const Instruction *UI, 4143 const BasicBlock *DB) const { 4144 assert(DI && UI && "Instruction not defined\n"); 4145 // Ignore incomplete definitions. 4146 if (!DI->getParent()) 4147 return false; 4148 // DI and UI must be in the same block. 4149 if (DI->getParent() != UI->getParent()) 4150 return false; 4151 // Protect from self-referencing blocks. 4152 if (DI->getParent() == DB) 4153 return false; 4154 for (const User *U : DI->users()) { 4155 auto *Usr = cast<Instruction>(U); 4156 if (Usr != UI && !DT.dominates(DB, Usr->getParent())) 4157 return false; 4158 } 4159 return true; 4160 } 4161 4162 /// Return true when the instruction sequence within a block is select-cmp-br. 4163 static bool isChainSelectCmpBranch(const SelectInst *SI) { 4164 const BasicBlock *BB = SI->getParent(); 4165 if (!BB) 4166 return false; 4167 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator()); 4168 if (!BI || BI->getNumSuccessors() != 2) 4169 return false; 4170 auto *IC = dyn_cast<ICmpInst>(BI->getCondition()); 4171 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI)) 4172 return false; 4173 return true; 4174 } 4175 4176 /// True when a select result is replaced by one of its operands 4177 /// in select-icmp sequence. This will eventually result in the elimination 4178 /// of the select. 
///
/// \param SI Select instruction
/// \param Icmp Compare instruction
/// \param SIOpd Operand that replaces the select
///
/// Notes:
/// - The replacement is global and requires dominator information
/// - The caller is responsible for the actual replacement
///
/// Example:
///
/// entry:
///  %4 = select i1 %3, %C* %0, %C* null
///  %5 = icmp eq %C* %4, null
///  br i1 %5, label %9, label %7
///  ...
///  ; <label>:7                          ; preds = %entry
///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
///  ...
///
/// can be transformed to
///
///  %5 = icmp eq %C* %0, null
///  %6 = select i1 %3, i1 %5, i1 true
///  br i1 %6, label %9, label %7
///  ...
///  ; <label>:7                          ; preds = %entry
///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  ; %4 replaced by %0
///
/// The same applies when the first operand of the select is a constant
/// and/or the compare is for not-equal rather than equal.
///
/// NOTE: The function is only called when the select and compare constants
/// are equal, so the optimization can work only for EQ predicates. This is
/// not a major restriction since a NE compare should be 'normalized' to an
/// equal compare, which usually happens in the combiner; the test case
/// select-cmp-br.ll checks for it.
bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
                                             const ICmpInst *Icmp,
                                             const unsigned SIOpd) {
  assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
  if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
    BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
    // The check for the single predecessor is not the best that can be
    // done. But it protects efficiently against cases like when SI's
    // home block has two successors, Succ and Succ1, and Succ1 is a
    // predecessor of Succ. Then SI can't be replaced by SIOpd because the
    // use that gets replaced can be reached on either path. So the
    // uniqueness check guarantees that the path all uses of SI (outside
    // SI's parent) are on is disjoint from all other paths out of SI.
    // But that information is more expensive to compute, and the trade-off
    // here is in favor of compile-time. Note also that we check for a
    // single predecessor and not just uniqueness: this handles the case
    // where Succ and Succ1 are the same basic block.
    if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
      NumSel++;
      SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
      return true;
    }
  }
  return false;
}

/// Try to fold the comparison based on range information we can get by
/// checking whether bits are known to be zero or one in the inputs.
Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = Op0->getType();
  ICmpInst::Predicate Pred = I.getPredicate();

  // Get scalar or pointer size.
  unsigned BitWidth = Ty->isIntOrIntVectorTy()
                          ?
Ty->getScalarSizeInBits() 4252 : DL.getIndexTypeSizeInBits(Ty->getScalarType()); 4253 4254 if (!BitWidth) 4255 return nullptr; 4256 4257 KnownBits Op0Known(BitWidth); 4258 KnownBits Op1Known(BitWidth); 4259 4260 if (SimplifyDemandedBits(&I, 0, 4261 getDemandedBitsLHSMask(I, BitWidth), 4262 Op0Known, 0)) 4263 return &I; 4264 4265 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth), 4266 Op1Known, 0)) 4267 return &I; 4268 4269 // Given the known and unknown bits, compute a range that the LHS could be 4270 // in. Compute the Min, Max and RHS values based on the known bits. For the 4271 // EQ and NE we use unsigned values. 4272 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0); 4273 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0); 4274 if (I.isSigned()) { 4275 computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4276 computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4277 } else { 4278 computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4279 computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4280 } 4281 4282 // If Min and Max are known to be the same, then SimplifyDemandedBits figured 4283 // out that the LHS or RHS is a constant. Constant fold this now, so that 4284 // code below can assume that Min != Max. 4285 if (!isa<Constant>(Op0) && Op0Min == Op0Max) 4286 return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1); 4287 if (!isa<Constant>(Op1) && Op1Min == Op1Max) 4288 return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min)); 4289 4290 // Based on the range information we know about the LHS, see if we can 4291 // simplify this comparison. For example, (x&4) < 8 is always true. 4292 switch (Pred) { 4293 default: 4294 llvm_unreachable("Unknown icmp opcode!"); 4295 case ICmpInst::ICMP_EQ: 4296 case ICmpInst::ICMP_NE: { 4297 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) { 4298 return Pred == CmpInst::ICMP_EQ 4299 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())) 4300 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4301 } 4302 4303 // If all bits are known zero except for one, then we know at most one bit 4304 // is set. If the comparison is against zero, then this is a check to see if 4305 // *that* bit is set. 4306 APInt Op0KnownZeroInverted = ~Op0Known.Zero; 4307 if (Op1Known.isZero()) { 4308 // If the LHS is an AND with the same constant, look through it. 4309 Value *LHS = nullptr; 4310 const APInt *LHSC; 4311 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) || 4312 *LHSC != Op0KnownZeroInverted) 4313 LHS = Op0; 4314 4315 Value *X; 4316 if (match(LHS, m_Shl(m_One(), m_Value(X)))) { 4317 APInt ValToCheck = Op0KnownZeroInverted; 4318 Type *XTy = X->getType(); 4319 if (ValToCheck.isPowerOf2()) { 4320 // ((1 << X) & 8) == 0 -> X != 3 4321 // ((1 << X) & 8) != 0 -> X == 3 4322 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4323 auto NewPred = ICmpInst::getInversePredicate(Pred); 4324 return new ICmpInst(NewPred, X, CmpC); 4325 } else if ((++ValToCheck).isPowerOf2()) { 4326 // ((1 << X) & 7) == 0 -> X >= 3 4327 // ((1 << X) & 7) != 0 -> X < 3 4328 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4329 auto NewPred = 4330 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT; 4331 return new ICmpInst(NewPred, X, CmpC); 4332 } 4333 } 4334 4335 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1. 
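      // E.g. with the power of 2 being 8, "(8 >>u X) & 1" is nonzero exactly
      // when X == 3 (the log2 of 8), which is what the fold below emits.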
      const APInt *CI;
      if (Op0KnownZeroInverted.isOneValue() &&
          match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
        // ((8 >>u X) & 1) == 0 -> X != 3
        // ((8 >>u X) & 1) != 0 -> X == 3
        unsigned CmpVal = CI->countTrailingZeros();
        auto NewPred = ICmpInst::getInversePredicate(Pred);
        return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
      }
    }
    break;
  }
  case ICmpInst::ICMP_ULT: {
    if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);

    const APInt *CmpC;
    if (match(Op1, m_APInt(CmpC))) {
      // A <u C -> A == C-1 if min(A)+1 == C
      if (*CmpC == Op0Min + 1)
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                            ConstantInt::get(Op1->getType(), *CmpC - 1));
      // X <u C --> X == 0, if the number of zero bits in the bottom of X
      // exceeds the log2 of C.
      if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                            Constant::getNullValue(Op1->getType()));
    }
    break;
  }
  case ICmpInst::ICMP_UGT: {
    if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);

    const APInt *CmpC;
    if (match(Op1, m_APInt(CmpC))) {
      // A >u C -> A == C+1 if max(A)-1 == C
      if (*CmpC == Op0Max - 1)
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                            ConstantInt::get(Op1->getType(), *CmpC + 1));
      // X >u C --> X != 0, if the number of zero bits in the bottom of X
      // exceeds the log2 of C.
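      // E.g. if the low four bits of X are known zero, "X u> 12" is just
      // "X != 0": any nonzero such X is at least 16.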
      if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
        return new ICmpInst(ICmpInst::ICMP_NE, Op0,
                            Constant::getNullValue(Op1->getType()));
    }
    break;
  }
  case ICmpInst::ICMP_SLT: {
    if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
    const APInt *CmpC;
    if (match(Op1, m_APInt(CmpC))) {
      if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                            ConstantInt::get(Op1->getType(), *CmpC - 1));
    }
    break;
  }
  case ICmpInst::ICMP_SGT: {
    if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
      return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
    const APInt *CmpC;
    if (match(Op1, m_APInt(CmpC))) {
      if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
        return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
                            ConstantInt::get(Op1->getType(), *CmpC + 1));
    }
    break;
  }
  case ICmpInst::ICMP_SGE:
    assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
    if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
    break;
  case ICmpInst::ICMP_SLE:
    assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
    if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
    break;
  case ICmpInst::ICMP_UGE:
    assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
    if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
    break;
  case ICmpInst::ICMP_ULE:
    assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
    if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Max == Op0Min)
  case ICmpInst::ICMP_ULE:
    assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
    if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
      return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
    if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
      return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
    if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
      return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
    break;
  }

  // Turn a signed comparison into an unsigned one if both operands are known
  // to have the same sign.
  if (I.isSigned() &&
      ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
       (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
    return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);

  return nullptr;
}

/// If we have an icmp le or icmp ge instruction with a constant operand, turn
/// it into the appropriate icmp lt or icmp gt instruction. This transform
/// allows them to be folded in visitICmpInst.
static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
  ICmpInst::Predicate Pred = I.getPredicate();
  if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE &&
      Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE)
    return nullptr;

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  auto *Op1C = dyn_cast<Constant>(Op1);
  if (!Op1C)
    return nullptr;

  // Check if the constant operand can be safely incremented/decremented
  // without overflowing/underflowing. For scalars, SimplifyICmpInst has
  // already handled the edge cases for us, so we just assert on them. For
  // vectors, we must handle the edge cases.
  Type *Op1Type = Op1->getType();
  bool IsSigned = I.isSigned();
  bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE);
  auto *CI = dyn_cast<ConstantInt>(Op1C);
  if (CI) {
    // A <= MAX -> TRUE ; A >= MIN -> TRUE
    assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned));
  } else if (Op1Type->isVectorTy()) {
    // TODO? If the edge cases for vectors were guaranteed to be handled as
    // they are for scalars, we could remove the min/max checks. However, to
    // do that, we would have to use insertelement/shufflevector to replace
    // edge values.
    unsigned NumElts = Op1Type->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (!Elt)
        return nullptr;

      if (isa<UndefValue>(Elt))
        continue;

      // Bail out if we can't determine if this constant is min/max or if we
      // know that this constant is min/max.
      auto *CI = dyn_cast<ConstantInt>(Elt);
      if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned)))
        return nullptr;
    }
  } else {
    // ConstantExpr?
    return nullptr;
  }

  // Increment or decrement the constant and set the new comparison predicate:
  // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT
  Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true);
  CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGT;
  NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred;
  return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne));
}
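
// For instance (illustrative IR), canonicalizeCmpWithConstant rewrites:
//   icmp sle i32 %x, 3  --> icmp slt i32 %x, 4
//   icmp uge i32 %x, 10 --> icmp ugt i32 %x, 9
// Scalar edge cases such as 'icmp sle %x, INT_MAX' never reach here because
// SimplifyICmpInst has already folded them, as asserted above.
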
/// Integer compare with boolean values can always be turned into bitwise ops.
static Instruction *canonicalizeICmpBool(ICmpInst &I,
                                         InstCombiner::BuilderTy &Builder) {
  Value *A = I.getOperand(0), *B = I.getOperand(1);
  assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");

  // A boolean compared to true/false can be simplified to Op0/true/false in
  // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
  // Cases not handled by InstSimplify are always 'not' of Op0.
  if (match(B, m_Zero())) {
    switch (I.getPredicate()) {
    case CmpInst::ICMP_EQ:  // A ==  0 -> !A
    case CmpInst::ICMP_ULE: // A <=u 0 -> !A
    case CmpInst::ICMP_SGE: // A >=s 0 -> !A
      return BinaryOperator::CreateNot(A);
    default:
      llvm_unreachable("ICmp i1 X, C not simplified as expected.");
    }
  } else if (match(B, m_One())) {
    switch (I.getPredicate()) {
    case CmpInst::ICMP_NE:  // A !=  1 -> !A
    case CmpInst::ICMP_ULT: // A <u  1 -> !A
    case CmpInst::ICMP_SGT: // A >s -1 -> !A
      return BinaryOperator::CreateNot(A);
    default:
      llvm_unreachable("ICmp i1 X, C not simplified as expected.");
    }
  }

  switch (I.getPredicate()) {
  default:
    llvm_unreachable("Invalid icmp instruction!");
  case ICmpInst::ICMP_EQ:
    // icmp eq i1 A, B -> ~(A ^ B)
    return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  case ICmpInst::ICMP_NE:
    // icmp ne i1 A, B -> A ^ B
    return BinaryOperator::CreateXor(A, B);

  case ICmpInst::ICMP_UGT:
    // icmp ugt -> icmp ult
    std::swap(A, B);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULT:
    // icmp ult i1 A, B -> ~A & B
    return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);

  case ICmpInst::ICMP_SGT:
    // icmp sgt -> icmp slt
    std::swap(A, B);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLT:
    // icmp slt i1 A, B -> A & ~B
    return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);

  case ICmpInst::ICMP_UGE:
    // icmp uge -> icmp ule
    std::swap(A, B);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_ULE:
    // icmp ule i1 A, B -> ~A | B
    return BinaryOperator::CreateOr(Builder.CreateNot(A), B);

  case ICmpInst::ICMP_SGE:
    // icmp sge -> icmp sle
    std::swap(A, B);
    LLVM_FALLTHROUGH;
  case ICmpInst::ICMP_SLE:
    // icmp sle i1 A, B -> A | ~B
    return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
  }
}

static Instruction *foldVectorCmp(CmpInst &Cmp,
                                  InstCombiner::BuilderTy &Builder) {
  // If both arguments of the cmp are shuffles that use the same mask and
  // shuffle within a single vector, move the shuffle after the cmp.
  Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
  Value *V1, *V2;
  Constant *M;
  if (match(LHS, m_ShuffleVector(m_Value(V1), m_Undef(), m_Constant(M))) &&
      match(RHS, m_ShuffleVector(m_Value(V2), m_Undef(), m_Specific(M))) &&
      V1->getType() == V2->getType() &&
      (LHS->hasOneUse() || RHS->hasOneUse())) {
    // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
    CmpInst::Predicate P = Cmp.getPredicate();
    Value *NewCmp = isa<ICmpInst>(Cmp) ? Builder.CreateICmp(P, V1, V2)
                                       : Builder.CreateFCmp(P, V1, V2);
    return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), M);
  }
  return nullptr;
}
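
// For example (illustrative IR), with mask M = <i32 1, i32 1>:
//   %s1 = shufflevector <2 x i32> %v1, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
//   %s2 = shufflevector <2 x i32> %v2, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
//   %c  = icmp eq <2 x i32> %s1, %s2
// becomes 'shufflevector (icmp eq %v1, %v2), undef, M', comparing the
// un-shuffled operands and shuffling the result instead.
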
Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
  bool Changed = false;
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  unsigned Op0Cplxity = getComplexity(Op0);
  unsigned Op1Cplxity = getComplexity(Op1);

  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex. This puts binary operators before unary
  /// operators, before constants.
  if (Op0Cplxity < Op1Cplxity ||
      (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
    I.swapOperands();
    std::swap(Op0, Op1);
    Changed = true;
  }

  if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1,
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Comparing -val or val with non-zero is the same as just comparing val,
  // i.e., abs(val) != 0 -> val != 0.
  if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
    Value *Cond, *SelectTrue, *SelectFalse;
    if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
                            m_Value(SelectFalse)))) {
      if (Value *V = dyn_castNegVal(SelectTrue)) {
        if (V == SelectFalse)
          return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
      } else if (Value *V = dyn_castNegVal(SelectFalse)) {
        if (V == SelectTrue)
          return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
      }
    }
  }

  if (Op0->getType()->isIntOrIntVectorTy(1))
    if (Instruction *Res = canonicalizeICmpBool(I, Builder))
      return Res;

  if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
    return NewICmp;

  if (Instruction *Res = foldICmpWithConstant(I))
    return Res;

  if (Instruction *Res = foldICmpUsingKnownBits(I))
    return Res;

  // Test if the ICmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  //
  // Do the same for the other patterns recognized by matchSelectPattern.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
      Value *A, *B;
      SelectPatternResult SPR = matchSelectPattern(SI, A, B);
      if (SPR.Flavor != SPF_UNKNOWN)
        return nullptr;
    }

  // Do this after checking for min/max to prevent infinite looping.
  if (Instruction *Res = foldICmpWithZero(I))
    return Res;
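
  // As an illustration of the min/max bail-out above (IR for example only):
  //   %c = icmp sgt i32 %x, %y
  //   %m = select i1 %c, i32 %x, i32 %y
  // matchSelectPattern recognizes %m as smax(%x, %y); rewriting %c here would
  // obscure that idiom from ScalarEvolution and CodeGen.
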
  // FIXME: We only do this after checking for min/max to prevent infinite
  // looping caused by a reverse canonicalization of these patterns for min/max.
  // FIXME: The organization of folds is a mess. These would naturally go into
  // canonicalizeCmpWithConstant(), but we can't move all of the above folds
  // down here after the min/max restriction.
  ICmpInst::Predicate Pred = I.getPredicate();
  const APInt *C;
  if (match(Op1, m_APInt(C))) {
    // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
    if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
      Constant *Zero = Constant::getNullValue(Op0->getType());
      return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
    }

    // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
    if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
      Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
      return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
    }
  }

  if (Instruction *Res = foldICmpInstWithConstant(I))
    return Res;

  if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
    return Res;

  // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
    if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
      return NI;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
    if (Instruction *NI =
            foldGEPICmp(GEP, Op0, ICmpInst::getSwappedPredicate(I.getPredicate()), I))
      return NI;

  // Try to optimize equality comparisons against alloca-based pointers.
  if (Op0->getType()->isPointerTy() && I.isEquality()) {
    assert(Op1->getType()->isPointerTy() &&
           "Comparing pointer with non-pointer?");
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
        return New;
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
        return New;
  }

  // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
  Value *X;
  if (match(Op0, m_BitCast(m_SIToFP(m_Value(X))))) {
    // icmp eq (bitcast (sitofp X)), 0 --> icmp eq X, 0
    // icmp ne (bitcast (sitofp X)), 0 --> icmp ne X, 0
    // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
    // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
    if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
         Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
        match(Op1, m_Zero()))
      return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));

    // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
    if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));

    // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
    if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
      return new ICmpInst(Pred, X, ConstantInt::getAllOnesValue(X->getType()));
  }

  // Zero-equality checks are preserved through unsigned floating-point casts:
  // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
  // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
  if (match(Op0, m_BitCast(m_UIToFP(m_Value(X)))))
    if (I.isEquality() && match(Op1, m_Zero()))
      return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
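
  // Worked example of the sitofp fold above (i32 chosen for illustration):
  //   %f = sitofp i32 %x to float
  //   %b = bitcast float %f to i32
  //   %c = icmp slt i32 %b, 0
  // folds to 'icmp slt i32 %x, 0', because sitofp maps the sign of %x
  // directly onto the IEEE sign bit of %f.
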
  // Test to see if the operands of the icmp are casted versions of other
  // values. If the ptr->ptr cast can be stripped off both arguments, we do so
  // now.
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
    if (Op0->getType()->isPointerTy() &&
        (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
      // We keep moving the cast from the left operand over to the right
      // operand, where it can often be eliminated completely.
      Op0 = CI->getOperand(0);

      // If operand #1 is a bitcast instruction, it must also be a ptr->ptr
      // cast, so eliminate it as well.
      if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
        Op1 = CI2->getOperand(0);

      // If Op1 is a constant, we can fold the cast into the constant.
      if (Op0->getType() != Op1->getType()) {
        if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
          Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
        } else {
          // Otherwise, cast the RHS right before the icmp
          Op1 = Builder.CreateBitCast(Op1, Op0->getType());
        }
      }
      return new ICmpInst(I.getPredicate(), Op0, Op1);
    }
  }

  if (isa<CastInst>(Op0)) {
    // Handle the special case of: icmp (cast bool to X), <cst>
    // This comes up when you have code like
    //   int X = A < B;
    //   if (X) ...
    // For generality, we handle any zero-extension of any operand comparison
    // with a constant or another cast from the same type.
    if (isa<Constant>(Op1) || isa<CastInst>(Op1))
      if (Instruction *R = foldICmpWithCastAndCast(I))
        return R;
  }

  if (Instruction *Res = foldICmpBinOp(I))
    return Res;

  if (Instruction *Res = foldICmpWithMinMax(I))
    return Res;

  {
    Value *A, *B;
    // Transform (A & ~B) == 0 --> (A & B) != 0
    // and       (A & ~B) != 0 --> (A & B) == 0
    // if A is a power of 2.
    if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
        match(Op1, m_Zero()) &&
        isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
      return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
                          Op1);

    // ~X < ~Y --> Y < X
    // ~X < C  --> X > ~C
    if (match(Op0, m_Not(m_Value(A)))) {
      if (match(Op1, m_Not(m_Value(B))))
        return new ICmpInst(I.getPredicate(), B, A);

      const APInt *C;
      if (match(Op1, m_APInt(C)))
        return new ICmpInst(I.getSwappedPredicate(), A,
                            ConstantInt::get(Op1->getType(), ~(*C)));
    }

    Instruction *AddI = nullptr;
    if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
                                     m_Instruction(AddI))) &&
        isa<IntegerType>(A->getType())) {
      Value *Result;
      Constant *Overflow;
      if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
                                Overflow)) {
        replaceInstUsesWith(*AddI, Result);
        return replaceInstUsesWith(I, Overflow);
      }
    }

    // (zext a) * (zext b) --> llvm.umul.with.overflow.
    if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
        return R;
    }
    if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
        return R;
    }
  }

  if (Instruction *Res = foldICmpEquality(I))
    return Res;
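
  // Illustrative instance of the (zext a) * (zext b) idiom handled above:
  //   %za = zext i8 %a to i16
  //   %zb = zext i8 %b to i16
  //   %m  = mul i16 %za, %zb
  //   %c  = icmp ugt i16 %m, 255
  // tests whether the i8 multiply overflows, so it can be rewritten in terms
  // of llvm.umul.with.overflow.i8(%a, %b).
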
  // The 'cmpxchg' instruction returns an aggregate containing the old value
  // and an i1 which indicates whether or not we successfully did the swap.
  //
  // Replace comparisons between the old value and the expected value with the
  // indicator that 'cmpxchg' returns.
  //
  // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
  // spuriously fail. In those cases, the old value may equal the expected
  // value but it is possible for the swap to not occur.
  if (I.getPredicate() == ICmpInst::ICMP_EQ)
    if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
      if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
        if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
            !ACXI->isWeak())
          return ExtractValueInst::Create(ACXI, 1);

  {
    Value *X;
    const APInt *C;
    // icmp X+Cst, X
    if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
      return foldICmpAddOpConst(X, *C, I.getPredicate());

    // icmp X, X+Cst
    if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X)
      return foldICmpAddOpConst(X, *C, I.getSwappedPredicate());
  }

  if (I.getType()->isVectorTy())
    if (Instruction *Res = foldVectorCmp(I, Builder))
      return Res;

  return Changed ? &I : nullptr;
}

/// Fold fcmp ([us]itofp x, cst) if possible.
Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                                Constant *RHSC) {
  if (!isa<ConstantFP>(RHSC))
    return nullptr;
  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();

  // Get the width of the mantissa. We don't want to hack on conversions that
  // might lose information from the integer, e.g. "i64 -> float".
  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
  if (MantissaWidth == -1)
    return nullptr; // Unknown.

  IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

  bool LHSUnsigned = isa<UIToFPInst>(LHSI);

  if (I.isEquality()) {
    FCmpInst::Predicate P = I.getPredicate();
    bool IsExact = false;
    APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
    RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);

    // If the floating-point constant isn't an integer value, the result of an
    // equality comparison against it is already known.
    if (!IsExact) {
      // TODO: Can never be -0.0 and other non-representable values
      APFloat RHSRoundInt(RHS);
      RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
      if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
        if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
          return replaceInstUsesWith(I, Builder.getFalse());

        assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
        return replaceInstUsesWith(I, Builder.getTrue());
      }
    }

    // TODO: If the constant is exactly representable, is it always OK to do
    // equality compares as integer?
  }

  // Check to see that the input is converted from an integer type that is
  // small enough that the conversion preserves all bits. TODO: check here for
  // "known" sign bits. This would allow us to handle (fptosi (x >>s 62) to
  // float) if x is i64, for example.
  unsigned InputSize = IntTy->getScalarSizeInBits();
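
  // Example of the precision concern (standard IEEE-754 facts): float has a
  // 24-bit significand, so an i32 input does not always round-trip. Both
  // 16777216 (2^24) and 16777217 (2^24 + 1) convert to 16777216.0.
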
  // The following test does NOT adjust InputSize downwards for signed inputs,
  // because the most negative value still requires all the mantissa bits
  // to distinguish it from one less than that value.
  if ((int)InputSize > MantissaWidth) {
    // Conversion would lose accuracy. Check if loss can impact comparison.
    int Exp = ilogb(RHS);
    if (Exp == APFloat::IEK_Inf) {
      int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
      if (MaxExponent < (int)InputSize - !LHSUnsigned)
        // Conversion could create infinity.
        return nullptr;
    } else {
      // Note that if RHS is zero or NaN, then Exp is negative and the first
      // condition is trivially false.
      if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
        // Conversion could affect comparison.
        return nullptr;
    }
  }

  // Otherwise, we can potentially simplify the comparison. We know that it
  // will always come through as an integer value and we know the constant is
  // not a NAN (it would have been previously simplified).
  assert(!RHS.isNaN() && "NaN comparison not already folded!");

  ICmpInst::Predicate Pred;
  switch (I.getPredicate()) {
  default:
    llvm_unreachable("Unexpected predicate!");
  case FCmpInst::FCMP_UEQ:
  case FCmpInst::FCMP_OEQ:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_OGT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
    break;
  case FCmpInst::FCMP_UGE:
  case FCmpInst::FCMP_OGE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    break;
  case FCmpInst::FCMP_ULT:
  case FCmpInst::FCMP_OLT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
    break;
  case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_OLE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
    break;
  case FCmpInst::FCMP_UNE:
  case FCmpInst::FCMP_ONE:
    Pred = ICmpInst::ICMP_NE;
    break;
  case FCmpInst::FCMP_ORD:
    return replaceInstUsesWith(I, Builder.getTrue());
  case FCmpInst::FCMP_UNO:
    return replaceInstUsesWith(I, Builder.getFalse());
  }

  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer. For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison. This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics());
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics());
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }
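
  // Worked example of the clamp above (types chosen for illustration): for
  // 'fcmp olt (uitofp i8 %x to float), 300.0', UMax is 255.0 < 300.0, so the
  // lowered ICMP_ULT is trivially true for every i8 value.
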
  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // See if the RHS value is < UnsignedMin.
    APFloat UMin(RHS.getSemantics());
    UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
          Pred == ICmpInst::ICMP_UGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }

  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional. See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
    ? ConstantExpr::getFPToUI(RHSC, IntTy)
    : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value. RHSC is rounded towards
      // zero at this point.
      switch (Pred) {
      default:
        llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
        return replaceInstUsesWith(I, Builder.getTrue());
      case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
        return replaceInstUsesWith(I, Builder.getFalse());
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4  --> int <= 4
        // (float)int <= -4.4 --> false
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4  --> int <= 4
        // (float)int <= -4.4 --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4 --> false
        // (float)int < 4.4  --> int <= 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4 --> int < -4
        // (float)int < 4.4  --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4  --> int > 4
        // (float)int > -4.4 --> true
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4  --> int > 4
        // (float)int > -4.4 --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4 --> true
        // (float)int >= 4.4  --> int > 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        Pred = ICmpInst::ICMP_UGT;
        break;
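
      // For instance (illustrative constant): with RHS = 4.4, RHSInt is 4
      // after rounding toward zero, and '(float)int >= 4.4' holds exactly
      // when int > 4; this is why the UGE/SGE cases tighten the predicate.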
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4 --> int >= -4
        // (float)int >= 4.4  --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}

Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  /// Orders the operands of the compare so that they are listed from most
  /// complex to least complex. This puts binary operators before unary
  /// operators, before constants.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }

  const CmpInst::Predicate Pred = I.getPredicate();
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Simplify 'fcmp pred X, X'
  if (Op0 == Op1) {
    switch (Pred) {
    default:
      break;
    case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT: // True if unordered or less than
    case FCmpInst::FCMP_UGT: // True if unordered or greater than
    case FCmpInst::FCMP_UNE: // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD: // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ: // True if ordered and equal
    case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
  // then canonicalize the operand to 0.0.
  if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI)) {
      I.setOperand(0, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
    if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI)) {
      I.setOperand(1, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
  }
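
  // For example (illustrative IR): if %y is known never to be NaN, then
  //   %c = fcmp uno double %x, %y
  // only tests %x, so it is canonicalized to 'fcmp uno double %x, 0.0',
  // dropping the use of %y.
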
  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
      Value *A, *B;
      SelectPatternResult SPR = matchSelectPattern(SI, A, B);
      if (SPR.Flavor != SPF_UNKNOWN)
        return nullptr;
    }

  // Handle fcmp with constant RHS.
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
        FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
        ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
        if (!RHSF)
          break;

        const fltSemantics *Sem;
        // FIXME: This shouldn't be here.
        if (LHSExt->getSrcTy()->isHalfTy())
          Sem = &APFloat::IEEEhalf();
        else if (LHSExt->getSrcTy()->isFloatTy())
          Sem = &APFloat::IEEEsingle();
        else if (LHSExt->getSrcTy()->isDoubleTy())
          Sem = &APFloat::IEEEdouble();
        else if (LHSExt->getSrcTy()->isFP128Ty())
          Sem = &APFloat::IEEEquad();
        else if (LHSExt->getSrcTy()->isX86_FP80Ty())
          Sem = &APFloat::x87DoubleExtended();
        else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
          Sem = &APFloat::PPCDoubleDouble();
        else
          break;

        bool Lossy;
        APFloat F = RHSF->getValueAPF();
        F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

        // Avoid lossy conversions and denormals. Zero is a special case
        // that's OK to convert.
        APFloat Fabs = F;
        Fabs.clearSign();
        if (!Lossy &&
            ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
              APFloat::cmpLessThan) || Fabs.isZero()))
          return new FCmpInst(Pred, LHSExt->getOperand(0),
                              ConstantFP::get(RHSC->getContext(), F));
        break;
      }
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
        break;
      }
      case Instruction::Load:
        if (GetElementPtrInst *GEP =
                dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
        if (IID != Intrinsic::fabs)
          break;

        // Various optimizations for fabs compared with zero.
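        // Note (reasoning follows from IEEE-754 semantics): fabs never
        // produces a negative value, so the ordered compare
        // 'fabs(x) >= 0.0' can fail only when x is NaN, which is why
        // FCMP_OGE below lowers to the ordered check 'fcmp ord x, 0.0'.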
        switch (Pred) {
        default:
          break;
        // fabs(x) < 0 --> false
        case FCmpInst::FCMP_OLT:
          llvm_unreachable("handled by SimplifyFCmpInst");
        // fabs(x) > 0 --> x != 0
        case FCmpInst::FCMP_OGT:
          return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
        // fabs(x) <= 0 --> x == 0
        case FCmpInst::FCMP_OLE:
          return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
        // fabs(x) >= 0 --> !isnan(x)
        case FCmpInst::FCMP_OGE:
          return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
        // fabs(x) == 0 --> x == 0
        // fabs(x) != 0 --> x != 0
        case FCmpInst::FCMP_OEQ:
        case FCmpInst::FCMP_UEQ:
        case FCmpInst::FCMP_ONE:
        case FCmpInst::FCMP_UNE:
          return new FCmpInst(Pred, CI->getArgOperand(0), RHSC);
        }
      }
      }
  }

  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(Pred, LHSExt->getOperand(0),
                            RHSExt->getOperand(0));

  if (I.getType()->isVectorTy())
    if (Instruction *Res = foldVectorCmp(I, Builder))
      return Res;

  return Changed ? &I : nullptr;
}