//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");


/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);

  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);

  return Overflow;
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if the
/// result of the comparison is true when the input value is signed.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS.isNullValue();
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isSignMask();
  default:
    return false;
  }
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = Known.One;
  Max = Known.One|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}

/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One|UnknownBits;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        CmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index. Collect the indices. This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr;  // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr;  // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or less in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly. We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder.CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type if we have a DataLayout
    // - Default to i32
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
///
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to intptrty in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
                                           true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}

/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = dyn_cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {

    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      NewInsts[CI] = NewInsts[CI->getOperand(0)];
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && dyn_cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEP's Pointer and Index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getPointerTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try to convert this to an indexed compare by looking through
      // PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    if (GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
            Builder.getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))  --->  (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts as
  // a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
      // These intrinsics don't escape or compare the pointer. Memset is safe
      // because we don't allow ptrtoint. Memcpy and memmove are safe because
      // we don't allow stores, so src cannot point to V.
      case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
      case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
        continue;
      default:
        return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}

/// Fold "icmp pred (X+CI), X".
Instruction *InstCombiner::foldICmpAddOpConst(Value *X, ConstantInt *CI,
                                              ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // operators.

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Value *R =
        ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));

  unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
  ConstantInt *SMax = ConstantInt::get(X->getContext(),
                                       APInt::getSignedMaxValue(BitWidth));

  // (X+ 1) <s X       --> X >s (MAXSINT-1)        --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)        --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)  --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)  --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)      --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)      --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  Constant *C = Builder.getInt(CI->getValue() - 1);
  return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
}

/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes downwards the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(I.getModule(),
                                       Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy &Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder.SetInsertPoint(OrigAdd);

  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}

// Fold icmp Pred X, C.
Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Cmp.getOperand(0);

  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  Value *A = nullptr, *B = nullptr;

  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an addition
  // in wider type and explicitly checks for overflow using comparisons against
  // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  // sum = a + b
  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  {
    ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
    if (Pred == ICmpInst::ICMP_UGT &&
        match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
      if (Instruction *Res = processUGT_ADDCST_ADD(
              Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
        return Res;
  }

  // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
  if (C->isNullValue() && Pred == ICmpInst::ICMP_SGT) {
    SelectPatternResult SPR = matchSelectPattern(X, A, B);
    if (SPR.Flavor == SPF_SMIN) {
      if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }

  // FIXME: Use m_APInt to allow folds for splat constants.
  ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1));
  if (!CI)
    return nullptr;

  // Canonicalize icmp instructions based on dominating conditions.
  BasicBlock *Parent = Cmp.getParent();
  BasicBlock *Dom = Parent->getSinglePredecessor();
  auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr;
  ICmpInst::Predicate Pred2;
  BasicBlock *TrueBB, *FalseBB;
  ConstantInt *CI2;
  if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)),
                           TrueBB, FalseBB)) &&
      TrueBB != FalseBB) {
    ConstantRange CR =
        ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue());
    ConstantRange DominatingCR =
        (Parent == TrueBB)
            ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue())
            : ConstantRange::makeExactICmpRegion(
                  CmpInst::getInversePredicate(Pred2), CI2->getValue());
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getTrue());

    // If this is a normal comparison, it demands all bits. If it is a sign
    // bit comparison, it only demands the sign bit.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit);

    // Canonicalizing a sign bit comparison that gets used in a branch
    // pessimizes codegen by generating a branch-on-zero instruction instead
    // of a test and branch. So we avoid canonicalizing in such situations
    // because a test-and-branch instruction has better branch displacement
    // than a compare-and-branch instruction.
    if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
      return nullptr;

    if (auto *AI = Intersection.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*AI));
    if (auto *AD = Difference.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*AD));
  }

  return nullptr;
}

/// Fold icmp (trunc X, Y), C.
Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
                                                 TruncInst *Trunc,
                                                 const APInt &C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  if (C.isOneValue() && C.getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  if (Cmp.isEquality() && Trunc->hasOneUse()) {
    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
             SrcBits = X->getType()->getScalarSizeInBits();
    KnownBits Known = computeKnownBits(X, 0, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
      // Pull in the high bits from known-ones set.
      APInt NewRHS = C.zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
    }
  }

  return nullptr;
}

/// Fold icmp (xor X, Y), C.
Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
                                               BinaryOperator *Xor,
                                               const APInt &C) {
  Value *X = Xor->getOperand(0);
  Value *Y = Xor->getOperand(1);
  const APInt *XorC;
  if (!match(Y, m_APInt(XorC)))
    return nullptr;

  // If this is a comparison that tests the signbit (X < 0) or (x > -1),
  // fold the xor.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  bool TrueIfSigned = false;
  if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {

    // If the sign bit of the XorCst is not set, there is no change to
    // the operation, just stop using the Xor.
    if (!XorC->isNegative()) {
      Cmp.setOperand(0, X);
      Worklist.Add(Xor);
      return &Cmp;
    }

    // Emit the opposite comparison.
    if (TrueIfSigned)
      return new ICmpInst(ICmpInst::ICMP_SGT, X,
                          ConstantInt::getAllOnesValue(X->getType()));
    else
      return new ICmpInst(ICmpInst::ICMP_SLT, X,
                          ConstantInt::getNullValue(X->getType()));
  }

  if (Xor->hasOneUse()) {
    // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
    if (!Cmp.isEquality() && XorC->isSignMask()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
Cmp.getUnsignedPredicate() 1484 : Cmp.getSignedPredicate(); 1485 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC)); 1486 } 1487 1488 // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask)) 1489 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) { 1490 Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate() 1491 : Cmp.getSignedPredicate(); 1492 Pred = Cmp.getSwappedPredicate(Pred); 1493 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC)); 1494 } 1495 } 1496 1497 // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C) 1498 // iff -C is a power of 2 1499 if (Pred == ICmpInst::ICMP_UGT && *XorC == ~C && (C + 1).isPowerOf2()) 1500 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 1501 1502 // (icmp ult (xor X, C), -C) -> (icmp uge X, C) 1503 // iff -C is a power of 2 1504 if (Pred == ICmpInst::ICMP_ULT && *XorC == -C && C.isPowerOf2()) 1505 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 1506 1507 return nullptr; 1508 } 1509 1510 /// Fold icmp (and (sh X, Y), C2), C1. 1511 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, 1512 const APInt &C1, const APInt &C2) { 1513 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0)); 1514 if (!Shift || !Shift->isShift()) 1515 return nullptr; 1516 1517 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could 1518 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in 1519 // code produced by the clang front-end, for bitfield access. 1520 // This seemingly simple opportunity to fold away a shift turns out to be 1521 // rather complicated. See PR17827 for details. 1522 unsigned ShiftOpcode = Shift->getOpcode(); 1523 bool IsShl = ShiftOpcode == Instruction::Shl; 1524 const APInt *C3; 1525 if (match(Shift->getOperand(1), m_APInt(C3))) { 1526 bool CanFold = false; 1527 if (ShiftOpcode == Instruction::Shl) { 1528 // For a left shift, we can fold if the comparison is not signed. We can 1529 // also fold a signed comparison if the mask value and comparison value 1530 // are not negative. These constraints may not be obvious, but we can 1531 // prove that they are correct using an SMT solver. 1532 if (!Cmp.isSigned() || (!C2.isNegative() && !C1.isNegative())) 1533 CanFold = true; 1534 } else { 1535 bool IsAshr = ShiftOpcode == Instruction::AShr; 1536 // For a logical right shift, we can fold if the comparison is not signed. 1537 // We can also fold a signed comparison if the shifted mask value and the 1538 // shifted comparison value are not negative. These constraints may not be 1539 // obvious, but we can prove that they are correct using an SMT solver. 1540 // For an arithmetic shift right we can do the same, if we ensure 1541 // the And doesn't use any bits being shifted in. Normally these would 1542 // be turned into lshr by SimplifyDemandedBits, but not if there is an 1543 // additional user. 1544 if (!IsAshr || (C2.shl(*C3).lshr(*C3) == C2)) { 1545 if (!Cmp.isSigned() || 1546 (!C2.shl(*C3).isNegative() && !C1.shl(*C3).isNegative())) 1547 CanFold = true; 1548 } 1549 } 1550 1551 if (CanFold) { 1552 APInt NewCst = IsShl ? C1.lshr(*C3) : C1.shl(*C3); 1553 APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3); 1554 // Check to see if we are shifting out any of the bits being compared. 1555 if (SameAsC1 != C1) { 1556 // If we shifted bits out, the fold is not going to work out. As a 1557 // special case, check to see if this means that the result is always 1558 // true or false now. 
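        // For example, with i8 values, ((X u>> 3) & 0x1F) == 0x3F can never
        // be true: shifting 0x3F left by 3 gives 0xF8, and shifting back down
        // yields 0x1F != 0x3F, so the equality folds to false and the
        // inequality to true.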
1559 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ) 1560 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType())); 1561 if (Cmp.getPredicate() == ICmpInst::ICMP_NE) 1562 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType())); 1563 } else { 1564 Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst)); 1565 APInt NewAndCst = IsShl ? C2.lshr(*C3) : C2.shl(*C3); 1566 And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst)); 1567 And->setOperand(0, Shift->getOperand(0)); 1568 Worklist.Add(Shift); // Shift is dead. 1569 return &Cmp; 1570 } 1571 } 1572 } 1573 1574 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is 1575 // preferable because it allows the C2 << Y expression to be hoisted out of a 1576 // loop if Y is invariant and X is not. 1577 if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() && 1578 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) { 1579 // Compute C2 << Y. 1580 Value *NewShift = 1581 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1)) 1582 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1)); 1583 1584 // Compute X & (C2 << Y). 1585 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift); 1586 Cmp.setOperand(0, NewAnd); 1587 return &Cmp; 1588 } 1589 1590 return nullptr; 1591 } 1592 1593 /// Fold icmp (and X, C2), C1. 1594 Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp, 1595 BinaryOperator *And, 1596 const APInt &C1) { 1597 const APInt *C2; 1598 if (!match(And->getOperand(1), m_APInt(C2))) 1599 return nullptr; 1600 1601 if (!And->hasOneUse()) 1602 return nullptr; 1603 1604 // If the LHS is an 'and' of a truncate and we can widen the and/compare to 1605 // the input width without changing the value produced, eliminate the cast: 1606 // 1607 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1' 1608 // 1609 // We can do this transformation if the constants do not have their sign bits 1610 // set or if it is an equality comparison. Extending a relational comparison 1611 // when we're checking the sign bit would not work. 1612 Value *W; 1613 if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) && 1614 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) { 1615 // TODO: Is this a good transform for vectors? Wider types may reduce 1616 // throughput. Should this transform be limited (even for scalars) by using 1617 // shouldChangeType()? 
1618 if (!Cmp.getType()->isVectorTy()) { 1619 Type *WideType = W->getType(); 1620 unsigned WideScalarBits = WideType->getScalarSizeInBits(); 1621 Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits)); 1622 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits)); 1623 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName()); 1624 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1); 1625 } 1626 } 1627 1628 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2)) 1629 return I; 1630 1631 // (icmp pred (and (or (lshr A, B), A), 1), 0) --> 1632 // (icmp pred (and A, (or (shl 1, B), 1), 0)) 1633 // 1634 // iff pred isn't signed 1635 if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() && 1636 match(And->getOperand(1), m_One())) { 1637 Constant *One = cast<Constant>(And->getOperand(1)); 1638 Value *Or = And->getOperand(0); 1639 Value *A, *B, *LShr; 1640 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) && 1641 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) { 1642 unsigned UsesRemoved = 0; 1643 if (And->hasOneUse()) 1644 ++UsesRemoved; 1645 if (Or->hasOneUse()) 1646 ++UsesRemoved; 1647 if (LShr->hasOneUse()) 1648 ++UsesRemoved; 1649 1650 // Compute A & ((1 << B) | 1) 1651 Value *NewOr = nullptr; 1652 if (auto *C = dyn_cast<Constant>(B)) { 1653 if (UsesRemoved >= 1) 1654 NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One); 1655 } else { 1656 if (UsesRemoved >= 3) 1657 NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(), 1658 /*HasNUW=*/true), 1659 One, Or->getName()); 1660 } 1661 if (NewOr) { 1662 Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName()); 1663 Cmp.setOperand(0, NewAnd); 1664 return &Cmp; 1665 } 1666 } 1667 } 1668 1669 return nullptr; 1670 } 1671 1672 /// Fold icmp (and X, Y), C. 1673 Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp, 1674 BinaryOperator *And, 1675 const APInt &C) { 1676 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C)) 1677 return I; 1678 1679 // TODO: These all require that Y is constant too, so refactor with the above. 1680 1681 // Try to optimize things like "A[i] & 42 == 0" to index computations. 1682 Value *X = And->getOperand(0); 1683 Value *Y = And->getOperand(1); 1684 if (auto *LI = dyn_cast<LoadInst>(X)) 1685 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) 1686 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 1687 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 1688 !LI->isVolatile() && isa<ConstantInt>(Y)) { 1689 ConstantInt *C2 = cast<ConstantInt>(Y); 1690 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2)) 1691 return Res; 1692 } 1693 1694 if (!Cmp.isEquality()) 1695 return nullptr; 1696 1697 // X & -C == -C -> X > u ~C 1698 // X & -C != -C -> X <= u ~C 1699 // iff C is a power of 2 1700 if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) { 1701 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT 1702 : CmpInst::ICMP_ULE; 1703 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1)))); 1704 } 1705 1706 // (X & C2) == 0 -> (trunc X) >= 0 1707 // (X & C2) != 0 -> (trunc X) < 0 1708 // iff C2 is a power of 2 and it masks the sign bit of a legal integer type. 
1709 const APInt *C2; 1710 if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) { 1711 int32_t ExactLogBase2 = C2->exactLogBase2(); 1712 if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) { 1713 Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); 1714 if (And->getType()->isVectorTy()) 1715 NTy = VectorType::get(NTy, And->getType()->getVectorNumElements()); 1716 Value *Trunc = Builder.CreateTrunc(X, NTy); 1717 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE 1718 : CmpInst::ICMP_SLT; 1719 return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy)); 1720 } 1721 } 1722 1723 return nullptr; 1724 } 1725 1726 /// Fold icmp (or X, Y), C. 1727 Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, 1728 const APInt &C) { 1729 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1730 if (C.isOneValue()) { 1731 // icmp slt signum(V) 1 --> icmp slt V, 1 1732 Value *V = nullptr; 1733 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V)))) 1734 return new ICmpInst(ICmpInst::ICMP_SLT, V, 1735 ConstantInt::get(V->getType(), 1)); 1736 } 1737 1738 // X | C == C --> X <=u C 1739 // X | C != C --> X >u C 1740 // iff C+1 is a power of 2 (C is a bitmask of the low bits) 1741 if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) && 1742 (C + 1).isPowerOf2()) { 1743 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT; 1744 return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1)); 1745 } 1746 1747 if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse()) 1748 return nullptr; 1749 1750 Value *P, *Q; 1751 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) { 1752 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0 1753 // -> and (icmp eq P, null), (icmp eq Q, null). 1754 Value *CmpP = 1755 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType())); 1756 Value *CmpQ = 1757 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType())); 1758 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1759 return BinaryOperator::Create(BOpc, CmpP, CmpQ); 1760 } 1761 1762 // Are we using xors to bitwise check for a pair of (in)equalities? Convert to 1763 // a shorter form that has more potential to be folded even further. 1764 Value *X1, *X2, *X3, *X4; 1765 if (match(Or->getOperand(0), m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) && 1766 match(Or->getOperand(1), m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) { 1767 // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4) 1768 // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4) 1769 Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2); 1770 Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4); 1771 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1772 return BinaryOperator::Create(BOpc, Cmp12, Cmp34); 1773 } 1774 1775 return nullptr; 1776 } 1777 1778 /// Fold icmp (mul X, Y), C. 1779 Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp, 1780 BinaryOperator *Mul, 1781 const APInt &C) { 1782 const APInt *MulC; 1783 if (!match(Mul->getOperand(1), m_APInt(MulC))) 1784 return nullptr; 1785 1786 // If this is a test of the sign bit and the multiply is sign-preserving with 1787 // a constant operand, use the multiply LHS operand instead. 
1788 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1789 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) { 1790 if (MulC->isNegative()) 1791 Pred = ICmpInst::getSwappedPredicate(Pred); 1792 return new ICmpInst(Pred, Mul->getOperand(0), 1793 Constant::getNullValue(Mul->getType())); 1794 } 1795 1796 return nullptr; 1797 } 1798 1799 /// Fold icmp (shl 1, Y), C. 1800 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl, 1801 const APInt &C) { 1802 Value *Y; 1803 if (!match(Shl, m_Shl(m_One(), m_Value(Y)))) 1804 return nullptr; 1805 1806 Type *ShiftType = Shl->getType(); 1807 unsigned TypeBits = C.getBitWidth(); 1808 bool CIsPowerOf2 = C.isPowerOf2(); 1809 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1810 if (Cmp.isUnsigned()) { 1811 // (1 << Y) pred C -> Y pred Log2(C) 1812 if (!CIsPowerOf2) { 1813 // (1 << Y) < 30 -> Y <= 4 1814 // (1 << Y) <= 30 -> Y <= 4 1815 // (1 << Y) >= 30 -> Y > 4 1816 // (1 << Y) > 30 -> Y > 4 1817 if (Pred == ICmpInst::ICMP_ULT) 1818 Pred = ICmpInst::ICMP_ULE; 1819 else if (Pred == ICmpInst::ICMP_UGE) 1820 Pred = ICmpInst::ICMP_UGT; 1821 } 1822 1823 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31 1824 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31 1825 unsigned CLog2 = C.logBase2(); 1826 if (CLog2 == TypeBits - 1) { 1827 if (Pred == ICmpInst::ICMP_UGE) 1828 Pred = ICmpInst::ICMP_EQ; 1829 else if (Pred == ICmpInst::ICMP_ULT) 1830 Pred = ICmpInst::ICMP_NE; 1831 } 1832 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2)); 1833 } else if (Cmp.isSigned()) { 1834 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1); 1835 if (C.isAllOnesValue()) { 1836 // (1 << Y) <= -1 -> Y == 31 1837 if (Pred == ICmpInst::ICMP_SLE) 1838 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 1839 1840 // (1 << Y) > -1 -> Y != 31 1841 if (Pred == ICmpInst::ICMP_SGT) 1842 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 1843 } else if (!C) { 1844 // (1 << Y) < 0 -> Y == 31 1845 // (1 << Y) <= 0 -> Y == 31 1846 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 1847 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 1848 1849 // (1 << Y) >= 0 -> Y != 31 1850 // (1 << Y) > 0 -> Y != 31 1851 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 1852 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 1853 } 1854 } else if (Cmp.isEquality() && CIsPowerOf2) { 1855 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2())); 1856 } 1857 1858 return nullptr; 1859 } 1860 1861 /// Fold icmp (shl X, Y), C. 1862 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp, 1863 BinaryOperator *Shl, 1864 const APInt &C) { 1865 const APInt *ShiftVal; 1866 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal))) 1867 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal); 1868 1869 const APInt *ShiftAmt; 1870 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt))) 1871 return foldICmpShlOne(Cmp, Shl, C); 1872 1873 // Check that the shift amount is in range. If not, don't perform undefined 1874 // shifts. When the shift is visited, it will be simplified. 1875 unsigned TypeBits = C.getBitWidth(); 1876 if (ShiftAmt->uge(TypeBits)) 1877 return nullptr; 1878 1879 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1880 Value *X = Shl->getOperand(0); 1881 Type *ShType = Shl->getType(); 1882 1883 // NSW guarantees that we are only shifting out sign bits from the high bits, 1884 // so we can ASHR the compare constant without needing a mask and eliminate 1885 // the shift. 
1886 if (Shl->hasNoSignedWrap()) { 1887 if (Pred == ICmpInst::ICMP_SGT) { 1888 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt) 1889 APInt ShiftedC = C.ashr(*ShiftAmt); 1890 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1891 } 1892 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) { 1893 // This is the same code as the SGT case, but assert the pre-condition 1894 // that is needed for this to work with equality predicates. 1895 assert(C.ashr(*ShiftAmt).shl(*ShiftAmt) == C && 1896 "Compare known true or false was not folded"); 1897 APInt ShiftedC = C.ashr(*ShiftAmt); 1898 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1899 } 1900 if (Pred == ICmpInst::ICMP_SLT) { 1901 // SLE is the same as above, but SLE is canonicalized to SLT, so convert: 1902 // (X << S) <=s C is equiv to X <=s (C >> S) for all C 1903 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX 1904 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN 1905 assert(!C.isMinSignedValue() && "Unexpected icmp slt"); 1906 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1; 1907 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1908 } 1909 // If this is a signed comparison to 0 and the shift is sign preserving, 1910 // use the shift LHS operand instead; isSignTest may change 'Pred', so only 1911 // do that if we're sure to not continue on in this function. 1912 if (isSignTest(Pred, C)) 1913 return new ICmpInst(Pred, X, Constant::getNullValue(ShType)); 1914 } 1915 1916 // NUW guarantees that we are only shifting out zero bits from the high bits, 1917 // so we can LSHR the compare constant without needing a mask and eliminate 1918 // the shift. 1919 if (Shl->hasNoUnsignedWrap()) { 1920 if (Pred == ICmpInst::ICMP_UGT) { 1921 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt) 1922 APInt ShiftedC = C.lshr(*ShiftAmt); 1923 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1924 } 1925 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) { 1926 // This is the same code as the UGT case, but assert the pre-condition 1927 // that is needed for this to work with equality predicates. 1928 assert(C.lshr(*ShiftAmt).shl(*ShiftAmt) == C && 1929 "Compare known true or false was not folded"); 1930 APInt ShiftedC = C.lshr(*ShiftAmt); 1931 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1932 } 1933 if (Pred == ICmpInst::ICMP_ULT) { 1934 // ULE is the same as above, but ULE is canonicalized to ULT, so convert: 1935 // (X << S) <=u C is equiv to X <=u (C >> S) for all C 1936 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u 1937 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0 1938 assert(C.ugt(0) && "ult 0 should have been eliminated"); 1939 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1; 1940 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1941 } 1942 } 1943 1944 if (Cmp.isEquality() && Shl->hasOneUse()) { 1945 // Strength-reduce the shift into an 'and'. 1946 Constant *Mask = ConstantInt::get( 1947 ShType, 1948 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue())); 1949 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask"); 1950 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt)); 1951 return new ICmpInst(Pred, And, LShrC); 1952 } 1953 1954 // Otherwise, if this is a comparison of the sign bit, simplify to and/test. 
1955 bool TrueIfSigned = false; 1956 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) { 1957 // (X << 31) <s 0 --> (X & 1) != 0 1958 Constant *Mask = ConstantInt::get( 1959 ShType, 1960 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1)); 1961 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask"); 1962 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ, 1963 And, Constant::getNullValue(ShType)); 1964 } 1965 1966 // Transform (icmp pred iM (shl iM %v, N), C) 1967 // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N)) 1968 // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N. 1969 // This enables us to get rid of the shift in favor of a trunc that may be 1970 // free on the target. It has the additional benefit of comparing to a 1971 // smaller constant that may be more target-friendly. 1972 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1); 1973 if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt && 1974 DL.isLegalInteger(TypeBits - Amt)) { 1975 Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt); 1976 if (ShType->isVectorTy()) 1977 TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements()); 1978 Constant *NewC = 1979 ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt)); 1980 return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC); 1981 } 1982 1983 return nullptr; 1984 } 1985 1986 /// Fold icmp ({al}shr X, Y), C. 1987 Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp, 1988 BinaryOperator *Shr, 1989 const APInt &C) { 1990 // An exact shr only shifts out zero bits, so: 1991 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0 1992 Value *X = Shr->getOperand(0); 1993 CmpInst::Predicate Pred = Cmp.getPredicate(); 1994 if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() && 1995 C.isNullValue()) 1996 return new ICmpInst(Pred, X, Cmp.getOperand(1)); 1997 1998 const APInt *ShiftVal; 1999 if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal))) 2000 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal); 2001 2002 const APInt *ShiftAmt; 2003 if (!match(Shr->getOperand(1), m_APInt(ShiftAmt))) 2004 return nullptr; 2005 2006 // Check that the shift amount is in range. If not, don't perform undefined 2007 // shifts. When the shift is visited it will be simplified. 2008 unsigned TypeBits = C.getBitWidth(); 2009 unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits); 2010 if (ShAmtVal >= TypeBits || ShAmtVal == 0) 2011 return nullptr; 2012 2013 bool IsAShr = Shr->getOpcode() == Instruction::AShr; 2014 bool IsExact = Shr->isExact(); 2015 Type *ShrTy = Shr->getType(); 2016 // TODO: If we could guarantee that InstSimplify would handle all of the 2017 // constant-value-based preconditions in the folds below, then we could assert 2018 // those conditions rather than checking them. This is difficult because of 2019 // undef/poison (PR34838). 
2020 if (IsAShr) { 2021 if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) { 2022 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC) 2023 // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC) 2024 APInt ShiftedC = C.shl(ShAmtVal); 2025 if (ShiftedC.ashr(ShAmtVal) == C) 2026 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2027 } 2028 if (Pred == CmpInst::ICMP_SGT) { 2029 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1 2030 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1; 2031 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() && 2032 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1)) 2033 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2034 } 2035 } else { 2036 if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) { 2037 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC) 2038 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC) 2039 APInt ShiftedC = C.shl(ShAmtVal); 2040 if (ShiftedC.lshr(ShAmtVal) == C) 2041 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2042 } 2043 if (Pred == CmpInst::ICMP_UGT) { 2044 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1 2045 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1; 2046 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1)) 2047 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2048 } 2049 } 2050 2051 if (!Cmp.isEquality()) 2052 return nullptr; 2053 2054 // Handle equality comparisons of shift-by-constant. 2055 2056 // If the comparison constant changes with the shift, the comparison cannot 2057 // succeed (bits of the comparison constant cannot match the shifted value). 2058 // This should be known by InstSimplify and already be folded to true/false. 2059 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) || 2060 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) && 2061 "Expected icmp+shr simplify did not occur."); 2062 2063 // Check if the bits shifted out are known to be zero. If so, we can compare 2064 // against the unshifted value: 2065 // (X & 4) >> 1 == 2 --> (X & 4) == 4. 2066 Constant *ShiftedCmpRHS = ConstantInt::get(ShrTy, C << ShAmtVal); 2067 if (Shr->hasOneUse()) { 2068 if (Shr->isExact()) 2069 return new ICmpInst(Pred, X, ShiftedCmpRHS); 2070 2071 // Otherwise strength reduce the shift into an 'and'. 2072 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal)); 2073 Constant *Mask = ConstantInt::get(ShrTy, Val); 2074 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask"); 2075 return new ICmpInst(Pred, And, ShiftedCmpRHS); 2076 } 2077 2078 return nullptr; 2079 } 2080 2081 /// Fold icmp (udiv X, Y), C. 
2082 Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp, 2083 BinaryOperator *UDiv, 2084 const APInt &C) { 2085 const APInt *C2; 2086 if (!match(UDiv->getOperand(0), m_APInt(C2))) 2087 return nullptr; 2088 2089 assert(*C2 != 0 && "udiv 0, X should have been simplified already."); 2090 2091 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1)) 2092 Value *Y = UDiv->getOperand(1); 2093 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) { 2094 assert(!C.isMaxValue() && 2095 "icmp ugt X, UINT_MAX should have been simplified already."); 2096 return new ICmpInst(ICmpInst::ICMP_ULE, Y, 2097 ConstantInt::get(Y->getType(), C2->udiv(C + 1))); 2098 } 2099 2100 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C) 2101 if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) { 2102 assert(C != 0 && "icmp ult X, 0 should have been simplified already."); 2103 return new ICmpInst(ICmpInst::ICMP_UGT, Y, 2104 ConstantInt::get(Y->getType(), C2->udiv(C))); 2105 } 2106 2107 return nullptr; 2108 } 2109 2110 /// Fold icmp ({su}div X, Y), C. 2111 Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp, 2112 BinaryOperator *Div, 2113 const APInt &C) { 2114 // Fold: icmp pred ([us]div X, C2), C -> range test 2115 // Fold this div into the comparison, producing a range check. 2116 // Determine, based on the divide type, what the range is being 2117 // checked. If there is an overflow on the low or high side, remember 2118 // it, otherwise compute the range [low, hi) bounding the new value. 2119 // See: InsertRangeTest above for the kinds of replacements possible. 2120 const APInt *C2; 2121 if (!match(Div->getOperand(1), m_APInt(C2))) 2122 return nullptr; 2123 2124 // FIXME: If the operand types don't match the type of the divide 2125 // then don't attempt this transform. The code below doesn't have the 2126 // logic to deal with a signed divide and an unsigned compare (and 2127 // vice versa). This is because (x /s C2) <s C produces different 2128 // results than (x /s C2) <u C or (x /u C2) <s C or even 2129 // (x /u C2) <u C. Simply casting the operands and result won't 2130 // work. :( The if statement below tests that condition and bails 2131 // if it finds it. 2132 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv; 2133 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned()) 2134 return nullptr; 2135 2136 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with 2137 // INT_MIN will also fail if the divisor is 1. Although folds of all these 2138 // division-by-constant cases should be present, we can not assert that they 2139 // have happened before we reach this icmp instruction. 2140 if (C2->isNullValue() || C2->isOneValue() || 2141 (DivIsSigned && C2->isAllOnesValue())) 2142 return nullptr; 2143 2144 // Compute Prod = C * C2. We are essentially solving an equation of 2145 // form X / C2 = C. We solve for X by multiplying C2 and C. 2146 // By solving for X, we can turn this into a range check instead of computing 2147 // a divide. 2148 APInt Prod = C * *C2; 2149 2150 // Determine if the product overflows by seeing if the product is not equal to 2151 // the divide. Make sure we do the same kind of divide as in the LHS 2152 // instruction that we're folding. 2153 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C; 2154 2155 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2156 2157 // If the division is known to be exact, then there is no remainder from the 2158 // divide, so the covered range size is unit, otherwise it is the divisor. 
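  // For example, "X /u 5 == 3" holds exactly for X in [15, 20), a range of
  // five values; with an exact udiv only X == 15 is possible, matching a
  // RangeSize of 1.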
2159 APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2; 2160 2161 // Figure out the interval that is being checked. For example, a comparison 2162 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5). 2163 // Compute this interval based on the constants involved and the signedness of 2164 // the compare/divide. This computes a half-open interval, keeping track of 2165 // whether either value in the interval overflows. After analysis each 2166 // overflow variable is set to 0 if it's corresponding bound variable is valid 2167 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end. 2168 int LoOverflow = 0, HiOverflow = 0; 2169 APInt LoBound, HiBound; 2170 2171 if (!DivIsSigned) { // udiv 2172 // e.g. X/5 op 3 --> [15, 20) 2173 LoBound = Prod; 2174 HiOverflow = LoOverflow = ProdOV; 2175 if (!HiOverflow) { 2176 // If this is not an exact divide, then many values in the range collapse 2177 // to the same result value. 2178 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false); 2179 } 2180 } else if (C2->isStrictlyPositive()) { // Divisor is > 0. 2181 if (C.isNullValue()) { // (X / pos) op 0 2182 // Can't overflow. e.g. X/2 op 0 --> [-1, 2) 2183 LoBound = -(RangeSize - 1); 2184 HiBound = RangeSize; 2185 } else if (C.isStrictlyPositive()) { // (X / pos) op pos 2186 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20) 2187 HiOverflow = LoOverflow = ProdOV; 2188 if (!HiOverflow) 2189 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true); 2190 } else { // (X / pos) op neg 2191 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14) 2192 HiBound = Prod + 1; 2193 LoOverflow = HiOverflow = ProdOV ? -1 : 0; 2194 if (!LoOverflow) { 2195 APInt DivNeg = -RangeSize; 2196 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0; 2197 } 2198 } 2199 } else if (C2->isNegative()) { // Divisor is < 0. 2200 if (Div->isExact()) 2201 RangeSize.negate(); 2202 if (C.isNullValue()) { // (X / neg) op 0 2203 // e.g. X/-5 op 0 --> [-4, 5) 2204 LoBound = RangeSize + 1; 2205 HiBound = -RangeSize; 2206 if (HiBound == *C2) { // -INTMIN = INTMIN 2207 HiOverflow = 1; // [INTMIN+1, overflow) 2208 HiBound = APInt(); // e.g. X/INTMIN = 0 --> X > INTMIN 2209 } 2210 } else if (C.isStrictlyPositive()) { // (X / neg) op pos 2211 // e.g. X/-5 op 3 --> [-19, -14) 2212 HiBound = Prod + 1; 2213 HiOverflow = LoOverflow = ProdOV ? -1 : 0; 2214 if (!LoOverflow) 2215 LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0; 2216 } else { // (X / neg) op neg 2217 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20) 2218 LoOverflow = HiOverflow = ProdOV; 2219 if (!HiOverflow) 2220 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true); 2221 } 2222 2223 // Dividing by a negative swaps the condition. LT <-> GT 2224 Pred = ICmpInst::getSwappedPredicate(Pred); 2225 } 2226 2227 Value *X = Div->getOperand(0); 2228 switch (Pred) { 2229 default: llvm_unreachable("Unhandled icmp opcode!"); 2230 case ICmpInst::ICMP_EQ: 2231 if (LoOverflow && HiOverflow) 2232 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2233 if (HiOverflow) 2234 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : 2235 ICmpInst::ICMP_UGE, X, 2236 ConstantInt::get(Div->getType(), LoBound)); 2237 if (LoOverflow) 2238 return new ICmpInst(DivIsSigned ? 
ICmpInst::ICMP_SLT : 2239 ICmpInst::ICMP_ULT, X, 2240 ConstantInt::get(Div->getType(), HiBound)); 2241 return replaceInstUsesWith( 2242 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true)); 2243 case ICmpInst::ICMP_NE: 2244 if (LoOverflow && HiOverflow) 2245 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2246 if (HiOverflow) 2247 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : 2248 ICmpInst::ICMP_ULT, X, 2249 ConstantInt::get(Div->getType(), LoBound)); 2250 if (LoOverflow) 2251 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : 2252 ICmpInst::ICMP_UGE, X, 2253 ConstantInt::get(Div->getType(), HiBound)); 2254 return replaceInstUsesWith(Cmp, 2255 insertRangeTest(X, LoBound, HiBound, 2256 DivIsSigned, false)); 2257 case ICmpInst::ICMP_ULT: 2258 case ICmpInst::ICMP_SLT: 2259 if (LoOverflow == +1) // Low bound is greater than input range. 2260 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2261 if (LoOverflow == -1) // Low bound is less than input range. 2262 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2263 return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound)); 2264 case ICmpInst::ICMP_UGT: 2265 case ICmpInst::ICMP_SGT: 2266 if (HiOverflow == +1) // High bound greater than input range. 2267 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2268 if (HiOverflow == -1) // High bound less than input range. 2269 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2270 if (Pred == ICmpInst::ICMP_UGT) 2271 return new ICmpInst(ICmpInst::ICMP_UGE, X, 2272 ConstantInt::get(Div->getType(), HiBound)); 2273 return new ICmpInst(ICmpInst::ICMP_SGE, X, 2274 ConstantInt::get(Div->getType(), HiBound)); 2275 } 2276 2277 return nullptr; 2278 } 2279 2280 /// Fold icmp (sub X, Y), C. 2281 Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp, 2282 BinaryOperator *Sub, 2283 const APInt &C) { 2284 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1); 2285 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2286 2287 // The following transforms are only worth it if the only user of the subtract 2288 // is the icmp. 2289 if (!Sub->hasOneUse()) 2290 return nullptr; 2291 2292 if (Sub->hasNoSignedWrap()) { 2293 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y) 2294 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue()) 2295 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 2296 2297 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y) 2298 if (Pred == ICmpInst::ICMP_SGT && C.isNullValue()) 2299 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 2300 2301 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y) 2302 if (Pred == ICmpInst::ICMP_SLT && C.isNullValue()) 2303 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 2304 2305 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y) 2306 if (Pred == ICmpInst::ICMP_SLT && C.isOneValue()) 2307 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 2308 } 2309 2310 const APInt *C2; 2311 if (!match(X, m_APInt(C2))) 2312 return nullptr; 2313 2314 // C2 - Y <u C -> (Y | (C - 1)) == C2 2315 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2 2316 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && 2317 (*C2 & (C - 1)) == (C - 1)) 2318 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X); 2319 2320 // C2 - Y >u C -> (Y | C) != C2 2321 // iff C2 & C == C and C + 1 is a power of 2 2322 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C) 2323 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X); 2324 2325 return nullptr; 2326 } 2327 2328 /// Fold icmp (add X, Y), C. 
2329 Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp, 2330 BinaryOperator *Add, 2331 const APInt &C) { 2332 Value *Y = Add->getOperand(1); 2333 const APInt *C2; 2334 if (Cmp.isEquality() || !match(Y, m_APInt(C2))) 2335 return nullptr; 2336 2337 // Fold icmp pred (add X, C2), C. 2338 Value *X = Add->getOperand(0); 2339 Type *Ty = Add->getType(); 2340 CmpInst::Predicate Pred = Cmp.getPredicate(); 2341 2342 // If the add does not wrap, we can always adjust the compare by subtracting 2343 // the constants. Equality comparisons are handled elsewhere. SGE/SLE are 2344 // canonicalized to SGT/SLT. 2345 if (Add->hasNoSignedWrap() && 2346 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) { 2347 bool Overflow; 2348 APInt NewC = C.ssub_ov(*C2, Overflow); 2349 // If there is overflow, the result must be true or false. 2350 // TODO: Can we assert there is no overflow because InstSimplify always 2351 // handles those cases? 2352 if (!Overflow) 2353 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2) 2354 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC)); 2355 } 2356 2357 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2); 2358 const APInt &Upper = CR.getUpper(); 2359 const APInt &Lower = CR.getLower(); 2360 if (Cmp.isSigned()) { 2361 if (Lower.isSignMask()) 2362 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper)); 2363 if (Upper.isSignMask()) 2364 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower)); 2365 } else { 2366 if (Lower.isMinValue()) 2367 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper)); 2368 if (Upper.isMinValue()) 2369 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower)); 2370 } 2371 2372 if (!Add->hasOneUse()) 2373 return nullptr; 2374 2375 // X+C <u C2 -> (X & -C2) == C 2376 // iff C & (C2-1) == 0 2377 // C2 is a power of 2 2378 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0) 2379 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C), 2380 ConstantExpr::getNeg(cast<Constant>(Y))); 2381 2382 // X+C >u C2 -> (X & ~C2) != C 2383 // iff C & C2 == 0 2384 // C2+1 is a power of 2 2385 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0) 2386 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C), 2387 ConstantExpr::getNeg(cast<Constant>(Y))); 2388 2389 return nullptr; 2390 } 2391 2392 bool InstCombiner::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, 2393 Value *&RHS, ConstantInt *&Less, 2394 ConstantInt *&Equal, 2395 ConstantInt *&Greater) { 2396 // TODO: Generalize this to work with other comparison idioms or ensure 2397 // they get canonicalized into this form. 2398 2399 // select i1 (a == b), i32 Equal, i32 (select i1 (a < b), i32 Less, i32 2400 // Greater), where Equal, Less and Greater are placeholders for any three 2401 // constants. 
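  // For example, the common three-way compare idiom
  //   select i1 (a == b), i32 0, i32 (select i1 (a s< b), i32 -1, i32 1)
  // matches with Less == -1, Equal == 0 and Greater == 1.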
2402 ICmpInst::Predicate PredA, PredB; 2403 if (match(SI->getTrueValue(), m_ConstantInt(Equal)) && 2404 match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) && 2405 PredA == ICmpInst::ICMP_EQ && 2406 match(SI->getFalseValue(), 2407 m_Select(m_ICmp(PredB, m_Specific(LHS), m_Specific(RHS)), 2408 m_ConstantInt(Less), m_ConstantInt(Greater))) && 2409 PredB == ICmpInst::ICMP_SLT) { 2410 return true; 2411 } 2412 return false; 2413 } 2414 2415 Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp, 2416 SelectInst *Select, 2417 ConstantInt *C) { 2418 2419 assert(C && "Cmp RHS should be a constant int!"); 2420 // If we're testing a constant value against the result of a three way 2421 // comparison, the result can be expressed directly in terms of the 2422 // original values being compared. Note: We could possibly be more 2423 // aggressive here and remove the hasOneUse test. The original select is 2424 // really likely to simplify or sink when we remove a test of the result. 2425 Value *OrigLHS, *OrigRHS; 2426 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan; 2427 if (Cmp.hasOneUse() && 2428 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal, 2429 C3GreaterThan)) { 2430 assert(C1LessThan && C2Equal && C3GreaterThan); 2431 2432 bool TrueWhenLessThan = 2433 ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C) 2434 ->isAllOnesValue(); 2435 bool TrueWhenEqual = 2436 ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C) 2437 ->isAllOnesValue(); 2438 bool TrueWhenGreaterThan = 2439 ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C) 2440 ->isAllOnesValue(); 2441 2442 // This generates the new instruction that will replace the original Cmp 2443 // Instruction. Instead of enumerating the various combinations when 2444 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus 2445 // false, we rely on chaining of ORs and future passes of InstCombine to 2446 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b). 2447 2448 // When none of the three constants satisfy the predicate for the RHS (C), 2449 // the entire original Cmp can be simplified to a false. 2450 Value *Cond = Builder.getFalse(); 2451 if (TrueWhenLessThan) 2452 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS)); 2453 if (TrueWhenEqual) 2454 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS)); 2455 if (TrueWhenGreaterThan) 2456 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS)); 2457 2458 return replaceInstUsesWith(Cmp, Cond); 2459 } 2460 return nullptr; 2461 } 2462 2463 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C 2464 /// where X is some kind of instruction. 
2465 Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) { 2466 const APInt *C; 2467 if (!match(Cmp.getOperand(1), m_APInt(C))) 2468 return nullptr; 2469 2470 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) { 2471 switch (BO->getOpcode()) { 2472 case Instruction::Xor: 2473 if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C)) 2474 return I; 2475 break; 2476 case Instruction::And: 2477 if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C)) 2478 return I; 2479 break; 2480 case Instruction::Or: 2481 if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C)) 2482 return I; 2483 break; 2484 case Instruction::Mul: 2485 if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C)) 2486 return I; 2487 break; 2488 case Instruction::Shl: 2489 if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C)) 2490 return I; 2491 break; 2492 case Instruction::LShr: 2493 case Instruction::AShr: 2494 if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C)) 2495 return I; 2496 break; 2497 case Instruction::UDiv: 2498 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C)) 2499 return I; 2500 LLVM_FALLTHROUGH; 2501 case Instruction::SDiv: 2502 if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C)) 2503 return I; 2504 break; 2505 case Instruction::Sub: 2506 if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C)) 2507 return I; 2508 break; 2509 case Instruction::Add: 2510 if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C)) 2511 return I; 2512 break; 2513 default: 2514 break; 2515 } 2516 // TODO: These folds could be refactored to be part of the above calls. 2517 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C)) 2518 return I; 2519 } 2520 2521 // Match against CmpInst LHS being instructions other than binary operators. 2522 2523 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) { 2524 // For now, we only support constant integers while folding the 2525 // ICMP(SELECT)) pattern. We can extend this to support vector of integers 2526 // similar to the cases handled by binary ops above. 2527 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1))) 2528 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS)) 2529 return I; 2530 } 2531 2532 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) { 2533 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C)) 2534 return I; 2535 } 2536 2537 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, *C)) 2538 return I; 2539 2540 return nullptr; 2541 } 2542 2543 /// Fold an icmp equality instruction with binary operator LHS and constant RHS: 2544 /// icmp eq/ne BO, C. 2545 Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp, 2546 BinaryOperator *BO, 2547 const APInt &C) { 2548 // TODO: Some of these folds could work with arbitrary constants, but this 2549 // function is limited to scalar and vector splat constants. 2550 if (!Cmp.isEquality()) 2551 return nullptr; 2552 2553 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2554 bool isICMP_NE = Pred == ICmpInst::ICMP_NE; 2555 Constant *RHS = cast<Constant>(Cmp.getOperand(1)); 2556 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1); 2557 2558 switch (BO->getOpcode()) { 2559 case Instruction::SRem: 2560 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one. 
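    // For example: icmp eq (srem %x, 8), 0  -->  icmp eq (urem %x, 8), 0
    // The sign of %x is irrelevant when only divisibility by 8 is tested.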
2561 if (C.isNullValue() && BO->hasOneUse()) { 2562 const APInt *BOC; 2563 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) { 2564 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName()); 2565 return new ICmpInst(Pred, NewRem, 2566 Constant::getNullValue(BO->getType())); 2567 } 2568 } 2569 break; 2570 case Instruction::Add: { 2571 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants. 2572 const APInt *BOC; 2573 if (match(BOp1, m_APInt(BOC))) { 2574 if (BO->hasOneUse()) { 2575 Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1)); 2576 return new ICmpInst(Pred, BOp0, SubC); 2577 } 2578 } else if (C.isNullValue()) { 2579 // Replace ((add A, B) != 0) with (A != -B) if A or B is 2580 // efficiently invertible, or if the add has just this one use. 2581 if (Value *NegVal = dyn_castNegVal(BOp1)) 2582 return new ICmpInst(Pred, BOp0, NegVal); 2583 if (Value *NegVal = dyn_castNegVal(BOp0)) 2584 return new ICmpInst(Pred, NegVal, BOp1); 2585 if (BO->hasOneUse()) { 2586 Value *Neg = Builder.CreateNeg(BOp1); 2587 Neg->takeName(BO); 2588 return new ICmpInst(Pred, BOp0, Neg); 2589 } 2590 } 2591 break; 2592 } 2593 case Instruction::Xor: 2594 if (BO->hasOneUse()) { 2595 if (Constant *BOC = dyn_cast<Constant>(BOp1)) { 2596 // For the xor case, we can xor two constants together, eliminating 2597 // the explicit xor. 2598 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC)); 2599 } else if (C.isNullValue()) { 2600 // Replace ((xor A, B) != 0) with (A != B) 2601 return new ICmpInst(Pred, BOp0, BOp1); 2602 } 2603 } 2604 break; 2605 case Instruction::Sub: 2606 if (BO->hasOneUse()) { 2607 const APInt *BOC; 2608 if (match(BOp0, m_APInt(BOC))) { 2609 // Replace ((sub BOC, B) != C) with (B != BOC-C). 2610 Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS); 2611 return new ICmpInst(Pred, BOp1, SubC); 2612 } else if (C.isNullValue()) { 2613 // Replace ((sub A, B) != 0) with (A != B). 2614 return new ICmpInst(Pred, BOp0, BOp1); 2615 } 2616 } 2617 break; 2618 case Instruction::Or: { 2619 const APInt *BOC; 2620 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) { 2621 // Comparing if all bits outside of a constant mask are set? 2622 // Replace (X | C) == -1 with (X & ~C) == ~C. 2623 // This removes the -1 constant. 2624 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1)); 2625 Value *And = Builder.CreateAnd(BOp0, NotBOC); 2626 return new ICmpInst(Pred, And, NotBOC); 2627 } 2628 break; 2629 } 2630 case Instruction::And: { 2631 const APInt *BOC; 2632 if (match(BOp1, m_APInt(BOC))) { 2633 // If we have ((X & C) == C), turn it into ((X & C) != 0). 2634 if (C == *BOC && C.isPowerOf2()) 2635 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, 2636 BO, Constant::getNullValue(RHS->getType())); 2637 2638 // Don't perform the following transforms if the AND has multiple uses 2639 if (!BO->hasOneUse()) 2640 break; 2641 2642 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0 2643 if (BOC->isSignMask()) { 2644 Constant *Zero = Constant::getNullValue(BOp0->getType()); 2645 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE; 2646 return new ICmpInst(NewPred, BOp0, Zero); 2647 } 2648 2649 // ((X & ~7) == 0) --> X < 8 2650 if (C.isNullValue() && (~(*BOC) + 1).isPowerOf2()) { 2651 Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1)); 2652 auto NewPred = isICMP_NE ? 
ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 2653 return new ICmpInst(NewPred, BOp0, NegBOC); 2654 } 2655 } 2656 break; 2657 } 2658 case Instruction::Mul: 2659 if (C.isNullValue() && BO->hasNoSignedWrap()) { 2660 const APInt *BOC; 2661 if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) { 2662 // The trivial case (mul X, 0) is handled by InstSimplify. 2663 // General case : (mul X, C) != 0 iff X != 0 2664 // (mul X, C) == 0 iff X == 0 2665 return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType())); 2666 } 2667 } 2668 break; 2669 case Instruction::UDiv: 2670 if (C.isNullValue()) { 2671 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A) 2672 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT; 2673 return new ICmpInst(NewPred, BOp1, BOp0); 2674 } 2675 break; 2676 default: 2677 break; 2678 } 2679 return nullptr; 2680 } 2681 2682 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C. 2683 Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp, 2684 const APInt &C) { 2685 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)); 2686 if (!II || !Cmp.isEquality()) 2687 return nullptr; 2688 2689 // Handle icmp {eq|ne} <intrinsic>, Constant. 2690 Type *Ty = II->getType(); 2691 switch (II->getIntrinsicID()) { 2692 case Intrinsic::bswap: 2693 Worklist.Add(II); 2694 Cmp.setOperand(0, II->getArgOperand(0)); 2695 Cmp.setOperand(1, ConstantInt::get(Ty, C.byteSwap())); 2696 return &Cmp; 2697 2698 case Intrinsic::ctlz: 2699 case Intrinsic::cttz: 2700 // ctz(A) == bitwidth(A) -> A == 0 and likewise for != 2701 if (C == C.getBitWidth()) { 2702 Worklist.Add(II); 2703 Cmp.setOperand(0, II->getArgOperand(0)); 2704 Cmp.setOperand(1, ConstantInt::getNullValue(Ty)); 2705 return &Cmp; 2706 } 2707 break; 2708 2709 case Intrinsic::ctpop: { 2710 // popcount(A) == 0 -> A == 0 and likewise for != 2711 // popcount(A) == bitwidth(A) -> A == -1 and likewise for != 2712 bool IsZero = C.isNullValue(); 2713 if (IsZero || C == C.getBitWidth()) { 2714 Worklist.Add(II); 2715 Cmp.setOperand(0, II->getArgOperand(0)); 2716 auto *NewOp = 2717 IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty); 2718 Cmp.setOperand(1, NewOp); 2719 return &Cmp; 2720 } 2721 break; 2722 } 2723 default: 2724 break; 2725 } 2726 2727 return nullptr; 2728 } 2729 2730 /// Handle icmp with constant (but not simple integer constant) RHS. 2731 Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) { 2732 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2733 Constant *RHSC = dyn_cast<Constant>(Op1); 2734 Instruction *LHSI = dyn_cast<Instruction>(Op0); 2735 if (!RHSC || !LHSI) 2736 return nullptr; 2737 2738 switch (LHSI->getOpcode()) { 2739 case Instruction::GetElementPtr: 2740 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null 2741 if (RHSC->isNullValue() && 2742 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices()) 2743 return new ICmpInst( 2744 I.getPredicate(), LHSI->getOperand(0), 2745 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2746 break; 2747 case Instruction::PHI: 2748 // Only fold icmp into the PHI if the phi and icmp are in the same 2749 // block. If in the same block, we're encouraging jump threading. If 2750 // not, we are just pessimizing the code by making an i1 phi. 
2751 if (LHSI->getParent() == I.getParent()) 2752 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI))) 2753 return NV; 2754 break; 2755 case Instruction::Select: { 2756 // If either operand of the select is a constant, we can fold the 2757 // comparison into the select arms, which will cause one to be 2758 // constant folded and the select turned into a bitwise or. 2759 Value *Op1 = nullptr, *Op2 = nullptr; 2760 ConstantInt *CI = nullptr; 2761 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) { 2762 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2763 CI = dyn_cast<ConstantInt>(Op1); 2764 } 2765 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) { 2766 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2767 CI = dyn_cast<ConstantInt>(Op2); 2768 } 2769 2770 // We only want to perform this transformation if it will not lead to 2771 // additional code. This is true if either both sides of the select 2772 // fold to a constant (in which case the icmp is replaced with a select 2773 // which will usually simplify) or this is the only user of the 2774 // select (in which case we are trading a select+icmp for a simpler 2775 // select+icmp) or all uses of the select can be replaced based on 2776 // dominance information ("Global cases"). 2777 bool Transform = false; 2778 if (Op1 && Op2) 2779 Transform = true; 2780 else if (Op1 || Op2) { 2781 // Local case 2782 if (LHSI->hasOneUse()) 2783 Transform = true; 2784 // Global cases 2785 else if (CI && !CI->isZero()) 2786 // When Op1 is constant try replacing select with second operand. 2787 // Otherwise Op2 is constant and try replacing select with first 2788 // operand. 2789 Transform = 2790 replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1); 2791 } 2792 if (Transform) { 2793 if (!Op1) 2794 Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC, 2795 I.getName()); 2796 if (!Op2) 2797 Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC, 2798 I.getName()); 2799 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); 2800 } 2801 break; 2802 } 2803 case Instruction::IntToPtr: 2804 // icmp pred inttoptr(X), null -> icmp pred X, 0 2805 if (RHSC->isNullValue() && 2806 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType()) 2807 return new ICmpInst( 2808 I.getPredicate(), LHSI->getOperand(0), 2809 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2810 break; 2811 2812 case Instruction::Load: 2813 // Try to optimize things like "A[i] > 4" to index computations. 2814 if (GetElementPtrInst *GEP = 2815 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) { 2816 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 2817 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 2818 !cast<LoadInst>(LHSI)->isVolatile()) 2819 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I)) 2820 return Res; 2821 } 2822 break; 2823 } 2824 2825 return nullptr; 2826 } 2827 2828 /// Try to fold icmp (binop), X or icmp X, (binop). 2829 /// TODO: A large part of this logic is duplicated in InstSimplify's 2830 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code 2831 /// duplication. 2832 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) { 2833 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2834 2835 // Special logic for binary operators. 
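  // The folds below may look through an add or sub unconditionally for
  // equality predicates, for unsigned predicates only when the operation has
  // nuw, and for signed predicates only when it has nsw. For example:
  //   icmp ugt (add nuw %x, %y), %x  -->  icmp ugt %y, 0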
2836 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0); 2837 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1); 2838 if (!BO0 && !BO1) 2839 return nullptr; 2840 2841 const CmpInst::Predicate Pred = I.getPredicate(); 2842 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false; 2843 if (BO0 && isa<OverflowingBinaryOperator>(BO0)) 2844 NoOp0WrapProblem = 2845 ICmpInst::isEquality(Pred) || 2846 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) || 2847 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap()); 2848 if (BO1 && isa<OverflowingBinaryOperator>(BO1)) 2849 NoOp1WrapProblem = 2850 ICmpInst::isEquality(Pred) || 2851 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) || 2852 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap()); 2853 2854 // Analyze the case when either Op0 or Op1 is an add instruction. 2855 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null). 2856 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 2857 if (BO0 && BO0->getOpcode() == Instruction::Add) { 2858 A = BO0->getOperand(0); 2859 B = BO0->getOperand(1); 2860 } 2861 if (BO1 && BO1->getOpcode() == Instruction::Add) { 2862 C = BO1->getOperand(0); 2863 D = BO1->getOperand(1); 2864 } 2865 2866 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2867 if ((A == Op1 || B == Op1) && NoOp0WrapProblem) 2868 return new ICmpInst(Pred, A == Op1 ? B : A, 2869 Constant::getNullValue(Op1->getType())); 2870 2871 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2872 if ((C == Op0 || D == Op0) && NoOp1WrapProblem) 2873 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()), 2874 C == Op0 ? D : C); 2875 2876 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow. 2877 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem && 2878 NoOp1WrapProblem && 2879 // Try not to increase register pressure. 2880 BO0->hasOneUse() && BO1->hasOneUse()) { 2881 // Determine Y and Z in the form icmp (X+Y), (X+Z). 
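    // For example, with both adds having a single use:
    //   icmp slt (add nsw %x, %a), (add nsw %x, %b)  -->  icmp slt %a, %b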
2882 Value *Y, *Z; 2883 if (A == C) { 2884 // C + B == C + D -> B == D 2885 Y = B; 2886 Z = D; 2887 } else if (A == D) { 2888 // D + B == C + D -> B == C 2889 Y = B; 2890 Z = C; 2891 } else if (B == C) { 2892 // A + C == C + D -> A == D 2893 Y = A; 2894 Z = D; 2895 } else { 2896 assert(B == D); 2897 // A + D == C + D -> A == C 2898 Y = A; 2899 Z = C; 2900 } 2901 return new ICmpInst(Pred, Y, Z); 2902 } 2903 2904 // icmp slt (X + -1), Y -> icmp sle X, Y 2905 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT && 2906 match(B, m_AllOnes())) 2907 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1); 2908 2909 // icmp sge (X + -1), Y -> icmp sgt X, Y 2910 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE && 2911 match(B, m_AllOnes())) 2912 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1); 2913 2914 // icmp sle (X + 1), Y -> icmp slt X, Y 2915 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One())) 2916 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1); 2917 2918 // icmp sgt (X + 1), Y -> icmp sge X, Y 2919 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One())) 2920 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1); 2921 2922 // icmp sgt X, (Y + -1) -> icmp sge X, Y 2923 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT && 2924 match(D, m_AllOnes())) 2925 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C); 2926 2927 // icmp sle X, (Y + -1) -> icmp slt X, Y 2928 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE && 2929 match(D, m_AllOnes())) 2930 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C); 2931 2932 // icmp sge X, (Y + 1) -> icmp sgt X, Y 2933 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One())) 2934 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C); 2935 2936 // icmp slt X, (Y + 1) -> icmp sle X, Y 2937 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One())) 2938 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C); 2939 2940 // TODO: The subtraction-related identities shown below also hold, but 2941 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations 2942 // wouldn't happen even if they were implemented. 2943 // 2944 // icmp ult (X - 1), Y -> icmp ule X, Y 2945 // icmp uge (X - 1), Y -> icmp ugt X, Y 2946 // icmp ugt X, (Y - 1) -> icmp uge X, Y 2947 // icmp ule X, (Y - 1) -> icmp ult X, Y 2948 2949 // icmp ule (X + 1), Y -> icmp ult X, Y 2950 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One())) 2951 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1); 2952 2953 // icmp ugt (X + 1), Y -> icmp uge X, Y 2954 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One())) 2955 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1); 2956 2957 // icmp uge X, (Y + 1) -> icmp ugt X, Y 2958 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One())) 2959 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C); 2960 2961 // icmp ult X, (Y + 1) -> icmp ule X, Y 2962 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One())) 2963 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C); 2964 2965 // if C1 has greater magnitude than C2: 2966 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y 2967 // s.t. C3 = C1 - C2 2968 // 2969 // if C2 has greater magnitude than C1: 2970 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3) 2971 // s.t. 
C3 = C2 - C1 2972 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem && 2973 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) 2974 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B)) 2975 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) { 2976 const APInt &AP1 = C1->getValue(); 2977 const APInt &AP2 = C2->getValue(); 2978 if (AP1.isNegative() == AP2.isNegative()) { 2979 APInt AP1Abs = C1->getValue().abs(); 2980 APInt AP2Abs = C2->getValue().abs(); 2981 if (AP1Abs.uge(AP2Abs)) { 2982 ConstantInt *C3 = Builder.getInt(AP1 - AP2); 2983 Value *NewAdd = Builder.CreateNSWAdd(A, C3); 2984 return new ICmpInst(Pred, NewAdd, C); 2985 } else { 2986 ConstantInt *C3 = Builder.getInt(AP2 - AP1); 2987 Value *NewAdd = Builder.CreateNSWAdd(C, C3); 2988 return new ICmpInst(Pred, A, NewAdd); 2989 } 2990 } 2991 } 2992 2993 // Analyze the case when either Op0 or Op1 is a sub instruction. 2994 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null). 2995 A = nullptr; 2996 B = nullptr; 2997 C = nullptr; 2998 D = nullptr; 2999 if (BO0 && BO0->getOpcode() == Instruction::Sub) { 3000 A = BO0->getOperand(0); 3001 B = BO0->getOperand(1); 3002 } 3003 if (BO1 && BO1->getOpcode() == Instruction::Sub) { 3004 C = BO1->getOperand(0); 3005 D = BO1->getOperand(1); 3006 } 3007 3008 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow. 3009 if (A == Op1 && NoOp0WrapProblem) 3010 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B); 3011 3012 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow. 3013 if (C == Op0 && NoOp1WrapProblem) 3014 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType())); 3015 3016 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow. 3017 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem && 3018 // Try not to increase register pressure. 3019 BO0->hasOneUse() && BO1->hasOneUse()) 3020 return new ICmpInst(Pred, A, C); 3021 3022 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow. 3023 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem && 3024 // Try not to increase register pressure. 3025 BO0->hasOneUse() && BO1->hasOneUse()) 3026 return new ICmpInst(Pred, D, B); 3027 3028 // icmp (0-X) < cst --> x > -cst 3029 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) { 3030 Value *X; 3031 if (match(BO0, m_Neg(m_Value(X)))) 3032 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) 3033 if (!RHSC->isMinValue(/*isSigned=*/true)) 3034 return new ICmpInst(I.getSwappedPredicate(), X, 3035 ConstantExpr::getNeg(RHSC)); 3036 } 3037 3038 BinaryOperator *SRem = nullptr; 3039 // icmp (srem X, Y), Y 3040 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1)) 3041 SRem = BO0; 3042 // icmp Y, (srem X, Y) 3043 else if (BO1 && BO1->getOpcode() == Instruction::SRem && 3044 Op0 == BO1->getOperand(1)) 3045 SRem = BO1; 3046 if (SRem) { 3047 // We don't check hasOneUse to avoid increasing register pressure because 3048 // the value we use is the same value this instruction was already using. 3049 switch (SRem == BO0 ? 
ICmpInst::getSwappedPredicate(Pred) : Pred) { 3050 default: 3051 break; 3052 case ICmpInst::ICMP_EQ: 3053 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 3054 case ICmpInst::ICMP_NE: 3055 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 3056 case ICmpInst::ICMP_SGT: 3057 case ICmpInst::ICMP_SGE: 3058 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1), 3059 Constant::getAllOnesValue(SRem->getType())); 3060 case ICmpInst::ICMP_SLT: 3061 case ICmpInst::ICMP_SLE: 3062 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1), 3063 Constant::getNullValue(SRem->getType())); 3064 } 3065 } 3066 3067 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() && 3068 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) { 3069 switch (BO0->getOpcode()) { 3070 default: 3071 break; 3072 case Instruction::Add: 3073 case Instruction::Sub: 3074 case Instruction::Xor: { 3075 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b 3076 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3077 3078 const APInt *C; 3079 if (match(BO0->getOperand(1), m_APInt(C))) { 3080 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b 3081 if (C->isSignMask()) { 3082 ICmpInst::Predicate NewPred = 3083 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3084 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3085 } 3086 3087 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b 3088 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) { 3089 ICmpInst::Predicate NewPred = 3090 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3091 NewPred = I.getSwappedPredicate(NewPred); 3092 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3093 } 3094 } 3095 break; 3096 } 3097 case Instruction::Mul: { 3098 if (!I.isEquality()) 3099 break; 3100 3101 const APInt *C; 3102 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() && 3103 !C->isOneValue()) { 3104 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask) 3105 // Mask = -1 >> count-trailing-zeros(C). 
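      // For example, with C == 12 (two trailing zeros) on i8 operands the
      // mask is 63 (0x3F):
      //   icmp eq (mul i8 %x, 12), (mul i8 %y, 12)
      //     --> icmp eq (and i8 %x, 63), (and i8 %y, 63)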
3106 if (unsigned TZs = C->countTrailingZeros()) { 3107 Constant *Mask = ConstantInt::get( 3108 BO0->getType(), 3109 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs)); 3110 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask); 3111 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask); 3112 return new ICmpInst(Pred, And1, And2); 3113 } 3114 // If there are no trailing zeros in the multiplier, just eliminate 3115 // the multiplies (no masking is needed): 3116 // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y 3117 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3118 } 3119 break; 3120 } 3121 case Instruction::UDiv: 3122 case Instruction::LShr: 3123 if (I.isSigned() || !BO0->isExact() || !BO1->isExact()) 3124 break; 3125 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3126 3127 case Instruction::SDiv: 3128 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact()) 3129 break; 3130 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3131 3132 case Instruction::AShr: 3133 if (!BO0->isExact() || !BO1->isExact()) 3134 break; 3135 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3136 3137 case Instruction::Shl: { 3138 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap(); 3139 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap(); 3140 if (!NUW && !NSW) 3141 break; 3142 if (!NSW && I.isSigned()) 3143 break; 3144 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3145 } 3146 } 3147 } 3148 3149 if (BO0) { 3150 // Transform A & (L - 1) `ult` L --> L != 0 3151 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes()); 3152 auto BitwiseAnd = m_c_And(m_Value(), LSubOne); 3153 3154 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) { 3155 auto *Zero = Constant::getNullValue(BO0->getType()); 3156 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero); 3157 } 3158 } 3159 3160 return nullptr; 3161 } 3162 3163 /// Fold icmp Pred min|max(X, Y), X. 3164 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) { 3165 ICmpInst::Predicate Pred = Cmp.getPredicate(); 3166 Value *Op0 = Cmp.getOperand(0); 3167 Value *X = Cmp.getOperand(1); 3168 3169 // Canonicalize minimum or maximum operand to LHS of the icmp. 
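  // For example, "icmp sgt X, smin(X, Y)" is rewritten here as
  // "icmp slt smin(X, Y), X" so the folds that follow only need to match the
  // min/max pattern on the LHS.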
3170 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) || 3171 match(X, m_c_SMax(m_Specific(Op0), m_Value())) || 3172 match(X, m_c_UMin(m_Specific(Op0), m_Value())) || 3173 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) { 3174 std::swap(Op0, X); 3175 Pred = Cmp.getSwappedPredicate(); 3176 } 3177 3178 Value *Y; 3179 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) { 3180 // smin(X, Y) == X --> X s<= Y 3181 // smin(X, Y) s>= X --> X s<= Y 3182 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE) 3183 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 3184 3185 // smin(X, Y) != X --> X s> Y 3186 // smin(X, Y) s< X --> X s> Y 3187 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT) 3188 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 3189 3190 // These cases should be handled in InstSimplify: 3191 // smin(X, Y) s<= X --> true 3192 // smin(X, Y) s> X --> false 3193 return nullptr; 3194 } 3195 3196 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) { 3197 // smax(X, Y) == X --> X s>= Y 3198 // smax(X, Y) s<= X --> X s>= Y 3199 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE) 3200 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 3201 3202 // smax(X, Y) != X --> X s< Y 3203 // smax(X, Y) s> X --> X s< Y 3204 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT) 3205 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 3206 3207 // These cases should be handled in InstSimplify: 3208 // smax(X, Y) s>= X --> true 3209 // smax(X, Y) s< X --> false 3210 return nullptr; 3211 } 3212 3213 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) { 3214 // umin(X, Y) == X --> X u<= Y 3215 // umin(X, Y) u>= X --> X u<= Y 3216 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE) 3217 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y); 3218 3219 // umin(X, Y) != X --> X u> Y 3220 // umin(X, Y) u< X --> X u> Y 3221 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT) 3222 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y); 3223 3224 // These cases should be handled in InstSimplify: 3225 // umin(X, Y) u<= X --> true 3226 // umin(X, Y) u> X --> false 3227 return nullptr; 3228 } 3229 3230 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) { 3231 // umax(X, Y) == X --> X u>= Y 3232 // umax(X, Y) u<= X --> X u>= Y 3233 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE) 3234 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 3235 3236 // umax(X, Y) != X --> X u< Y 3237 // umax(X, Y) u> X --> X u< Y 3238 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT) 3239 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 3240 3241 // These cases should be handled in InstSimplify: 3242 // umax(X, Y) u>= X --> true 3243 // umax(X, Y) u< X --> false 3244 return nullptr; 3245 } 3246 3247 return nullptr; 3248 } 3249 3250 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) { 3251 if (!I.isEquality()) 3252 return nullptr; 3253 3254 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3255 const CmpInst::Predicate Pred = I.getPredicate(); 3256 Value *A, *B, *C, *D; 3257 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) { 3258 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0 3259 Value *OtherVal = A == Op1 ? 
B : A; 3260 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 3261 } 3262 3263 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) { 3264 // A^c1 == C^c2 --> A == C^(c1^c2) 3265 ConstantInt *C1, *C2; 3266 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) && 3267 Op1->hasOneUse()) { 3268 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue()); 3269 Value *Xor = Builder.CreateXor(C, NC); 3270 return new ICmpInst(Pred, A, Xor); 3271 } 3272 3273 // A^B == A^D -> B == D 3274 if (A == C) 3275 return new ICmpInst(Pred, B, D); 3276 if (A == D) 3277 return new ICmpInst(Pred, B, C); 3278 if (B == C) 3279 return new ICmpInst(Pred, A, D); 3280 if (B == D) 3281 return new ICmpInst(Pred, A, C); 3282 } 3283 } 3284 3285 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) { 3286 // A == (A^B) -> B == 0 3287 Value *OtherVal = A == Op0 ? B : A; 3288 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 3289 } 3290 3291 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0 3292 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) && 3293 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) { 3294 Value *X = nullptr, *Y = nullptr, *Z = nullptr; 3295 3296 if (A == C) { 3297 X = B; 3298 Y = D; 3299 Z = A; 3300 } else if (A == D) { 3301 X = B; 3302 Y = C; 3303 Z = A; 3304 } else if (B == C) { 3305 X = A; 3306 Y = D; 3307 Z = B; 3308 } else if (B == D) { 3309 X = A; 3310 Y = C; 3311 Z = B; 3312 } 3313 3314 if (X) { // Build (X^Y) & Z 3315 Op1 = Builder.CreateXor(X, Y); 3316 Op1 = Builder.CreateAnd(Op1, Z); 3317 I.setOperand(0, Op1); 3318 I.setOperand(1, Constant::getNullValue(Op1->getType())); 3319 return &I; 3320 } 3321 } 3322 3323 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B) 3324 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B) 3325 ConstantInt *Cst1; 3326 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) && 3327 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) || 3328 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) && 3329 match(Op1, m_ZExt(m_Value(A))))) { 3330 APInt Pow2 = Cst1->getValue() + 1; 3331 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) && 3332 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth()) 3333 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType())); 3334 } 3335 3336 // (A >> C) == (B >> C) --> (A^B) u< (1 << C) 3337 // For lshr and ashr pairs. 3338 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) && 3339 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) || 3340 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) && 3341 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) { 3342 unsigned TypeBits = Cst1->getBitWidth(); 3343 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3344 if (ShAmt < TypeBits && ShAmt != 0) { 3345 ICmpInst::Predicate NewPred = 3346 Pred == ICmpInst::ICMP_NE ? 
ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 3347 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 3348 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt); 3349 return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal)); 3350 } 3351 } 3352 3353 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0 3354 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) && 3355 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) { 3356 unsigned TypeBits = Cst1->getBitWidth(); 3357 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3358 if (ShAmt < TypeBits && ShAmt != 0) { 3359 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 3360 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt); 3361 Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal), 3362 I.getName() + ".mask"); 3363 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType())); 3364 } 3365 } 3366 3367 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to 3368 // "icmp (and X, mask), cst" 3369 uint64_t ShAmt = 0; 3370 if (Op0->hasOneUse() && 3371 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) && 3372 match(Op1, m_ConstantInt(Cst1)) && 3373 // Only do this when A has multiple uses. This is most important to do 3374 // when it exposes other optimizations. 3375 !A->hasOneUse()) { 3376 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits(); 3377 3378 if (ShAmt < ASize) { 3379 APInt MaskV = 3380 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits()); 3381 MaskV <<= ShAmt; 3382 3383 APInt CmpV = Cst1->getValue().zext(ASize); 3384 CmpV <<= ShAmt; 3385 3386 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV)); 3387 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV)); 3388 } 3389 } 3390 3391 // If both operands are byte-swapped or bit-reversed, just compare the 3392 // original values. 3393 // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant() 3394 // and handle more intrinsics. 3395 if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) || 3396 (match(Op0, m_BitReverse(m_Value(A))) && 3397 match(Op1, m_BitReverse(m_Value(B))))) 3398 return new ICmpInst(Pred, A, B); 3399 3400 return nullptr; 3401 } 3402 3403 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so 3404 /// far. 3405 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) { 3406 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0)); 3407 Value *LHSCIOp = LHSCI->getOperand(0); 3408 Type *SrcTy = LHSCIOp->getType(); 3409 Type *DestTy = LHSCI->getType(); 3410 Value *RHSCIOp; 3411 3412 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the 3413 // integer type is the same size as the pointer type. 3414 if (LHSCI->getOpcode() == Instruction::PtrToInt && 3415 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) { 3416 Value *RHSOp = nullptr; 3417 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) { 3418 Value *RHSCIOp = RHSC->getOperand(0); 3419 if (RHSCIOp->getType()->getPointerAddressSpace() == 3420 LHSCIOp->getType()->getPointerAddressSpace()) { 3421 RHSOp = RHSC->getOperand(0); 3422 // If the pointer types don't match, insert a bitcast. 
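        // For example, if %p is an i8* and %q is an i32* in the same address
        // space, "icmp eq (ptrtoint %p), (ptrtoint %q)" becomes a compare of
        // %p against "bitcast i32* %q to i8*".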
3423 if (LHSCIOp->getType() != RHSOp->getType()) 3424 RHSOp = Builder.CreateBitCast(RHSOp, LHSCIOp->getType()); 3425 } 3426 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) { 3427 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy); 3428 } 3429 3430 if (RHSOp) 3431 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp); 3432 } 3433 3434 // The code below only handles extension cast instructions, so far. 3435 // Enforce this. 3436 if (LHSCI->getOpcode() != Instruction::ZExt && 3437 LHSCI->getOpcode() != Instruction::SExt) 3438 return nullptr; 3439 3440 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt; 3441 bool isSignedCmp = ICmp.isSigned(); 3442 3443 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) { 3444 // Not an extension from the same type? 3445 RHSCIOp = CI->getOperand(0); 3446 if (RHSCIOp->getType() != LHSCIOp->getType()) 3447 return nullptr; 3448 3449 // If the signedness of the two casts doesn't agree (i.e. one is a sext 3450 // and the other is a zext), then we can't handle this. 3451 if (CI->getOpcode() != LHSCI->getOpcode()) 3452 return nullptr; 3453 3454 // Deal with equality cases early. 3455 if (ICmp.isEquality()) 3456 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3457 3458 // A signed comparison of sign extended values simplifies into a 3459 // signed comparison. 3460 if (isSignedCmp && isSignedExt) 3461 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3462 3463 // The other three cases all fold into an unsigned comparison. 3464 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp); 3465 } 3466 3467 // If we aren't dealing with a constant on the RHS, exit early. 3468 auto *C = dyn_cast<Constant>(ICmp.getOperand(1)); 3469 if (!C) 3470 return nullptr; 3471 3472 // Compute the constant that would happen if we truncated to SrcTy then 3473 // re-extended to DestTy. 3474 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy); 3475 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy); 3476 3477 // If the re-extended constant didn't change... 3478 if (Res2 == C) { 3479 // Deal with equality cases early. 3480 if (ICmp.isEquality()) 3481 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3482 3483 // A signed comparison of sign extended values simplifies into a 3484 // signed comparison. 3485 if (isSignedExt && isSignedCmp) 3486 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3487 3488 // The other three cases all fold into an unsigned comparison. 3489 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1); 3490 } 3491 3492 // The re-extended constant changed, partly changed (in the case of a vector), 3493 // or could not be determined to be equal (in the case of a constant 3494 // expression), so the constant cannot be represented in the shorter type. 3495 // Consequently, we cannot emit a simple comparison. 3496 // All the cases that fold to true or false will have already been handled 3497 // by SimplifyICmpInst, so only deal with the tricky case. 3498 3499 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C)) 3500 return nullptr; 3501 3502 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases 3503 // should have been folded away previously and not enter in here. 3504 3505 // We're performing an unsigned comp with a sign extended value. 3506 // This is true if the input is >= 0. 
[aka >s -1] 3507 Constant *NegOne = Constant::getAllOnesValue(SrcTy); 3508 Value *Result = Builder.CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName()); 3509 3510 // Finally, return the value computed. 3511 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT) 3512 return replaceInstUsesWith(ICmp, Result); 3513 3514 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!"); 3515 return BinaryOperator::CreateNot(Result); 3516 } 3517 3518 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, 3519 Value *RHS, Instruction &OrigI, 3520 Value *&Result, Constant *&Overflow) { 3521 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS)) 3522 std::swap(LHS, RHS); 3523 3524 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) { 3525 Result = OpResult; 3526 Overflow = OverflowVal; 3527 if (ReuseName) 3528 Result->takeName(&OrigI); 3529 return true; 3530 }; 3531 3532 // If the overflow check was an add followed by a compare, the insertion point 3533 // may be pointing to the compare. We want to insert the new instructions 3534 // before the add in case there are uses of the add between the add and the 3535 // compare. 3536 Builder.SetInsertPoint(&OrigI); 3537 3538 switch (OCF) { 3539 case OCF_INVALID: 3540 llvm_unreachable("bad overflow check kind!"); 3541 3542 case OCF_UNSIGNED_ADD: { 3543 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI); 3544 if (OR == OverflowResult::NeverOverflows) 3545 return SetResult(Builder.CreateNUWAdd(LHS, RHS), Builder.getFalse(), 3546 true); 3547 3548 if (OR == OverflowResult::AlwaysOverflows) 3549 return SetResult(Builder.CreateAdd(LHS, RHS), Builder.getTrue(), true); 3550 3551 // Fall through uadd into sadd 3552 LLVM_FALLTHROUGH; 3553 } 3554 case OCF_SIGNED_ADD: { 3555 // X + 0 -> {X, false} 3556 if (match(RHS, m_Zero())) 3557 return SetResult(LHS, Builder.getFalse(), false); 3558 3559 // We can strength reduce this signed add into a regular add if we can prove 3560 // that it will never overflow. 
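    // Note that OCF_UNSIGNED_ADD falls through to here, so the flavor is
    // re-checked before the add is rewritten as a signed no-wrap add.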
3561 if (OCF == OCF_SIGNED_ADD) 3562 if (willNotOverflowSignedAdd(LHS, RHS, OrigI)) 3563 return SetResult(Builder.CreateNSWAdd(LHS, RHS), Builder.getFalse(), 3564 true); 3565 break; 3566 } 3567 3568 case OCF_UNSIGNED_SUB: 3569 case OCF_SIGNED_SUB: { 3570 // X - 0 -> {X, false} 3571 if (match(RHS, m_Zero())) 3572 return SetResult(LHS, Builder.getFalse(), false); 3573 3574 if (OCF == OCF_SIGNED_SUB) { 3575 if (willNotOverflowSignedSub(LHS, RHS, OrigI)) 3576 return SetResult(Builder.CreateNSWSub(LHS, RHS), Builder.getFalse(), 3577 true); 3578 } else { 3579 if (willNotOverflowUnsignedSub(LHS, RHS, OrigI)) 3580 return SetResult(Builder.CreateNUWSub(LHS, RHS), Builder.getFalse(), 3581 true); 3582 } 3583 break; 3584 } 3585 3586 case OCF_UNSIGNED_MUL: { 3587 OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI); 3588 if (OR == OverflowResult::NeverOverflows) 3589 return SetResult(Builder.CreateNUWMul(LHS, RHS), Builder.getFalse(), 3590 true); 3591 if (OR == OverflowResult::AlwaysOverflows) 3592 return SetResult(Builder.CreateMul(LHS, RHS), Builder.getTrue(), true); 3593 LLVM_FALLTHROUGH; 3594 } 3595 case OCF_SIGNED_MUL: 3596 // X * undef -> undef 3597 if (isa<UndefValue>(RHS)) 3598 return SetResult(RHS, UndefValue::get(Builder.getInt1Ty()), false); 3599 3600 // X * 0 -> {0, false} 3601 if (match(RHS, m_Zero())) 3602 return SetResult(RHS, Builder.getFalse(), false); 3603 3604 // X * 1 -> {X, false} 3605 if (match(RHS, m_One())) 3606 return SetResult(LHS, Builder.getFalse(), false); 3607 3608 if (OCF == OCF_SIGNED_MUL) 3609 if (willNotOverflowSignedMul(LHS, RHS, OrigI)) 3610 return SetResult(Builder.CreateNSWMul(LHS, RHS), Builder.getFalse(), 3611 true); 3612 break; 3613 } 3614 3615 return false; 3616 } 3617 3618 /// \brief Recognize and process idiom involving test for multiplication 3619 /// overflow. 3620 /// 3621 /// The caller has matched a pattern of the form: 3622 /// I = cmp u (mul(zext A, zext B), V 3623 /// The function checks if this is a test for overflow and if so replaces 3624 /// multiplication with call to 'mul.with.overflow' intrinsic. 3625 /// 3626 /// \param I Compare instruction. 3627 /// \param MulVal Result of 'mult' instruction. It is one of the arguments of 3628 /// the compare instruction. Must be of integer type. 3629 /// \param OtherVal The other argument of compare instruction. 3630 /// \returns Instruction which must replace the compare instruction, NULL if no 3631 /// replacement required. 3632 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal, 3633 Value *OtherVal, InstCombiner &IC) { 3634 // Don't bother doing this transformation for pointers, don't do it for 3635 // vectors. 3636 if (!isa<IntegerType>(MulVal->getType())) 3637 return nullptr; 3638 3639 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal); 3640 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal); 3641 auto *MulInstr = dyn_cast<Instruction>(MulVal); 3642 if (!MulInstr) 3643 return nullptr; 3644 assert(MulInstr->getOpcode() == Instruction::Mul); 3645 3646 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)), 3647 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1)); 3648 assert(LHS->getOpcode() == Instruction::ZExt); 3649 assert(RHS->getOpcode() == Instruction::ZExt); 3650 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0); 3651 3652 // Calculate type and width of the result produced by mul.with.overflow. 
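  // For example, if A is i8 and B is i16, then MulWidth is 16 and MulType is
  // i16, so the overflow check can use a 16-bit umul.with.overflow instead of
  // the original wider multiply.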
3653 Type *TyA = A->getType(), *TyB = B->getType(); 3654 unsigned WidthA = TyA->getPrimitiveSizeInBits(), 3655 WidthB = TyB->getPrimitiveSizeInBits(); 3656 unsigned MulWidth; 3657 Type *MulType; 3658 if (WidthB > WidthA) { 3659 MulWidth = WidthB; 3660 MulType = TyB; 3661 } else { 3662 MulWidth = WidthA; 3663 MulType = TyA; 3664 } 3665 3666 // In order to replace the original mul with a narrower mul.with.overflow, 3667 // all uses must ignore upper bits of the product. The number of used low 3668 // bits must be not greater than the width of mul.with.overflow. 3669 if (MulVal->hasNUsesOrMore(2)) 3670 for (User *U : MulVal->users()) { 3671 if (U == &I) 3672 continue; 3673 if (TruncInst *TI = dyn_cast<TruncInst>(U)) { 3674 // Check if truncation ignores bits above MulWidth. 3675 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits(); 3676 if (TruncWidth > MulWidth) 3677 return nullptr; 3678 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) { 3679 // Check if AND ignores bits above MulWidth. 3680 if (BO->getOpcode() != Instruction::And) 3681 return nullptr; 3682 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) { 3683 const APInt &CVal = CI->getValue(); 3684 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth) 3685 return nullptr; 3686 } else { 3687 // In this case we could have the operand of the binary operation 3688 // being defined in another block, and performing the replacement 3689 // could break the dominance relation. 3690 return nullptr; 3691 } 3692 } else { 3693 // Other uses prohibit this transformation. 3694 return nullptr; 3695 } 3696 } 3697 3698 // Recognize patterns 3699 switch (I.getPredicate()) { 3700 case ICmpInst::ICMP_EQ: 3701 case ICmpInst::ICMP_NE: 3702 // Recognize pattern: 3703 // mulval = mul(zext A, zext B) 3704 // cmp eq/neq mulval, zext trunc mulval 3705 if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal)) 3706 if (Zext->hasOneUse()) { 3707 Value *ZextArg = Zext->getOperand(0); 3708 if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg)) 3709 if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth) 3710 break; //Recognized 3711 } 3712 3713 // Recognize pattern: 3714 // mulval = mul(zext A, zext B) 3715 // cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits. 
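    // For example, with MulWidth == 16 the mask constant is 65535 (0xFFFF),
    // i.e. the pattern is "icmp eq %mulval, (and %mulval, 65535)".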
3716 ConstantInt *CI; 3717 Value *ValToMask; 3718 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) { 3719 if (ValToMask != MulVal) 3720 return nullptr; 3721 const APInt &CVal = CI->getValue() + 1; 3722 if (CVal.isPowerOf2()) { 3723 unsigned MaskWidth = CVal.logBase2(); 3724 if (MaskWidth == MulWidth) 3725 break; // Recognized 3726 } 3727 } 3728 return nullptr; 3729 3730 case ICmpInst::ICMP_UGT: 3731 // Recognize pattern: 3732 // mulval = mul(zext A, zext B) 3733 // cmp ugt mulval, max 3734 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3735 APInt MaxVal = APInt::getMaxValue(MulWidth); 3736 MaxVal = MaxVal.zext(CI->getBitWidth()); 3737 if (MaxVal.eq(CI->getValue())) 3738 break; // Recognized 3739 } 3740 return nullptr; 3741 3742 case ICmpInst::ICMP_UGE: 3743 // Recognize pattern: 3744 // mulval = mul(zext A, zext B) 3745 // cmp uge mulval, max+1 3746 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3747 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth); 3748 if (MaxVal.eq(CI->getValue())) 3749 break; // Recognized 3750 } 3751 return nullptr; 3752 3753 case ICmpInst::ICMP_ULE: 3754 // Recognize pattern: 3755 // mulval = mul(zext A, zext B) 3756 // cmp ule mulval, max 3757 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3758 APInt MaxVal = APInt::getMaxValue(MulWidth); 3759 MaxVal = MaxVal.zext(CI->getBitWidth()); 3760 if (MaxVal.eq(CI->getValue())) 3761 break; // Recognized 3762 } 3763 return nullptr; 3764 3765 case ICmpInst::ICMP_ULT: 3766 // Recognize pattern: 3767 // mulval = mul(zext A, zext B) 3768 // cmp ule mulval, max + 1 3769 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 3770 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth); 3771 if (MaxVal.eq(CI->getValue())) 3772 break; // Recognized 3773 } 3774 return nullptr; 3775 3776 default: 3777 return nullptr; 3778 } 3779 3780 InstCombiner::BuilderTy &Builder = IC.Builder; 3781 Builder.SetInsertPoint(MulInstr); 3782 3783 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B) 3784 Value *MulA = A, *MulB = B; 3785 if (WidthA < MulWidth) 3786 MulA = Builder.CreateZExt(A, MulType); 3787 if (WidthB < MulWidth) 3788 MulB = Builder.CreateZExt(B, MulType); 3789 Value *F = Intrinsic::getDeclaration(I.getModule(), 3790 Intrinsic::umul_with_overflow, MulType); 3791 CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul"); 3792 IC.Worklist.Add(MulInstr); 3793 3794 // If there are uses of mul result other than the comparison, we know that 3795 // they are truncation or binary AND. Change them to use result of 3796 // mul.with.overflow and adjust properly mask/size. 
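  // For example, a "trunc %mulval" user whose type is exactly MulWidth bits
  // wide is replaced by the extracted mul.with.overflow value directly, while
  // an "and" user is rebuilt on the narrower value and zero-extended back.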
3797 if (MulVal->hasNUsesOrMore(2)) { 3798 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value"); 3799 for (User *U : MulVal->users()) { 3800 if (U == &I || U == OtherVal) 3801 continue; 3802 if (TruncInst *TI = dyn_cast<TruncInst>(U)) { 3803 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth) 3804 IC.replaceInstUsesWith(*TI, Mul); 3805 else 3806 TI->setOperand(0, Mul); 3807 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) { 3808 assert(BO->getOpcode() == Instruction::And); 3809 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask) 3810 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1)); 3811 APInt ShortMask = CI->getValue().trunc(MulWidth); 3812 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask); 3813 Instruction *Zext = 3814 cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType())); 3815 IC.Worklist.Add(Zext); 3816 IC.replaceInstUsesWith(*BO, Zext); 3817 } else { 3818 llvm_unreachable("Unexpected Binary operation"); 3819 } 3820 IC.Worklist.Add(cast<Instruction>(U)); 3821 } 3822 } 3823 if (isa<Instruction>(OtherVal)) 3824 IC.Worklist.Add(cast<Instruction>(OtherVal)); 3825 3826 // The original icmp gets replaced with the overflow value, maybe inverted 3827 // depending on predicate. 3828 bool Inverse = false; 3829 switch (I.getPredicate()) { 3830 case ICmpInst::ICMP_NE: 3831 break; 3832 case ICmpInst::ICMP_EQ: 3833 Inverse = true; 3834 break; 3835 case ICmpInst::ICMP_UGT: 3836 case ICmpInst::ICMP_UGE: 3837 if (I.getOperand(0) == MulVal) 3838 break; 3839 Inverse = true; 3840 break; 3841 case ICmpInst::ICMP_ULT: 3842 case ICmpInst::ICMP_ULE: 3843 if (I.getOperand(1) == MulVal) 3844 break; 3845 Inverse = true; 3846 break; 3847 default: 3848 llvm_unreachable("Unexpected predicate"); 3849 } 3850 if (Inverse) { 3851 Value *Res = Builder.CreateExtractValue(Call, 1); 3852 return BinaryOperator::CreateNot(Res); 3853 } 3854 3855 return ExtractValueInst::Create(Call, 1); 3856 } 3857 3858 /// When performing a comparison against a constant, it is possible that not all 3859 /// the bits in the LHS are demanded. This helper method computes the mask that 3860 /// IS demanded. 3861 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) { 3862 const APInt *RHS; 3863 if (!match(I.getOperand(1), m_APInt(RHS))) 3864 return APInt::getAllOnesValue(BitWidth); 3865 3866 // If this is a normal comparison, it demands all bits. If it is a sign bit 3867 // comparison, it only demands the sign bit. 3868 bool UnusedBit; 3869 if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit)) 3870 return APInt::getSignMask(BitWidth); 3871 3872 switch (I.getPredicate()) { 3873 // For a UGT comparison, we don't care about any bits that 3874 // correspond to the trailing ones of the comparand. The value of these 3875 // bits doesn't impact the outcome of the comparison, because any value 3876 // greater than the RHS must differ in a bit higher than these due to carry. 3877 case ICmpInst::ICMP_UGT: 3878 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes()); 3879 3880 // Similarly, for a ULT comparison, we don't care about the trailing zeros. 3881 // Any value less than the RHS must differ in a higher bit because of carries. 3882 case ICmpInst::ICMP_ULT: 3883 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros()); 3884 3885 default: 3886 return APInt::getAllOnesValue(BitWidth); 3887 } 3888 } 3889 3890 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst 3891 /// should be swapped. 
3892 /// The decision is based on how many times these two operands are reused
3893 /// as subtract operands and their positions in those instructions.
3894 /// The rationale is that several architectures use the same instruction for
3895 /// both subtract and cmp, thus it is better if the order of those operands
3896 /// matches.
3897 /// \return true if Op0 and Op1 should be swapped.
3898 static bool swapMayExposeCSEOpportunities(const Value * Op0,
3899                                           const Value * Op1) {
3900   // Filter out pointer values as those cannot appear directly in a subtract.
3901   // FIXME: we may want to go through inttoptrs or bitcasts.
3902   if (Op0->getType()->isPointerTy())
3903     return false;
3904   // Count every use of both Op0 and Op1 in a subtract.
3905   // Each time Op0 is the first operand, count -1: swapping is bad, the
3906   // subtract already has the same layout as the compare.
3907   // Each time Op0 is the second operand, count +1: swapping is good, the
3908   // subtract has a different layout than the compare.
3909   // At the end, if the benefit is greater than 0, Op0 should come second to
3910   // expose more CSE opportunities.
3911   int GlobalSwapBenefits = 0;
3912   for (const User *U : Op0->users()) {
3913     const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
3914     if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
3915       continue;
3916     // If Op0 is the first argument, it is not beneficial to swap the
3917     // arguments.
3918     int LocalSwapBenefits = -1;
3919     unsigned Op1Idx = 1;
3920     if (BinOp->getOperand(Op1Idx) == Op0) {
3921       Op1Idx = 0;
3922       LocalSwapBenefits = 1;
3923     }
3924     if (BinOp->getOperand(Op1Idx) != Op1)
3925       continue;
3926     GlobalSwapBenefits += LocalSwapBenefits;
3927   }
3928   return GlobalSwapBenefits > 0;
3929 }
3930
3931 /// \brief Check that one use is in the same block as the definition and all
3932 /// other uses are in blocks dominated by a given block.
3933 ///
3934 /// \param DI Definition
3935 /// \param UI Use
3936 /// \param DB Block that must dominate all uses of \p DI outside
3937 ///           the parent block
3938 /// \return true when \p UI is the only use of \p DI in the parent block
3939 /// and all other uses of \p DI are in blocks dominated by \p DB.
3940 ///
3941 bool InstCombiner::dominatesAllUses(const Instruction *DI,
3942                                     const Instruction *UI,
3943                                     const BasicBlock *DB) const {
3944   assert(DI && UI && "Instruction not defined\n");
3945   // Ignore incomplete definitions.
3946   if (!DI->getParent())
3947     return false;
3948   // DI and UI must be in the same block.
3949   if (DI->getParent() != UI->getParent())
3950     return false;
3951   // Protect from self-referencing blocks.
3952   if (DI->getParent() == DB)
3953     return false;
3954   for (const User *U : DI->users()) {
3955     auto *Usr = cast<Instruction>(U);
3956     if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
3957       return false;
3958   }
3959   return true;
3960 }
3961
3962 /// Return true when the instruction sequence within a block is select-cmp-br.
3963 static bool isChainSelectCmpBranch(const SelectInst *SI) {
3964   const BasicBlock *BB = SI->getParent();
3965   if (!BB)
3966     return false;
3967   auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
3968   if (!BI || BI->getNumSuccessors() != 2)
3969     return false;
3970   auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
3971   if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
3972     return false;
3973   return true;
3974 }
3975
3976 /// \brief True when a select result is replaced by one of its operands
3977 /// in a select-icmp sequence. This will eventually result in the elimination
3978 /// of the select.
3979 ///
3980 /// \param SI    Select instruction
3981 /// \param Icmp  Compare instruction
3982 /// \param SIOpd Operand that replaces the select
3983 ///
3984 /// Notes:
3985 /// - The replacement is global and requires dominator information
3986 /// - The caller is responsible for the actual replacement
3987 ///
3988 /// Example:
3989 ///
3990 /// entry:
3991 ///  %4 = select i1 %3, %C* %0, %C* null
3992 ///  %5 = icmp eq %C* %4, null
3993 ///  br i1 %5, label %9, label %7
3994 ///  ...
3995 ///  ; <label>:7                              ; preds = %entry
3996 ///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
3997 ///  ...
3998 ///
3999 /// can be transformed to
4000 ///
4001 ///  %5 = icmp eq %C* %0, null
4002 ///  %6 = select i1 %3, i1 %5, i1 true
4003 ///  br i1 %6, label %9, label %7
4004 ///  ...
4005 ///  ; <label>:7                              ; preds = %entry
4006 ///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replace by %0!
4007 ///
4008 /// The same applies when the first operand of the select is a constant and/or
4009 /// the compare is for not-equal rather than equal.
4010 ///
4011 /// NOTE: The function is only called when the select and compare constants
4012 /// are equal, and the optimization can work only for EQ predicates. This is
4013 /// not a major restriction since a NE compare should be 'normalized' to an
4014 /// equal compare, which usually happens in the combiner; the test case
4015 /// select-cmp-br.ll checks for it.
4016 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
4017                                              const ICmpInst *Icmp,
4018                                              const unsigned SIOpd) {
4019   assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4020   if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4021     BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
4022     // The check for the single predecessor is not the best that can be
4023     // done. But it protects efficiently against cases like when SI's
4024     // home block has two successors, Succ and Succ1, and Succ1 is a
4025     // predecessor of Succ. Then SI can't be replaced by SIOpd because the use
4026     // that gets replaced can be reached on either path. So the uniqueness
4027     // check guarantees that the path which all uses of SI (outside SI's
4028     // parent) are on is disjoint from all other paths out of SI. But that
4029     // information is more expensive to compute, and the trade-off here is in
4030     // favor of compile time. It should also be noted that we check for a
4031     // single predecessor and not merely uniqueness. This is to handle the
4032     // situation where Succ and Succ1 point to the same basic block.
4033     if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
4034       NumSel++;
4035       SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
4036       return true;
4037     }
4038   }
4039   return false;
4040 }
4041
4042 /// Try to fold the comparison based on range information we can get by checking
4043 /// whether bits are known to be zero or one in the inputs.
4044 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
4045   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4046   Type *Ty = Op0->getType();
4047   ICmpInst::Predicate Pred = I.getPredicate();
4048
4049   // Get scalar or pointer size.
4050   unsigned BitWidth = Ty->isIntOrIntVectorTy()
4051                           ?
Ty->getScalarSizeInBits() 4052 : DL.getTypeSizeInBits(Ty->getScalarType()); 4053 4054 if (!BitWidth) 4055 return nullptr; 4056 4057 KnownBits Op0Known(BitWidth); 4058 KnownBits Op1Known(BitWidth); 4059 4060 if (SimplifyDemandedBits(&I, 0, 4061 getDemandedBitsLHSMask(I, BitWidth), 4062 Op0Known, 0)) 4063 return &I; 4064 4065 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth), 4066 Op1Known, 0)) 4067 return &I; 4068 4069 // Given the known and unknown bits, compute a range that the LHS could be 4070 // in. Compute the Min, Max and RHS values based on the known bits. For the 4071 // EQ and NE we use unsigned values. 4072 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0); 4073 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0); 4074 if (I.isSigned()) { 4075 computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4076 computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4077 } else { 4078 computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4079 computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4080 } 4081 4082 // If Min and Max are known to be the same, then SimplifyDemandedBits 4083 // figured out that the LHS is a constant. Constant fold this now, so that 4084 // code below can assume that Min != Max. 4085 if (!isa<Constant>(Op0) && Op0Min == Op0Max) 4086 return new ICmpInst(Pred, ConstantInt::get(Op0->getType(), Op0Min), Op1); 4087 if (!isa<Constant>(Op1) && Op1Min == Op1Max) 4088 return new ICmpInst(Pred, Op0, ConstantInt::get(Op1->getType(), Op1Min)); 4089 4090 // Based on the range information we know about the LHS, see if we can 4091 // simplify this comparison. For example, (x&4) < 8 is always true. 4092 switch (Pred) { 4093 default: 4094 llvm_unreachable("Unknown icmp opcode!"); 4095 case ICmpInst::ICMP_EQ: 4096 case ICmpInst::ICMP_NE: { 4097 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) { 4098 return Pred == CmpInst::ICMP_EQ 4099 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())) 4100 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4101 } 4102 4103 // If all bits are known zero except for one, then we know at most one bit 4104 // is set. If the comparison is against zero, then this is a check to see if 4105 // *that* bit is set. 4106 APInt Op0KnownZeroInverted = ~Op0Known.Zero; 4107 if (Op1Known.isZero()) { 4108 // If the LHS is an AND with the same constant, look through it. 4109 Value *LHS = nullptr; 4110 const APInt *LHSC; 4111 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) || 4112 *LHSC != Op0KnownZeroInverted) 4113 LHS = Op0; 4114 4115 Value *X; 4116 if (match(LHS, m_Shl(m_One(), m_Value(X)))) { 4117 APInt ValToCheck = Op0KnownZeroInverted; 4118 Type *XTy = X->getType(); 4119 if (ValToCheck.isPowerOf2()) { 4120 // ((1 << X) & 8) == 0 -> X != 3 4121 // ((1 << X) & 8) != 0 -> X == 3 4122 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4123 auto NewPred = ICmpInst::getInversePredicate(Pred); 4124 return new ICmpInst(NewPred, X, CmpC); 4125 } else if ((++ValToCheck).isPowerOf2()) { 4126 // ((1 << X) & 7) == 0 -> X >= 3 4127 // ((1 << X) & 7) != 0 -> X < 3 4128 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4129 auto NewPred = 4130 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT; 4131 return new ICmpInst(NewPred, X, CmpC); 4132 } 4133 } 4134 4135 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1. 
4136       const APInt *CI;
4137       if (Op0KnownZeroInverted.isOneValue() &&
4138           match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
4139         // ((8 >>u X) & 1) == 0 -> X != 3
4140         // ((8 >>u X) & 1) != 0 -> X == 3
4141         unsigned CmpVal = CI->countTrailingZeros();
4142         auto NewPred = ICmpInst::getInversePredicate(Pred);
4143         return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
4144       }
4145     }
4146     break;
4147   }
4148   case ICmpInst::ICMP_ULT: {
4149     if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
4150       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4151     if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
4152       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4153     if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
4154       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4155
4156     const APInt *CmpC;
4157     if (match(Op1, m_APInt(CmpC))) {
4158       // A <u C -> A == C-1 if min(A)+1 == C
4159       if (*CmpC == Op0Min + 1)
4160         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4161                             ConstantInt::get(Op1->getType(), *CmpC - 1));
4162       // X <u C --> X == 0, if the number of zero bits in the bottom of X
4163       // exceeds the log2 of C.
4164       if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
4165         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4166                             Constant::getNullValue(Op1->getType()));
4167     }
4168     break;
4169   }
4170   case ICmpInst::ICMP_UGT: {
4171     if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
4172       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4173     if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
4174       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4175     if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
4176       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4177
4178     const APInt *CmpC;
4179     if (match(Op1, m_APInt(CmpC))) {
4180       // A >u C -> A == C+1 if max(A)-1 == C
4181       if (*CmpC == Op0Max - 1)
4182         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4183                             ConstantInt::get(Op1->getType(), *CmpC + 1));
4184       // X >u C --> X != 0, if the number of zero bits in the bottom of X
4185       // exceeds the log2 of C.
4186       if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
4187         return new ICmpInst(ICmpInst::ICMP_NE, Op0,
4188                             Constant::getNullValue(Op1->getType()));
4189     }
4190     break;
4191   }
4192   case ICmpInst::ICMP_SLT: {
4193     if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
4194       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4195     if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
4196       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4197     if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
4198       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4199     const APInt *CmpC;
4200     if (match(Op1, m_APInt(CmpC))) {
4201       if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
4202         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4203                             ConstantInt::get(Op1->getType(), *CmpC - 1));
4204     }
4205     break;
4206   }
4207   case ICmpInst::ICMP_SGT: {
4208     if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
4209       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4210     if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
4211       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4212     if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
4213       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4214     const APInt *CmpC;
4215     if (match(Op1, m_APInt(CmpC))) {
4216       if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
4217         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4218                             ConstantInt::get(Op1->getType(), *CmpC + 1));
4219     }
4220     break;
4221   }
4222   case ICmpInst::ICMP_SGE:
4223     assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
4224     if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
4225       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4226     if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
4227       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4228     if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
4229       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4230     break;
4231   case ICmpInst::ICMP_SLE:
4232     assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
4233     if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
4234       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4235     if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
4236       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4237     if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
4238       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4239     break;
4240   case ICmpInst::ICMP_UGE:
4241     assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
4242     if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
4243       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4244     if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
4245       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4246     if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
4247       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
4248     break;
4249   case ICmpInst::ICMP_ULE:
4250     assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
4251     if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
4252       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4253     if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
4254       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4255     if (Op1Max == Op0Min)
// A <=u B -> A == B if min(A) == max(B) 4256 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1); 4257 break; 4258 } 4259 4260 // Turn a signed comparison into an unsigned one if both operands are known to 4261 // have the same sign. 4262 if (I.isSigned() && 4263 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) || 4264 (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) 4265 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1); 4266 4267 return nullptr; 4268 } 4269 4270 /// If we have an icmp le or icmp ge instruction with a constant operand, turn 4271 /// it into the appropriate icmp lt or icmp gt instruction. This transform 4272 /// allows them to be folded in visitICmpInst. 4273 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) { 4274 ICmpInst::Predicate Pred = I.getPredicate(); 4275 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE && 4276 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE) 4277 return nullptr; 4278 4279 Value *Op0 = I.getOperand(0); 4280 Value *Op1 = I.getOperand(1); 4281 auto *Op1C = dyn_cast<Constant>(Op1); 4282 if (!Op1C) 4283 return nullptr; 4284 4285 // Check if the constant operand can be safely incremented/decremented without 4286 // overflowing/underflowing. For scalars, SimplifyICmpInst has already handled 4287 // the edge cases for us, so we just assert on them. For vectors, we must 4288 // handle the edge cases. 4289 Type *Op1Type = Op1->getType(); 4290 bool IsSigned = I.isSigned(); 4291 bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE); 4292 auto *CI = dyn_cast<ConstantInt>(Op1C); 4293 if (CI) { 4294 // A <= MAX -> TRUE ; A >= MIN -> TRUE 4295 assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned)); 4296 } else if (Op1Type->isVectorTy()) { 4297 // TODO? If the edge cases for vectors were guaranteed to be handled as they 4298 // are for scalar, we could remove the min/max checks. However, to do that, 4299 // we would have to use insertelement/shufflevector to replace edge values. 4300 unsigned NumElts = Op1Type->getVectorNumElements(); 4301 for (unsigned i = 0; i != NumElts; ++i) { 4302 Constant *Elt = Op1C->getAggregateElement(i); 4303 if (!Elt) 4304 return nullptr; 4305 4306 if (isa<UndefValue>(Elt)) 4307 continue; 4308 4309 // Bail out if we can't determine if this constant is min/max or if we 4310 // know that this constant is min/max. 4311 auto *CI = dyn_cast<ConstantInt>(Elt); 4312 if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned))) 4313 return nullptr; 4314 } 4315 } else { 4316 // ConstantExpr? 4317 return nullptr; 4318 } 4319 4320 // Increment or decrement the constant and set the new comparison predicate: 4321 // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT 4322 Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true); 4323 CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT: ICmpInst::ICMP_UGT; 4324 NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred; 4325 return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne)); 4326 } 4327 4328 /// Integer compare with boolean values can always be turned into bitwise ops. 4329 static Instruction *canonicalizeICmpBool(ICmpInst &I, 4330 InstCombiner::BuilderTy &Builder) { 4331 Value *A = I.getOperand(0), *B = I.getOperand(1); 4332 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only"); 4333 4334 // A boolean compared to true/false can be simplified to Op0/true/false in 4335 // 14 out of the 20 (10 predicates * 2 constants) possible combinations. 
4336 // Cases not handled by InstSimplify are always 'not' of Op0. 4337 if (match(B, m_Zero())) { 4338 switch (I.getPredicate()) { 4339 case CmpInst::ICMP_EQ: // A == 0 -> !A 4340 case CmpInst::ICMP_ULE: // A <=u 0 -> !A 4341 case CmpInst::ICMP_SGE: // A >=s 0 -> !A 4342 return BinaryOperator::CreateNot(A); 4343 default: 4344 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 4345 } 4346 } else if (match(B, m_One())) { 4347 switch (I.getPredicate()) { 4348 case CmpInst::ICMP_NE: // A != 1 -> !A 4349 case CmpInst::ICMP_ULT: // A <u 1 -> !A 4350 case CmpInst::ICMP_SGT: // A >s -1 -> !A 4351 return BinaryOperator::CreateNot(A); 4352 default: 4353 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 4354 } 4355 } 4356 4357 switch (I.getPredicate()) { 4358 default: 4359 llvm_unreachable("Invalid icmp instruction!"); 4360 case ICmpInst::ICMP_EQ: 4361 // icmp eq i1 A, B -> ~(A ^ B) 4362 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 4363 4364 case ICmpInst::ICMP_NE: 4365 // icmp ne i1 A, B -> A ^ B 4366 return BinaryOperator::CreateXor(A, B); 4367 4368 case ICmpInst::ICMP_UGT: 4369 // icmp ugt -> icmp ult 4370 std::swap(A, B); 4371 LLVM_FALLTHROUGH; 4372 case ICmpInst::ICMP_ULT: 4373 // icmp ult i1 A, B -> ~A & B 4374 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 4375 4376 case ICmpInst::ICMP_SGT: 4377 // icmp sgt -> icmp slt 4378 std::swap(A, B); 4379 LLVM_FALLTHROUGH; 4380 case ICmpInst::ICMP_SLT: 4381 // icmp slt i1 A, B -> A & ~B 4382 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A); 4383 4384 case ICmpInst::ICMP_UGE: 4385 // icmp uge -> icmp ule 4386 std::swap(A, B); 4387 LLVM_FALLTHROUGH; 4388 case ICmpInst::ICMP_ULE: 4389 // icmp ule i1 A, B -> ~A | B 4390 return BinaryOperator::CreateOr(Builder.CreateNot(A), B); 4391 4392 case ICmpInst::ICMP_SGE: 4393 // icmp sge -> icmp sle 4394 std::swap(A, B); 4395 LLVM_FALLTHROUGH; 4396 case ICmpInst::ICMP_SLE: 4397 // icmp sle i1 A, B -> A | ~B 4398 return BinaryOperator::CreateOr(Builder.CreateNot(B), A); 4399 } 4400 } 4401 4402 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { 4403 bool Changed = false; 4404 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 4405 unsigned Op0Cplxity = getComplexity(Op0); 4406 unsigned Op1Cplxity = getComplexity(Op1); 4407 4408 /// Orders the operands of the compare so that they are listed from most 4409 /// complex to least complex. This puts constants before unary operators, 4410 /// before binary operators. 
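  // For example, "icmp ult 5, %x" has the constant (the least complex
  // operand) on the left, so it is canonicalized to "icmp ugt %x, 5".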
4411 if (Op0Cplxity < Op1Cplxity || 4412 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) { 4413 I.swapOperands(); 4414 std::swap(Op0, Op1); 4415 Changed = true; 4416 } 4417 4418 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, 4419 SQ.getWithInstruction(&I))) 4420 return replaceInstUsesWith(I, V); 4421 4422 // Comparing -val or val with non-zero is the same as just comparing val 4423 // ie, abs(val) != 0 -> val != 0 4424 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) { 4425 Value *Cond, *SelectTrue, *SelectFalse; 4426 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue), 4427 m_Value(SelectFalse)))) { 4428 if (Value *V = dyn_castNegVal(SelectTrue)) { 4429 if (V == SelectFalse) 4430 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1); 4431 } 4432 else if (Value *V = dyn_castNegVal(SelectFalse)) { 4433 if (V == SelectTrue) 4434 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1); 4435 } 4436 } 4437 } 4438 4439 if (Op0->getType()->isIntOrIntVectorTy(1)) 4440 if (Instruction *Res = canonicalizeICmpBool(I, Builder)) 4441 return Res; 4442 4443 if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I)) 4444 return NewICmp; 4445 4446 if (Instruction *Res = foldICmpWithConstant(I)) 4447 return Res; 4448 4449 if (Instruction *Res = foldICmpUsingKnownBits(I)) 4450 return Res; 4451 4452 // Test if the ICmpInst instruction is used exclusively by a select as 4453 // part of a minimum or maximum operation. If so, refrain from doing 4454 // any other folding. This helps out other analyses which understand 4455 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution 4456 // and CodeGen. And in this case, at least one of the comparison 4457 // operands has at least one user besides the compare (the select), 4458 // which would often largely negate the benefit of folding anyway. 4459 if (I.hasOneUse()) 4460 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin())) 4461 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) || 4462 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1)) 4463 return nullptr; 4464 4465 // FIXME: We only do this after checking for min/max to prevent infinite 4466 // looping caused by a reverse canonicalization of these patterns for min/max. 4467 // FIXME: The organization of folds is a mess. These would naturally go into 4468 // canonicalizeCmpWithConstant(), but we can't move all of the above folds 4469 // down here after the min/max restriction. 4470 ICmpInst::Predicate Pred = I.getPredicate(); 4471 const APInt *C; 4472 if (match(Op1, m_APInt(C))) { 4473 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set 4474 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) { 4475 Constant *Zero = Constant::getNullValue(Op0->getType()); 4476 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero); 4477 } 4478 4479 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear 4480 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) { 4481 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType()); 4482 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes); 4483 } 4484 } 4485 4486 if (Instruction *Res = foldICmpInstWithConstant(I)) 4487 return Res; 4488 4489 if (Instruction *Res = foldICmpInstWithConstantNotInt(I)) 4490 return Res; 4491 4492 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now. 
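  // For the 'icmp P, GEP' form the operands are passed to foldGEPICmp in the
  // opposite order, so the predicate is swapped to compensate.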
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
    if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
      return NI;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
    if (Instruction *NI = foldGEPICmp(
            GEP, Op0, ICmpInst::getSwappedPredicate(I.getPredicate()), I))
      return NI;

  // Try to optimize equality comparisons against alloca-based pointers.
  if (Op0->getType()->isPointerTy() && I.isEquality()) {
    assert(Op1->getType()->isPointerTy() &&
           "Comparing pointer with non-pointer?");
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
        return New;
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
        return New;
  }

  // Test to see if the operands of the icmp are casted versions of other
  // values. If the ptr->ptr cast can be stripped off both arguments, we do so
  // now.
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
    if (Op0->getType()->isPointerTy() &&
        (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
      // We keep moving the cast from the left operand over to the right
      // operand, where it can often be eliminated completely.
      Op0 = CI->getOperand(0);

      // If operand #1 is a bitcast instruction, it must also be a ptr->ptr
      // cast, so eliminate it as well.
      if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
        Op1 = CI2->getOperand(0);

      // If Op1 is a constant, we can fold the cast into the constant.
      if (Op0->getType() != Op1->getType()) {
        if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
          Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
        } else {
          // Otherwise, cast the RHS right before the icmp
          Op1 = Builder.CreateBitCast(Op1, Op0->getType());
        }
      }
      return new ICmpInst(I.getPredicate(), Op0, Op1);
    }
  }

  if (isa<CastInst>(Op0)) {
    // Handle the special case of: icmp (cast bool to X), <cst>
    // This comes up when you have code like
    //   int X = A < B;
    //   if (X) ...
    // For generality, we handle any zero-extension of any operand comparison
    // with a constant or another cast from the same type.
    if (isa<Constant>(Op1) || isa<CastInst>(Op1))
      if (Instruction *R = foldICmpWithCastAndCast(I))
        return R;
  }

  if (Instruction *Res = foldICmpBinOp(I))
    return Res;

  if (Instruction *Res = foldICmpWithMinMax(I))
    return Res;

  {
    Value *A, *B;
    // Transform (A & ~B) == 0 --> (A & B) != 0
    // and       (A & ~B) != 0 --> (A & B) == 0
    // if A is a power of 2.
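    // (When A has exactly one bit set, that bit is clear in ~B exactly when
    // it is set in B, so the two forms test logically inverted conditions.)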
    if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
        match(Op1, m_Zero()) &&
        isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
      return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
                          Op1);

    // ~X < ~Y --> Y < X
    // ~X < C  --> X > ~C
    if (match(Op0, m_Not(m_Value(A)))) {
      if (match(Op1, m_Not(m_Value(B))))
        return new ICmpInst(I.getPredicate(), B, A);

      const APInt *C;
      if (match(Op1, m_APInt(C)))
        return new ICmpInst(I.getSwappedPredicate(), A,
                            ConstantInt::get(Op1->getType(), ~(*C)));
    }

    Instruction *AddI = nullptr;
    if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
                                     m_Instruction(AddI))) &&
        isa<IntegerType>(A->getType())) {
      Value *Result;
      Constant *Overflow;
      if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
                                Overflow)) {
        replaceInstUsesWith(*AddI, Result);
        return replaceInstUsesWith(I, Overflow);
      }
    }

    // (zext a) * (zext b)  --> llvm.umul.with.overflow.
    if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
        return R;
    }
    if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
        return R;
    }
  }

  if (Instruction *Res = foldICmpEquality(I))
    return Res;

  // The 'cmpxchg' instruction returns an aggregate containing the old value
  // and an i1 which indicates whether or not we successfully did the swap.
  //
  // Replace comparisons between the old value and the expected value with the
  // indicator that 'cmpxchg' returns.
  //
  // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
  // spuriously fail. In those cases, the old value may equal the expected
  // value but it is possible for the swap to not occur.
  if (I.getPredicate() == ICmpInst::ICMP_EQ)
    if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
      if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
        if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
            !ACXI->isWeak())
          return ExtractValueInst::Create(ACXI, 1);

  {
    Value *X; ConstantInt *Cst;
    // icmp X+Cst, X
    if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
      return foldICmpAddOpConst(X, Cst, I.getPredicate());

    // icmp X, X+Cst
    if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
      return foldICmpAddOpConst(X, Cst, I.getSwappedPredicate());
  }
  return Changed ? &I : nullptr;
}

/// Fold fcmp ([us]itofp x, cst) if possible.
Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                                Constant *RHSC) {
  if (!isa<ConstantFP>(RHSC)) return nullptr;
  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();

  // Get the width of the mantissa. We don't want to hack on conversions that
  // might lose information from the integer, e.g. "i64 -> float"
  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
  if (MantissaWidth == -1) return nullptr;  // Unknown.
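  // For example, an IEEE single has an effective 24-bit significand, so an
  // i32 value such as 16777217 (2^24 + 1) rounds to 16777216.0f when
  // converted; comparisons performed in FP could then give a different answer
  // than the integer comparison they were derived from.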

  IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

  bool LHSUnsigned = isa<UIToFPInst>(LHSI);

  if (I.isEquality()) {
    FCmpInst::Predicate P = I.getPredicate();
    bool IsExact = false;
    APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
    RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);

    // If the floating point constant isn't an integer value, the integer LHS
    // can never compare equal to it, so equality folds to false and
    // inequality folds to true.
    if (!IsExact) {
      // TODO: Can never be -0.0 and other non-representable values
      APFloat RHSRoundInt(RHS);
      RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
      if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
        if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
          return replaceInstUsesWith(I, Builder.getFalse());

        assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
        return replaceInstUsesWith(I, Builder.getTrue());
      }
    }

    // TODO: If the constant is exactly representable, is it always OK to do
    // equality compares as integer?
  }

  // Check that the input is converted from an integer type that is small
  // enough to preserve all of its bits. TODO: check here for "known" sign
  // bits. This would allow us to handle (fptosi (x >>s 62) to float) if x is
  // i64, for example.
  unsigned InputSize = IntTy->getScalarSizeInBits();

  // The following test does NOT adjust InputSize downwards for signed inputs,
  // because the most negative value still requires all the mantissa bits
  // to distinguish it from one less than that value.
  if ((int)InputSize > MantissaWidth) {
    // Conversion would lose accuracy. Check if loss can impact comparison.
    int Exp = ilogb(RHS);
    if (Exp == APFloat::IEK_Inf) {
      int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
      if (MaxExponent < (int)InputSize - !LHSUnsigned)
        // Conversion could create infinity.
        return nullptr;
    } else {
      // Note that if RHS is zero or NaN, then Exp is negative
      // and the first condition is trivially false.
      if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
        // Conversion could affect comparison.
        return nullptr;
    }
  }

  // Otherwise, we can potentially simplify the comparison. We know that it
  // will always come through as an integer value and we know the constant is
  // not a NaN (it would have been previously simplified).
  assert(!RHS.isNaN() && "NaN comparison not already folded!");

  ICmpInst::Predicate Pred;
  switch (I.getPredicate()) {
  default: llvm_unreachable("Unexpected predicate!");
  case FCmpInst::FCMP_UEQ:
  case FCmpInst::FCMP_OEQ:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_OGT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
    break;
  case FCmpInst::FCMP_UGE:
  case FCmpInst::FCMP_OGE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    break;
  case FCmpInst::FCMP_ULT:
  case FCmpInst::FCMP_OLT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
    break;
  case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_OLE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
    break;
  case FCmpInst::FCMP_UNE:
  case FCmpInst::FCMP_ONE:
    Pred = ICmpInst::ICMP_NE;
    break;
  case FCmpInst::FCMP_ORD:
    return replaceInstUsesWith(I, Builder.getTrue());
  case FCmpInst::FCMP_UNO:
    return replaceInstUsesWith(I, Builder.getFalse());
  }

  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer. For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison. This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics());
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) {  // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics());
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) {  // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }

  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // See if the RHS value is < UnsignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
          Pred == ICmpInst::ICMP_UGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }

  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional. See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
    ? ConstantExpr::getFPToUI(RHSC, IntTy)
    : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
      ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
      : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value. RHSC is rounded towards
      // zero at this point.
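      // For example, with an unsigned operand, '(float)x <= 4.4' becomes
      // 'x <= 4', and '(float)x < 4.4' also becomes 'x <= 4' (ULT/SLT are
      // flipped to ULE/SLE below when the constant is positive).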
      switch (Pred) {
      default: llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
        return replaceInstUsesWith(I, Builder.getTrue());
      case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
        return replaceInstUsesWith(I, Builder.getFalse());
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> false
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4   --> int <= 4
        // (float)int <= -4.4  --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4   --> false
        // (float)int < 4.4    --> int <= 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4   --> int < -4
        // (float)int < 4.4    --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> true
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4    --> int > 4
        // (float)int > -4.4   --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4  --> true
        // (float)int >= 4.4   --> int > 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4  --> int >= -4
        // (float)int >= 4.4   --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}

Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  /// Order the operands of the compare so that the more complex operand (per
  /// getComplexity) comes first. In practice this moves constants to the RHS,
  /// since constants rank below unary operators, which rank below binary
  /// operators.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }

  const CmpInst::Predicate Pred = I.getPredicate();
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Simplify 'fcmp pred X, X'
  if (Op0 == Op1) {
    switch (Pred) {
    default: break;
    case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT:    // True if unordered or less than
    case FCmpInst::FCMP_UGT:    // True if unordered or greater than
    case FCmpInst::FCMP_UNE:    // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
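      // (With identical operands, each of these predicates can only be true
      // when X is NaN, i.e. when the operands are unordered.)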
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ:    // True if ordered and equal
    case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
  // then canonicalize the operand to 0.0.
  if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    if (!match(Op0, m_Zero()) && isKnownNeverNaN(Op0)) {
      I.setOperand(0, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
    if (!match(Op1, m_Zero()) && isKnownNeverNaN(Op1)) {
      I.setOperand(1, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
  }

  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return nullptr;

  // Handle fcmp with constant RHS
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
        FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
        ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
        if (!RHSF)
          break;

        const fltSemantics *Sem;
        // FIXME: This shouldn't be here.
        if (LHSExt->getSrcTy()->isHalfTy())
          Sem = &APFloat::IEEEhalf();
        else if (LHSExt->getSrcTy()->isFloatTy())
          Sem = &APFloat::IEEEsingle();
        else if (LHSExt->getSrcTy()->isDoubleTy())
          Sem = &APFloat::IEEEdouble();
        else if (LHSExt->getSrcTy()->isFP128Ty())
          Sem = &APFloat::IEEEquad();
        else if (LHSExt->getSrcTy()->isX86_FP80Ty())
          Sem = &APFloat::x87DoubleExtended();
        else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
          Sem = &APFloat::PPCDoubleDouble();
        else
          break;

        bool Lossy;
        APFloat F = RHSF->getValueAPF();
        F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

        // Avoid lossy conversions and denormals. Zero is a special case
        // that's OK to convert.
        APFloat Fabs = F;
        Fabs.clearSign();
        if (!Lossy &&
            ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
              APFloat::cmpLessThan) || Fabs.isZero()))
          return new FCmpInst(Pred, LHSExt->getOperand(0),
                              ConstantFP::get(RHSC->getContext(), F));
        break;
      }
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
        break;
      }
      case Instruction::Load:
        if (GetElementPtrInst *GEP =
              dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
        if (IID != Intrinsic::fabs)
          break;

        // Various optimizations for fabs compared with zero.
        switch (Pred) {
        default:
          break;
        // fabs(x) < 0 --> false
        case FCmpInst::FCMP_OLT:
          llvm_unreachable("handled by SimplifyFCmpInst");
        // fabs(x) > 0 --> x != 0
        case FCmpInst::FCMP_OGT:
          return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
        // fabs(x) <= 0 --> x == 0
        case FCmpInst::FCMP_OLE:
          return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
        // fabs(x) >= 0 --> !isnan(x)
        case FCmpInst::FCMP_OGE:
          return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
        // fabs(x) == 0 --> x == 0
        // fabs(x) != 0 --> x != 0
        case FCmpInst::FCMP_OEQ:
        case FCmpInst::FCMP_UEQ:
        case FCmpInst::FCMP_ONE:
        case FCmpInst::FCMP_UNE:
          return new FCmpInst(Pred, CI->getArgOperand(0), RHSC);
        }
      }
      }
  }

  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(Pred, LHSExt->getOperand(0), RHSExt->getOperand(0));

  return Changed ? &I : nullptr;
}