//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");

static ConstantInt *extractElement(Constant *V, Constant *Idx) {
  return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}

static bool hasAddOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ult(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().sgt(In1->getValue());
  return Result->getValue().slt(In1->getValue());
}

/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getAdd(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasAddOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasAddOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}

static bool hasSubOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ugt(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().slt(In1->getValue());

  return Result->getValue().sgt(In1->getValue());
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
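/// Illustrative example (not from the original comments): for i8,
/// -100 - 100 wraps to 56; since In2 is non-negative and 56 s> -100,
/// hasSubOverflow reports the overflow.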
static bool subWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getSub(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasSubOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasSubOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool isBranchOnSignBitCheck(ICmpInst &I, bool isSignBit) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return isSignBit;
  return false;
}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if
/// the result of the comparison is true when the input value is signed.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT: // True if LHS s< 0
    TrueIfSigned = true;
    return RHS.isNullValue();
  case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT: // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isSignMask();
  default:
    return false;
  }
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
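  // Worked example (illustrative, i4): Known.One = 0b0100 and Known.Zero =
  // 0b0001 give UnknownBits = 0b1010. Min starts as 0b0100 and Max as 0b1110;
  // the unknown sign bit then forces Min = 0b1100 (-4) and Max = 0b0110 (6).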
  Min = Known.One;
  Max = Known.One|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}

/// Given an unsigned integer type and a set of known zero and one bits,
/// compute the maximum and minimum values that could have the specified known
/// zero and known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One|UnknownBits;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        CmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle; for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index. Collect the indices. This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr; // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.
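  // (Illustrative: scanning [9, 9, 5, 9] for "A[i] == 5" ends with
  // FirstTrueElement == 2 and SecondTrueElement still Undefined, so the
  // whole load/compare folds to "i == 2".)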

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  // TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  // define a state machine that triggers for ranges of values that the index
  // is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  // This is -2 when undefined, -3 when overdefined, and otherwise the last
  // index in the range (inclusive). We use -2 for undefined here because we
  // use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 or fewer elements in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element;
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for big arrays.
    if ((i & 7) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in order of the complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly. We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder->CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder->getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder->getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
    Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
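  // (Illustrative: for "abbbbc"[i] == 'b', FirstTrueElement == 1 and
  // TrueRangeEnd == 4, so this emits (i-1) u< 4.)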
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder->CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder->CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - Otherwise, the smallest legal integer type that fits
    //   (if there is none, we give up below)
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder->CreateIntCast(Idx, Ty, false);
      V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are
/// allowed to generate the first by knowing that pointer arithmetic doesn't
/// overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
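      // (E.g., indexing field 1 of {i8, i32} contributes the field's layout
      // offset (typically 4 with natural alignment), not an index*size
      // product.)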
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If there are, emit the
  // offset the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to IntPtrTy in case a truncation occurs. If an extension is
    // needed, we don't need to bother extending: the extension won't affect
    // where the computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
                                            true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset");
}

/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
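///
/// (Illustrative: a pointer produced by a chain of single-index inbounds GEPs
/// and no-op int<->ptr casts rooted at Base, possibly flowing through PHIs,
/// can be re-expressed as Base plus an accumulated integer index.)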
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = dyn_cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (User *U : Val->users()) {
      auto *PHI = dyn_cast<PHINode>(U);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {
    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      // No-op casts carry the index through unchanged.
      NewInsts[CI] = NewInsts[CI->getOperand(0)];
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEP's pointer and index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getPointerTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common
/// base between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // rewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this
/// point we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed
  // overflow and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0),
                            GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in
      // type and both GEPs have only constant indices or just one use, then
      // fold the compare with the adjusted indices.
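      // (Illustrative: comparing &((i32*)%p)[1] with &((i8*)%p)[4] reduces to
      // comparing the byte offsets, 4 and 4, once both bases strip back
      // to %p.)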
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized
        // address spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                         LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try to convert this to an indexed compare by looking
      // through PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    if (GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
          Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts
  // as a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and
  // we can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
        // These intrinsics don't escape or compare the pointer. Memset is
        // safe because we don't allow ptrtoint. Memcpy and memmove are safe
        // because we don't allow stores, so src cannot point to V.
        case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
        case Intrinsic::dbg_declare: case Intrinsic::dbg_value:
        case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
          continue;
        default:
          return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}

/// Fold "icmp pred (X+CI), X".
Instruction *InstCombiner::foldICmpAddOpConst(Instruction &ICI,
                                              Value *X, ConstantInt *CI,
                                              ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // operators.

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X u> 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Value *R =
        ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));

  unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
  ConstantInt *SMax = ConstantInt::get(X->getContext(),
                                       APInt::getSignedMaxValue(BitWidth));

  // (X+ 1) <s X       --> X >s (MAXSINT-1)        --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)        --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)  --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)  --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)      --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)      --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X,
                        ConstantExpr::getSub(SMax, CI));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  Constant *C = Builder->getInt(CI->getValue()-1);
  return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
}

/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
/// (icmp eq/ne A, Log2(AP2/AP1)) ->
/// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the
      // LHS of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting AP2 by any amount can never equal AP1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting AP2 by any amount can never equal AP1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare must be its only
  // use.
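  // (Worked instance, not from the original comments: for the i8 pattern in
  // the header comment, CI2 == 128 == 2^7, so NewWidth below becomes 8, and
  // CI1 must be 255, i.e. the low 8 bits set in the wider type.)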
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes down the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(I.getModule(),
                                       Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy *Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder->SetInsertPoint(OrigAdd);

  Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder->CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}

// Fold icmp Pred X, C.
Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Cmp.getOperand(0);

  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  Value *A = nullptr, *B = nullptr;

  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an
  // addition in a wider type and explicitly checks for overflow using
  // comparisons against INT_MIN and INT_MAX. Simplify by using the
  // sadd_with_overflow intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  // sum = a + b
  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  {
    ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
    if (Pred == ICmpInst::ICMP_UGT &&
        match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
      if (Instruction *Res = processUGT_ADDCST_ADD(
              Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
        return Res;
  }

  // (icmp sgt smin(PosA, B), 0) -> (icmp sgt B, 0)
  if (C->isNullValue() && Pred == ICmpInst::ICMP_SGT) {
    SelectPatternResult SPR = matchSelectPattern(X, A, B);
    if (SPR.Flavor == SPF_SMIN) {
      if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }

  // FIXME: Use m_APInt to allow folds for splat constants.
  ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1));
  if (!CI)
    return nullptr;

  // Canonicalize icmp instructions based on dominating conditions.
  BasicBlock *Parent = Cmp.getParent();
  BasicBlock *Dom = Parent->getSinglePredecessor();
  auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr;
  ICmpInst::Predicate Pred2;
  BasicBlock *TrueBB, *FalseBB;
  ConstantInt *CI2;
  if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)),
                           TrueBB, FalseBB)) &&
      TrueBB != FalseBB) {
    ConstantRange CR =
        ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue());
    ConstantRange DominatingCR =
        (Parent == TrueBB)
            ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue())
            : ConstantRange::makeExactICmpRegion(
                  CmpInst::getInversePredicate(Pred2), CI2->getValue());
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder->getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder->getTrue());

    // If this is a normal comparison, it demands all bits. If it is a sign
    // bit comparison, it only demands the sign bit.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit);

    // Canonicalizing a sign bit comparison that gets used in a branch
    // pessimizes codegen by generating a branch-on-zero instruction instead
    // of a test-and-branch, so we avoid canonicalizing in such situations:
    // the test-and-branch instruction has better branch displacement than the
    // compare-and-branch instruction.
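    // (Illustrative: a dominating branch on (X u< 2) followed by (X u> 0) in
    // its true block gives Intersection == {1}, which the code below
    // canonicalizes to (X == 1).)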
    if (!isBranchOnSignBitCheck(Cmp, IsSignBit) && !Cmp.isEquality()) {
      if (auto *AI = Intersection.getSingleElement())
        return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder->getInt(*AI));
      if (auto *AD = Difference.getSingleElement())
        return new ICmpInst(ICmpInst::ICMP_NE, X, Builder->getInt(*AD));
    }
  }

  return nullptr;
}

/// Fold icmp (trunc X, Y), C.
Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
                                                 Instruction *Trunc,
                                                 const APInt *C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  if (C->isOneValue() && C->getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  if (Cmp.isEquality() && Trunc->hasOneUse()) {
    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
             SrcBits = X->getType()->getScalarSizeInBits();
    KnownBits Known = computeKnownBits(X, 0, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
      // Pull in the high bits from the known-ones set.
      APInt NewRHS = C->zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
    }
  }

  return nullptr;
}

/// Fold icmp (xor X, Y), C.
Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
                                               BinaryOperator *Xor,
                                               const APInt *C) {
  Value *X = Xor->getOperand(0);
  Value *Y = Xor->getOperand(1);
  const APInt *XorC;
  if (!match(Y, m_APInt(XorC)))
    return nullptr;

  // If this is a comparison that tests the signbit (X < 0) or (X > -1),
  // fold the xor.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if ((Pred == ICmpInst::ICMP_SLT && C->isNullValue()) ||
      (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())) {

    // If the sign bit of the XorCst is not set, there is no change to
    // the operation, just stop using the Xor.
    if (!XorC->isNegative()) {
      Cmp.setOperand(0, X);
      Worklist.Add(Xor);
      return &Cmp;
    }

    // Was the old condition true if the operand is positive?
    bool isTrueIfPositive = Pred == ICmpInst::ICMP_SGT;

    // If so, the new one isn't.
    isTrueIfPositive ^= true;

    Constant *CmpConstant = cast<Constant>(Cmp.getOperand(1));
    if (isTrueIfPositive)
      return new ICmpInst(ICmpInst::ICMP_SGT, X, SubOne(CmpConstant));
    else
      return new ICmpInst(ICmpInst::ICMP_SLT, X, AddOne(CmpConstant));
  }

  if (Xor->hasOneUse()) {
    // (icmp u/s (xor X, SignMask), C) -> (icmp s/u X, (xor C, SignMask))
    if (!Cmp.isEquality() && XorC->isSignMask()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
    }

    // (icmp u/s (xor X, ~SignMask), C) -> (icmp s/u X, (xor C, ~SignMask))
    if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      Pred = Cmp.getSwappedPredicate(Pred);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
    }
  }

  // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && *XorC == ~(*C) && (*C + 1).isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);

  // (icmp ult (xor X, C), -C) -> (icmp uge X, C)
  //   iff -C is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && *XorC == -(*C) && C->isPowerOf2())
    return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);

  return nullptr;
}

/// Fold icmp (and (sh X, Y), C2), C1.
Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
                                            const APInt *C1, const APInt *C2) {
  BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
  if (!Shift || !Shift->isShift())
    return nullptr;

  // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
  // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
  // code produced by the clang front-end, for bitfield access.
  // This seemingly simple opportunity to fold away a shift turns out to be
  // rather complicated. See PR17827 for details.
  unsigned ShiftOpcode = Shift->getOpcode();
  bool IsShl = ShiftOpcode == Instruction::Shl;
  const APInt *C3;
  if (match(Shift->getOperand(1), m_APInt(C3))) {
    bool CanFold = false;
    if (ShiftOpcode == Instruction::AShr) {
      // There may be some constraints that make this possible, but nothing
      // simple has been discovered yet.
      CanFold = false;
    } else if (ShiftOpcode == Instruction::Shl) {
      // For a left shift, we can fold if the comparison is not signed. We can
      // also fold a signed comparison if the mask value and comparison value
      // are not negative. These constraints may not be obvious, but we can
      // prove that they are correct using an SMT solver.
      if (!Cmp.isSigned() || (!C2->isNegative() && !C1->isNegative()))
        CanFold = true;
    } else if (ShiftOpcode == Instruction::LShr) {
      // For a logical right shift, we can fold if the comparison is not
      // signed. We can also fold a signed comparison if the shifted mask
      // value and the shifted comparison value are not negative. These
      // constraints may not be obvious, but we can prove that they are
      // correct using an SMT solver.
      if (!Cmp.isSigned() ||
          (!C2->shl(*C3).isNegative() && !C1->shl(*C3).isNegative()))
        CanFold = true;
    }

    if (CanFold) {
      APInt NewCst = IsShl ? C1->lshr(*C3) : C1->shl(*C3);
      APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
      // Check to see if we are shifting out any of the bits being compared.
      if (SameAsC1 != *C1) {
        // If we shifted bits out, the fold is not going to work out. As a
        // special case, check to see if this means that the result is always
        // true or false now.
        if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
          return replaceInstUsesWith(Cmp,
                                     ConstantInt::getFalse(Cmp.getType()));
        if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
          return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
      } else {
        Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
        APInt NewAndCst = IsShl ? C2->lshr(*C3) : C2->shl(*C3);
        And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
        And->setOperand(0, Shift->getOperand(0));
        Worklist.Add(Shift); // Shift is dead.
        return &Cmp;
      }
    }
  }

  // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is
  // preferable because it allows the C2 << Y expression to be hoisted out of a
  // loop if Y is invariant and X is not.
  if (Shift->hasOneUse() && C1->isNullValue() && Cmp.isEquality() &&
      !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
    // Compute C2 << Y.
    Value *NewShift =
        IsShl ? Builder->CreateLShr(And->getOperand(1), Shift->getOperand(1))
              : Builder->CreateShl(And->getOperand(1), Shift->getOperand(1));

    // Compute X & (C2 << Y).
    Value *NewAnd = Builder->CreateAnd(Shift->getOperand(0), NewShift);
    Cmp.setOperand(0, NewAnd);
    return &Cmp;
  }

  return nullptr;
}

/// Fold icmp (and X, C2), C1.
Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
                                                 BinaryOperator *And,
                                                 const APInt *C1) {
  const APInt *C2;
  if (!match(And->getOperand(1), m_APInt(C2)))
    return nullptr;

  if (!And->hasOneUse() || !And->getOperand(0)->hasOneUse())
    return nullptr;

  // If the LHS is an 'and' of a truncate and we can widen the and/compare to
  // the input width without changing the value produced, eliminate the cast:
  //
  // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
  //
  // We can do this transformation if the constants do not have their sign
  // bits set or if it is an equality comparison. Extending a relational
  // comparison when we're checking the sign bit would not work.
  Value *W;
  if (match(And->getOperand(0), m_Trunc(m_Value(W))) &&
      (Cmp.isEquality() || (!C1->isNegative() && !C2->isNegative()))) {
    // TODO: Is this a good transform for vectors? Wider types may reduce
    // throughput. Should this transform be limited (even for scalars) by
    // using shouldChangeType()?
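    // Worked example (illustrative):
    //   'icmp ult (and (trunc i32 %w to i8), 14), 4'
    // becomes 'icmp ult (and i32 %w, 14), 4'. Zero-extending both constants
    // is safe here because neither one has its sign bit set in the narrow
    // type.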
    if (!Cmp.getType()->isVectorTy()) {
      Type *WideType = W->getType();
      unsigned WideScalarBits = WideType->getScalarSizeInBits();
      Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits));
      Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
      Value *NewAnd = Builder->CreateAnd(W, ZextC2, And->getName());
      return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
    }
  }

  if (Instruction *I = foldICmpAndShift(Cmp, And, C1, C2))
    return I;

  // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
  // (icmp pred (and A, (or (shl 1, B), 1)), 0)
  //
  // iff pred isn't signed
  if (!Cmp.isSigned() && C1->isNullValue() &&
      match(And->getOperand(1), m_One())) {
    Constant *One = cast<Constant>(And->getOperand(1));
    Value *Or = And->getOperand(0);
    Value *A, *B, *LShr;
    if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
        match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
      unsigned UsesRemoved = 0;
      if (And->hasOneUse())
        ++UsesRemoved;
      if (Or->hasOneUse())
        ++UsesRemoved;
      if (LShr->hasOneUse())
        ++UsesRemoved;

      // Compute A & ((1 << B) | 1)
      Value *NewOr = nullptr;
      if (auto *C = dyn_cast<Constant>(B)) {
        if (UsesRemoved >= 1)
          NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
      } else {
        if (UsesRemoved >= 3)
          NewOr = Builder->CreateOr(Builder->CreateShl(One, B, LShr->getName(),
                                                       /*HasNUW=*/true),
                                    One, Or->getName());
      }
      if (NewOr) {
        Value *NewAnd = Builder->CreateAnd(A, NewOr, And->getName());
        Cmp.setOperand(0, NewAnd);
        return &Cmp;
      }
    }
  }

  // (X & C2) > C1 --> (X & C2) != 0, if any bit set in (X & C2) will produce
  // a result greater than C1.
  unsigned NumTZ = C2->countTrailingZeros();
  if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && NumTZ < C2->getBitWidth() &&
      APInt::getOneBitSet(C2->getBitWidth(), NumTZ).ugt(*C1)) {
    Constant *Zero = Constant::getNullValue(And->getType());
    return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
  }

  return nullptr;
}

/// Fold icmp (and X, Y), C.
Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
                                               BinaryOperator *And,
                                               const APInt *C) {
  if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
    return I;

  // TODO: These all require that Y is constant too, so refactor with the
  // above.

  // Try to optimize things like "A[i] & 42 == 0" to index computations.
  Value *X = And->getOperand(0);
  Value *Y = And->getOperand(1);
  if (auto *LI = dyn_cast<LoadInst>(X))
    if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
      if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
        if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
            !LI->isVolatile() && isa<ConstantInt>(Y)) {
          ConstantInt *C2 = cast<ConstantInt>(Y);
          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
            return Res;
        }

  if (!Cmp.isEquality())
    return nullptr;

  // X & -C == -C -> X u> ~C
  // X & -C != -C -> X u<= ~C
  //   iff C is a power of 2
  if (Cmp.getOperand(1) == Y && (-(*C)).isPowerOf2()) {
    auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
                                                          : CmpInst::ICMP_ULE;
    return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
  }

  // (X & C2) == 0 -> (trunc X) >= 0
  // (X & C2) != 0 -> (trunc X) <  0
  //   iff C2 is a power of 2 and it masks the sign bit of a legal integer
  //   type.
  const APInt *C2;
  if (And->hasOneUse() && C->isNullValue() && match(Y, m_APInt(C2))) {
    int32_t ExactLogBase2 = C2->exactLogBase2();
    if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
      Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
      if (And->getType()->isVectorTy())
        NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
      Value *Trunc = Builder->CreateTrunc(X, NTy);
      auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
                                                            : CmpInst::ICMP_SLT;
      return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
    }
  }

  return nullptr;
}

/// Fold icmp (or X, Y), C.
Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
                                              const APInt *C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if (C->isOneValue()) {
    // icmp slt signum(V) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  // X | C == C --> X <=u C
  // X | C != C --> X  >u C
  //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
  if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) &&
      (*C + 1).isPowerOf2()) {
    Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
    return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1));
  }

  if (!Cmp.isEquality() || !C->isNullValue() || !Or->hasOneUse())
    return nullptr;

  Value *P, *Q;
  if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
    // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
    // -> and (icmp eq P, null), (icmp eq Q, null).
    Value *CmpP =
        Builder->CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
    Value *CmpQ =
        Builder->CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
    auto LogicOpc = Pred == ICmpInst::Predicate::ICMP_EQ ? Instruction::And
                                                         : Instruction::Or;
    return BinaryOperator::Create(LogicOpc, CmpP, CmpQ);
  }

  return nullptr;
}

/// Fold icmp (mul X, Y), C.
Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp,
                                               BinaryOperator *Mul,
                                               const APInt *C) {
  const APInt *MulC;
  if (!match(Mul->getOperand(1), m_APInt(MulC)))
    return nullptr;

  // If this is a test of the sign bit and the multiply is sign-preserving
  // with a constant operand, use the multiply LHS operand instead.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if (isSignTest(Pred, *C) && Mul->hasNoSignedWrap()) {
    if (MulC->isNegative())
      Pred = ICmpInst::getSwappedPredicate(Pred);
    return new ICmpInst(Pred, Mul->getOperand(0),
                        Constant::getNullValue(Mul->getType()));
  }

  return nullptr;
}

/// Fold icmp (shl 1, Y), C.
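/// For example (illustrative, i32): '(1 << Y) == 8' becomes 'Y == 3', and
/// '(1 << Y) s> -1' becomes 'Y != 31'.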
static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
                                   const APInt *C) {
  Value *Y;
  if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
    return nullptr;

  Type *ShiftType = Shl->getType();
  uint32_t TypeBits = C->getBitWidth();
  bool CIsPowerOf2 = C->isPowerOf2();
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if (Cmp.isUnsigned()) {
    // (1 << Y) pred C -> Y pred Log2(C)
    if (!CIsPowerOf2) {
      // (1 << Y) <  30 -> Y <= 4
      // (1 << Y) <= 30 -> Y <= 4
      // (1 << Y) >= 30 -> Y >  4
      // (1 << Y) >  30 -> Y >  4
      if (Pred == ICmpInst::ICMP_ULT)
        Pred = ICmpInst::ICMP_ULE;
      else if (Pred == ICmpInst::ICMP_UGE)
        Pred = ICmpInst::ICMP_UGT;
    }

    // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
    // (1 << Y) <  2147483648 -> Y <  31 -> Y != 31
    unsigned CLog2 = C->logBase2();
    if (CLog2 == TypeBits - 1) {
      if (Pred == ICmpInst::ICMP_UGE)
        Pred = ICmpInst::ICMP_EQ;
      else if (Pred == ICmpInst::ICMP_ULT)
        Pred = ICmpInst::ICMP_NE;
    }
    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
  } else if (Cmp.isSigned()) {
    Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
    if (C->isAllOnesValue()) {
      // (1 << Y) <= -1 -> Y == 31
      if (Pred == ICmpInst::ICMP_SLE)
        return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);

      // (1 << Y) > -1 -> Y != 31
      if (Pred == ICmpInst::ICMP_SGT)
        return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
    } else if (C->isNullValue()) {
      // (1 << Y) <  0 -> Y == 31
      // (1 << Y) <= 0 -> Y == 31
      if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
        return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);

      // (1 << Y) >= 0 -> Y != 31
      // (1 << Y) >  0 -> Y != 31
      if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
        return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
    }
  } else if (Cmp.isEquality() && CIsPowerOf2) {
    return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C->logBase2()));
  }

  return nullptr;
}

/// Fold icmp (shl X, Y), C.
Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
                                               BinaryOperator *Shl,
                                               const APInt *C) {
  const APInt *ShiftVal;
  if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
    return foldICmpShlConstConst(Cmp, Shl->getOperand(1), *C, *ShiftVal);

  const APInt *ShiftAmt;
  if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
    return foldICmpShlOne(Cmp, Shl, C);

  // Check that the shift amount is in range. If not, don't perform undefined
  // shifts. When the shift is visited, it will be simplified.
  unsigned TypeBits = C->getBitWidth();
  if (ShiftAmt->uge(TypeBits))
    return nullptr;

  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Shl->getOperand(0);
  Type *ShType = Shl->getType();

  // NSW guarantees that we are only shifting out sign bits from the high
  // bits, so we can ASHR the compare constant without needing a mask and
  // eliminate the shift.
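  // Worked example (illustrative, i8): 'icmp sgt (shl nsw %x, 3), 40' becomes
  // 'icmp sgt %x, 5', since 40 ashr 3 == 5 and nsw guarantees that only sign
  // bits were shifted out.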
  if (Shl->hasNoSignedWrap()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
      APInt ShiftedC = C->ashr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
      // This is the same code as the SGT case, but assert the pre-condition
      // that is needed for this to work with equality predicates.
      assert(C->ashr(*ShiftAmt).shl(*ShiftAmt) == *C &&
             "Compare known true or false was not folded");
      APInt ShiftedC = C->ashr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_SLT) {
      // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
      // (X << S) <=s C is equiv to X <=s (C >> S) for all C
      // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
      // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
      assert(!C->isMinSignedValue() && "Unexpected icmp slt");
      APInt ShiftedC = (*C - 1).ashr(*ShiftAmt) + 1;
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    // If this is a signed comparison to 0 and the shift is sign preserving,
    // use the shift LHS operand instead; isSignTest may change 'Pred', so only
    // do that if we're sure to not continue on in this function.
    if (isSignTest(Pred, *C))
      return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
  }

  // NUW guarantees that we are only shifting out zero bits from the high
  // bits, so we can LSHR the compare constant without needing a mask and
  // eliminate the shift.
  if (Shl->hasNoUnsignedWrap()) {
    if (Pred == ICmpInst::ICMP_UGT) {
      // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
      APInt ShiftedC = C->lshr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) {
      // This is the same code as the UGT case, but assert the pre-condition
      // that is needed for this to work with equality predicates.
      assert(C->lshr(*ShiftAmt).shl(*ShiftAmt) == *C &&
             "Compare known true or false was not folded");
      APInt ShiftedC = C->lshr(*ShiftAmt);
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
    if (Pred == ICmpInst::ICMP_ULT) {
      // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
      // (X << S) <=u C is equiv to X <=u (C >> S) for all C
      // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
      // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
      assert(C->ugt(0) && "ult 0 should have been eliminated");
      APInt ShiftedC = (*C - 1).lshr(*ShiftAmt) + 1;
      return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
    }
  }

  if (Cmp.isEquality() && Shl->hasOneUse()) {
    // Strength-reduce the shift into an 'and'.
    Constant *Mask = ConstantInt::get(
        ShType,
        APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
    Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
    Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt));
    return new ICmpInst(Pred, And, LShrC);
  }

  // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
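  // Worked example (illustrative, i32): 'icmp slt (shl X, 29), 0' is true iff
  // bit 2 of X is set, so it becomes 'icmp ne (X & 4), 0'.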
  bool TrueIfSigned = false;
  if (Shl->hasOneUse() && isSignBitCheck(Pred, *C, TrueIfSigned)) {
    // (X << 31) <s 0  --> (X & 1) != 0
    Constant *Mask = ConstantInt::get(
        ShType,
        APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
    Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask");
    return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
                        And, Constant::getNullValue(ShType));
  }

  // Transform (icmp pred iM (shl iM %v, N), C)
  // -> (icmp pred i(M-N) (trunc %v to i(M-N)), (trunc (C>>N)))
  // Transform the shl to a trunc if (trunc (C>>N)) has no loss and i(M-N) is
  // a legal integer type.
  // This enables us to get rid of the shift in favor of a trunc that may be
  // free on the target. It has the additional benefit of comparing to a
  // smaller constant that may be more target-friendly.
  unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
  if (Shl->hasOneUse() && Amt != 0 && C->countTrailingZeros() >= Amt &&
      DL.isLegalInteger(TypeBits - Amt)) {
    Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
    if (ShType->isVectorTy())
      TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
    Constant *NewC =
        ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt));
    return new ICmpInst(Pred, Builder->CreateTrunc(X, TruncTy), NewC);
  }

  return nullptr;
}

/// Fold icmp ({al}shr X, Y), C.
Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
                                               BinaryOperator *Shr,
                                               const APInt *C) {
  // An exact shr only shifts out zero bits, so:
  // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
  Value *X = Shr->getOperand(0);
  CmpInst::Predicate Pred = Cmp.getPredicate();
  if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
      C->isNullValue())
    return new ICmpInst(Pred, X, Cmp.getOperand(1));

  const APInt *ShiftVal;
  if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
    return foldICmpShrConstConst(Cmp, Shr->getOperand(1), *C, *ShiftVal);

  const APInt *ShiftAmt;
  if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
    return nullptr;

  // Check that the shift amount is in range. If not, don't perform undefined
  // shifts. When the shift is visited it will be simplified.
  unsigned TypeBits = C->getBitWidth();
  unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
  if (ShAmtVal >= TypeBits || ShAmtVal == 0)
    return nullptr;

  bool IsAShr = Shr->getOpcode() == Instruction::AShr;
  if (!Cmp.isEquality()) {
    // If we have an unsigned comparison and an ashr, we can't simplify this.
    // Similarly for signed comparisons with lshr.
    if (Cmp.isSigned() != IsAShr)
      return nullptr;

    // Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
    // by a power of 2. Since we already have logic to simplify these,
    // transform to div and then simplify the resultant comparison.
    if (IsAShr && (!Shr->isExact() || ShAmtVal == TypeBits - 1))
      return nullptr;

    // Revisit the shift (to delete it).
    Worklist.Add(Shr);

    Constant *DivCst = ConstantInt::get(
        Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));
    Value *Tmp = IsAShr ? Builder->CreateSDiv(X, DivCst, "", Shr->isExact())
                        : Builder->CreateUDiv(X, DivCst, "", Shr->isExact());

    Cmp.setOperand(0, Tmp);

    // If the builder folded the binop, just return it.
    BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
    if (!TheDiv)
      return &Cmp;

    // Otherwise, fold this div/compare.
    assert(TheDiv->getOpcode() == Instruction::SDiv ||
           TheDiv->getOpcode() == Instruction::UDiv);

    Instruction *Res = foldICmpDivConstant(Cmp, TheDiv, C);
    assert(Res && "This div/cst should have folded!");
    return Res;
  }

  // Handle equality comparisons of shift-by-constant.

  // If the comparison constant changes with the shift, the comparison cannot
  // succeed (bits of the comparison constant cannot match the shifted value).
  // This should be known by InstSimplify and already be folded to true/false.
  assert(((IsAShr && C->shl(ShAmtVal).ashr(ShAmtVal) == *C) ||
          (!IsAShr && C->shl(ShAmtVal).lshr(ShAmtVal) == *C)) &&
         "Expected icmp+shr simplify did not occur.");

  // Check if the bits shifted out are known to be zero. If so, we can compare
  // against the unshifted value:
  //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
  Constant *ShiftedCmpRHS = ConstantInt::get(Shr->getType(), *C << ShAmtVal);
  if (Shr->hasOneUse()) {
    if (Shr->isExact())
      return new ICmpInst(Pred, X, ShiftedCmpRHS);

    // Otherwise strength reduce the shift into an 'and'.
    APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
    Constant *Mask = ConstantInt::get(Shr->getType(), Val);
    Value *And = Builder->CreateAnd(X, Mask, Shr->getName() + ".mask");
    return new ICmpInst(Pred, And, ShiftedCmpRHS);
  }

  return nullptr;
}

/// Fold icmp (udiv X, Y), C.
Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp,
                                                BinaryOperator *UDiv,
                                                const APInt *C) {
  const APInt *C2;
  if (!match(UDiv->getOperand(0), m_APInt(C2)))
    return nullptr;

  assert(*C2 != 0 && "udiv 0, X should have been simplified already.");

  // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
  Value *Y = UDiv->getOperand(1);
  if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
    assert(!C->isMaxValue() &&
           "icmp ugt X, UINT_MAX should have been simplified already.");
    return new ICmpInst(ICmpInst::ICMP_ULE, Y,
                        ConstantInt::get(Y->getType(), C2->udiv(*C + 1)));
  }

  // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
  if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
    assert(*C != 0 && "icmp ult X, 0 should have been simplified already.");
    return new ICmpInst(ICmpInst::ICMP_UGT, Y,
                        ConstantInt::get(Y->getType(), C2->udiv(*C)));
  }

  return nullptr;
}

/// Fold icmp ({su}div X, Y), C.
Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
                                               BinaryOperator *Div,
                                               const APInt *C) {
  // Fold: icmp pred ([us]div X, C2), C -> range test
  // Fold this div into the comparison, producing a range check.
  // Determine, based on the divide type, what the range is being
  // checked. If there is an overflow on the low or high side, remember
  // it, otherwise compute the range [low, hi) bounding the new value.
  // See: InsertRangeTest above for the kinds of replacements possible.
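  // Worked example (illustrative): 'icmp eq (udiv X, 5), 3' holds exactly
  // when X is in [15, 20), so it can be replaced by a range test of the form
  // '(X - 15) u< 5'.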
  const APInt *C2;
  if (!match(Div->getOperand(1), m_APInt(C2)))
    return nullptr;

  // FIXME: If the operand types don't match the type of the divide
  // then don't attempt this transform. The code below doesn't have the
  // logic to deal with a signed divide and an unsigned compare (and
  // vice versa). This is because (x /s C2) <s C produces different
  // results than (x /s C2) <u C or (x /u C2) <s C or even
  // (x /u C2) <u C. Simply casting the operands and result won't
  // work. :( The if statement below tests that condition and bails
  // if it finds it.
  bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
  if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
    return nullptr;

  // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
  // INT_MIN will also fail if the divisor is 1. Although folds of all these
  // division-by-constant cases should be present, we cannot assert that they
  // have happened before we reach this icmp instruction.
  if (C2->isNullValue() || C2->isOneValue() ||
      (DivIsSigned && C2->isAllOnesValue()))
    return nullptr;

  // TODO: We could do all of the computations below using APInt.
  Constant *CmpRHS = cast<Constant>(Cmp.getOperand(1));
  Constant *DivRHS = cast<Constant>(Div->getOperand(1));

  // Compute Prod = CmpRHS * DivRHS. We are essentially solving an equation of
  // form X / C2 = C. We solve for X by multiplying C2 (DivRHS) and C (CmpRHS).
  // By solving for X, we can turn this into a range check instead of computing
  // a divide.
  Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);

  // Determine if the product overflows by checking whether dividing it by the
  // divisor gives back CmpRHS. Make sure we do the same kind of divide as in
  // the LHS instruction that we're folding.
  bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS)
                             : ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;

  ICmpInst::Predicate Pred = Cmp.getPredicate();

  // If the division is known to be exact, then there is no remainder from the
  // divide, so the covered range size is unit, otherwise it is the divisor.
  Constant *RangeSize =
      Div->isExact() ? ConstantInt::get(Div->getType(), 1) : DivRHS;

  // Figure out the interval that is being checked. For example, a comparison
  // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
  // Compute this interval based on the constants involved and the signedness
  // of the compare/divide. This computes a half-open interval, keeping track
  // of whether either value in the interval overflows. After analysis each
  // overflow variable is set to 0 if its corresponding bound variable is
  // valid, -1 if overflowed off the bottom end, or +1 if overflowed off the
  // top end.
  int LoOverflow = 0, HiOverflow = 0;
  Constant *LoBound = nullptr, *HiBound = nullptr;

  if (!DivIsSigned) { // udiv
    // e.g. X/5 op 3  --> [15, 20)
    LoBound = Prod;
    HiOverflow = LoOverflow = ProdOV;
    if (!HiOverflow) {
      // If this is not an exact divide, then many values in the range
      // collapse to the same result value.
      HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
    }
  } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
    if (C->isNullValue()) { // (X / pos) op 0
      // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
      LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
      HiBound = RangeSize;
    } else if (C->isStrictlyPositive()) { // (X / pos) op pos
      LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
      HiOverflow = LoOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
    } else { // (X / pos) op neg
      // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
      HiBound = AddOne(Prod);
      LoOverflow = HiOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow) {
        Constant *DivNeg = ConstantExpr::getNeg(RangeSize);
        LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
      }
    }
  } else if (C2->isNegative()) { // Divisor is < 0.
    if (Div->isExact())
      RangeSize = ConstantExpr::getNeg(RangeSize);
    if (C->isNullValue()) { // (X / neg) op 0
      // e.g. X/-5 op 0 --> [-4, 5)
      LoBound = AddOne(RangeSize);
      HiBound = ConstantExpr::getNeg(RangeSize);
      if (HiBound == DivRHS) { // -INTMIN = INTMIN
        HiOverflow = 1;        // [INTMIN+1, overflow)
        HiBound = nullptr;     // e.g. X/INTMIN = 0 --> X > INTMIN
      }
    } else if (C->isStrictlyPositive()) { // (X / neg) op pos
      // e.g. X/-5 op 3  --> [-19, -14)
      HiBound = AddOne(Prod);
      HiOverflow = LoOverflow = ProdOV ? -1 : 0;
      if (!LoOverflow)
        LoOverflow =
            addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
    } else { // (X / neg) op neg
      LoBound = Prod; // e.g. X/-5 op -3  --> [15, 20)
      LoOverflow = HiOverflow = ProdOV;
      if (!HiOverflow)
        HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
    }

    // Dividing by a negative swaps the condition. LT <-> GT
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  Value *X = Div->getOperand(0);
  switch (Pred) {
  default:
    llvm_unreachable("Unhandled icmp opcode!");
  case ICmpInst::ICMP_EQ:
    if (LoOverflow && HiOverflow)
      return replaceInstUsesWith(Cmp, Builder->getFalse());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE
                                      : ICmpInst::ICMP_UGE,
                          X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT,
                          X, HiBound);
    return replaceInstUsesWith(
        Cmp, insertRangeTest(X, LoBound->getUniqueInteger(),
                             HiBound->getUniqueInteger(), DivIsSigned, true));
  case ICmpInst::ICMP_NE:
    if (LoOverflow && HiOverflow)
      return replaceInstUsesWith(Cmp, Builder->getTrue());
    if (HiOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT
                                      : ICmpInst::ICMP_ULT,
                          X, LoBound);
    if (LoOverflow)
      return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE
                                      : ICmpInst::ICMP_UGE,
                          X, HiBound);
    return replaceInstUsesWith(Cmp,
                               insertRangeTest(X, LoBound->getUniqueInteger(),
                                               HiBound->getUniqueInteger(),
                                               DivIsSigned, false));
  case ICmpInst::ICMP_ULT:
  case ICmpInst::ICMP_SLT:
    if (LoOverflow == +1) // Low bound is greater than input range.
      return replaceInstUsesWith(Cmp, Builder->getTrue());
    if (LoOverflow == -1) // Low bound is less than input range.
      return replaceInstUsesWith(Cmp, Builder->getFalse());
    return new ICmpInst(Pred, X, LoBound);
  case ICmpInst::ICMP_UGT:
  case ICmpInst::ICMP_SGT:
    if (HiOverflow == +1) // High bound greater than input range.
      return replaceInstUsesWith(Cmp, Builder->getFalse());
    if (HiOverflow == -1) // High bound less than input range.
      return replaceInstUsesWith(Cmp, Builder->getTrue());
    if (Pred == ICmpInst::ICMP_UGT)
      return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
    return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
  }

  return nullptr;
}

/// Fold icmp (sub X, Y), C.
Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
                                               BinaryOperator *Sub,
                                               const APInt *C) {
  Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
  ICmpInst::Predicate Pred = Cmp.getPredicate();

  // The following transforms are only worth it if the only user of the
  // subtract is the icmp.
  if (!Sub->hasOneUse())
    return nullptr;

  if (Sub->hasNoSignedWrap()) {
    // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
    if (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())
      return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);

    // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
    if (Pred == ICmpInst::ICMP_SGT && C->isNullValue())
      return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);

    // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
    if (Pred == ICmpInst::ICMP_SLT && C->isNullValue())
      return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);

    // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
    if (Pred == ICmpInst::ICMP_SLT && C->isOneValue())
      return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
  }

  const APInt *C2;
  if (!match(X, m_APInt(C2)))
    return nullptr;

  // C2 - Y <u C -> (Y | (C - 1)) == C2
  //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() &&
      (*C2 & (*C - 1)) == (*C - 1))
    return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateOr(Y, *C - 1), X);

  // C2 - Y >u C -> (Y | C) != C2
  //   iff C2 & C == C and C + 1 is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C)
    return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateOr(Y, *C), X);

  return nullptr;
}

/// Fold icmp (add X, Y), C.
Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
                                               BinaryOperator *Add,
                                               const APInt *C) {
  Value *Y = Add->getOperand(1);
  const APInt *C2;
  if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
    return nullptr;

  // Fold icmp pred (add X, C2), C.
  Value *X = Add->getOperand(0);
  Type *Ty = Add->getType();
  CmpInst::Predicate Pred = Cmp.getPredicate();

  // If the add does not wrap, we can always adjust the compare by subtracting
  // the constants. Equality comparisons are handled elsewhere. SGE/SLE are
  // canonicalized to SGT/SLT.
  if (Add->hasNoSignedWrap() &&
      (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) {
    bool Overflow;
    APInt NewC = C->ssub_ov(*C2, Overflow);
    // If there is overflow, the result must be true or false.
    // TODO: Can we assert there is no overflow because InstSimplify always
    // handles those cases?
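    // Worked example (illustrative, i8): 'icmp slt (add nsw X, 10), 3'
    // becomes 'icmp slt X, -7', since 3 - 10 == -7 does not overflow.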
    if (!Overflow)
      // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
      return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
  }

  auto CR = ConstantRange::makeExactICmpRegion(Pred, *C).subtract(*C2);
  const APInt &Upper = CR.getUpper();
  const APInt &Lower = CR.getLower();
  if (Cmp.isSigned()) {
    if (Lower.isSignMask())
      return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
    if (Upper.isSignMask())
      return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
  } else {
    if (Lower.isMinValue())
      return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
    if (Upper.isMinValue())
      return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
  }

  if (!Add->hasOneUse())
    return nullptr;

  // (X + C2) <u C --> (X & -C) == -C2
  //   iff C2 & (C - 1) == 0 and C is a power of 2
  if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0)
    return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateAnd(X, -(*C)),
                        ConstantExpr::getNeg(cast<Constant>(Y)));

  // (X + C2) >u C --> (X & ~C) != -C2
  //   iff C2 & C == 0 and C + 1 is a power of 2
  if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0)
    return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateAnd(X, ~(*C)),
                        ConstantExpr::getNeg(cast<Constant>(Y)));

  return nullptr;
}

/// Try to fold integer comparisons with a constant operand: icmp Pred X, C
/// where X is some kind of instruction.
Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) {
  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  BinaryOperator *BO;
  if (match(Cmp.getOperand(0), m_BinOp(BO))) {
    switch (BO->getOpcode()) {
    case Instruction::Xor:
      if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::And:
      if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Or:
      if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Mul:
      if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Shl:
      if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::LShr:
    case Instruction::AShr:
      if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::UDiv:
      if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
        return I;
      LLVM_FALLTHROUGH;
    case Instruction::SDiv:
      if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Sub:
      if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
        return I;
      break;
    case Instruction::Add:
      if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
        return I;
      break;
    default:
      break;
    }
    // TODO: These folds could be refactored to be part of the above calls.
    if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
      return I;
  }

  Instruction *LHSI;
  if (match(Cmp.getOperand(0), m_Instruction(LHSI)) &&
      LHSI->getOpcode() == Instruction::Trunc)
    if (Instruction *I = foldICmpTruncConstant(Cmp, LHSI, C))
      return I;

  if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, C))
    return I;

  return nullptr;
}

/// Fold an icmp equality instruction with binary operator LHS and constant
/// RHS: icmp eq/ne BO, C.
Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
                                                             BinaryOperator *BO,
                                                             const APInt *C) {
  // TODO: Some of these folds could work with arbitrary constants, but this
  // function is limited to scalar and vector splat constants.
  if (!Cmp.isEquality())
    return nullptr;

  ICmpInst::Predicate Pred = Cmp.getPredicate();
  bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
  Constant *RHS = cast<Constant>(Cmp.getOperand(1));
  Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);

  switch (BO->getOpcode()) {
  case Instruction::SRem:
    // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
    if (C->isNullValue() && BO->hasOneUse()) {
      const APInt *BOC;
      if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
        Value *NewRem = Builder->CreateURem(BOp0, BOp1, BO->getName());
        return new ICmpInst(Pred, NewRem,
                            Constant::getNullValue(BO->getType()));
      }
    }
    break;
  case Instruction::Add: {
    // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC))) {
      if (BO->hasOneUse()) {
        Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
        return new ICmpInst(Pred, BOp0, SubC);
      }
    } else if (C->isNullValue()) {
      // Replace ((add A, B) != 0) with (A != -B) if A or B is
      // efficiently invertible, or if the add has just this one use.
      if (Value *NegVal = dyn_castNegVal(BOp1))
        return new ICmpInst(Pred, BOp0, NegVal);
      if (Value *NegVal = dyn_castNegVal(BOp0))
        return new ICmpInst(Pred, NegVal, BOp1);
      if (BO->hasOneUse()) {
        Value *Neg = Builder->CreateNeg(BOp1);
        Neg->takeName(BO);
        return new ICmpInst(Pred, BOp0, Neg);
      }
    }
    break;
  }
  case Instruction::Xor:
    if (BO->hasOneUse()) {
      if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
        // For the xor case, we can xor two constants together, eliminating
        // the explicit xor.
        return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
      } else if (C->isNullValue()) {
        // Replace ((xor A, B) != 0) with (A != B)
        return new ICmpInst(Pred, BOp0, BOp1);
      }
    }
    break;
  case Instruction::Sub:
    if (BO->hasOneUse()) {
      const APInt *BOC;
      if (match(BOp0, m_APInt(BOC))) {
        // Replace ((sub BOC, B) != C) with (B != BOC-C).
        Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
        return new ICmpInst(Pred, BOp1, SubC);
      } else if (C->isNullValue()) {
        // Replace ((sub A, B) != 0) with (A != B).
        return new ICmpInst(Pred, BOp0, BOp1);
      }
    }
    break;
  case Instruction::Or: {
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
      // Comparing if all bits outside of a constant mask are set?
      // Replace (X | C) == -1 with (X & ~C) == ~C.
      // This removes the -1 constant.
      Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
      Value *And = Builder->CreateAnd(BOp0, NotBOC);
      return new ICmpInst(Pred, And, NotBOC);
    }
    break;
  }
  case Instruction::And: {
    const APInt *BOC;
    if (match(BOp1, m_APInt(BOC))) {
      // If we have ((X & C) == C), turn it into ((X & C) != 0).
      if (C == BOC && C->isPowerOf2())
        return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
                            BO, Constant::getNullValue(RHS->getType()));

      // Don't perform the following transforms if the AND has multiple uses.
      if (!BO->hasOneUse())
        break;

      // Replace ((and X, (1 << size(X)-1)) != 0) with X s< 0
      if (BOC->isSignMask()) {
        Constant *Zero = Constant::getNullValue(BOp0->getType());
        auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
        return new ICmpInst(NewPred, BOp0, Zero);
      }

      // ((X & ~7) == 0) --> X < 8
      if (C->isNullValue() && (~(*BOC) + 1).isPowerOf2()) {
        Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1));
        auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
        return new ICmpInst(NewPred, BOp0, NegBOC);
      }
    }
    break;
  }
  case Instruction::Mul:
    if (C->isNullValue() && BO->hasNoSignedWrap()) {
      const APInt *BOC;
      if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) {
        // The trivial case (mul X, 0) is handled by InstSimplify.
        // General case : (mul X, C) != 0 iff X != 0
        //                (mul X, C) == 0 iff X == 0
        return new ICmpInst(Pred, BOp0,
                            Constant::getNullValue(RHS->getType()));
      }
    }
    break;
  case Instruction::UDiv:
    if (C->isNullValue()) {
      // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
      auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
      return new ICmpInst(NewPred, BOp1, BOp0);
    }
    break;
  default:
    break;
  }
  return nullptr;
}

/// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
                                                         const APInt *C) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0));
  if (!II || !Cmp.isEquality())
    return nullptr;

  // Handle icmp {eq|ne} <intrinsic>, intcst.
  switch (II->getIntrinsicID()) {
  case Intrinsic::bswap:
    Worklist.Add(II);
    Cmp.setOperand(0, II->getArgOperand(0));
    Cmp.setOperand(1, Builder->getInt(C->byteSwap()));
    return &Cmp;
  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    // ctz(A) == bitwidth(A) -> A == 0 and likewise for !=
    if (*C == C->getBitWidth()) {
      Worklist.Add(II);
      Cmp.setOperand(0, II->getArgOperand(0));
      Cmp.setOperand(1, ConstantInt::getNullValue(II->getType()));
      return &Cmp;
    }
    break;
  case Intrinsic::ctpop: {
    // popcount(A) == 0 -> A == 0 and likewise for !=
    // popcount(A) == bitwidth(A) -> A == -1 and likewise for !=
    bool IsZero = C->isNullValue();
    if (IsZero || *C == C->getBitWidth()) {
      Worklist.Add(II);
      Cmp.setOperand(0, II->getArgOperand(0));
      auto *NewOp = IsZero ? Constant::getNullValue(II->getType())
                           : Constant::getAllOnesValue(II->getType());
      Cmp.setOperand(1, NewOp);
      return &Cmp;
    }
    break;
  }
  default:
    break;
  }
  return nullptr;
}

/// Handle icmp with constant (but not simple integer constant) RHS.
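/// For example (illustrative): 'icmp eq (getelementptr %p, 0, 0), null' can
/// become 'icmp eq %p, null', and 'icmp eq (inttoptr %x), null' can become
/// 'icmp eq %x, 0' when the integer and pointer widths match.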
Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Constant *RHSC = dyn_cast<Constant>(Op1);
  Instruction *LHSI = dyn_cast<Instruction>(Op0);
  if (!RHSC || !LHSI)
    return nullptr;

  switch (LHSI->getOpcode()) {
  case Instruction::GetElementPtr:
    // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
    if (RHSC->isNullValue() &&
        cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
      return new ICmpInst(
          I.getPredicate(), LHSI->getOperand(0),
          Constant::getNullValue(LHSI->getOperand(0)->getType()));
    break;
  case Instruction::PHI:
    // Only fold icmp into the PHI if the phi and icmp are in the same
    // block. If in the same block, we're encouraging jump threading. If
    // not, we are just pessimizing the code by making an i1 phi.
    if (LHSI->getParent() == I.getParent())
      if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
        return NV;
    break;
  case Instruction::Select: {
    // If either operand of the select is a constant, we can fold the
    // comparison into the select arms, which will cause one to be
    // constant folded and the select turned into a bitwise or.
    Value *Op1 = nullptr, *Op2 = nullptr;
    ConstantInt *CI = nullptr;
    if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
      Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
      CI = dyn_cast<ConstantInt>(Op1);
    }
    if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
      Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
      CI = dyn_cast<ConstantInt>(Op2);
    }

    // We only want to perform this transformation if it will not lead to
    // additional code. This is true if either both sides of the select
    // fold to a constant (in which case the icmp is replaced with a select
    // which will usually simplify) or this is the only user of the
    // select (in which case we are trading a select+icmp for a simpler
    // select+icmp) or all uses of the select can be replaced based on
    // dominance information ("Global cases").
    bool Transform = false;
    if (Op1 && Op2)
      Transform = true;
    else if (Op1 || Op2) {
      // Local case
      if (LHSI->hasOneUse())
        Transform = true;
      // Global cases
      else if (CI && !CI->isZero())
        // When Op1 is constant, try replacing select with second operand.
        // Otherwise Op2 is constant and try replacing select with first
        // operand.
        Transform =
            replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
    }
    if (Transform) {
      if (!Op1)
        Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
                                  I.getName());
      if (!Op2)
        Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
                                  I.getName());
      return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
    }
    break;
  }
  case Instruction::IntToPtr:
    // icmp pred inttoptr(X), null -> icmp pred X, 0
    if (RHSC->isNullValue() &&
        DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
      return new ICmpInst(
          I.getPredicate(), LHSI->getOperand(0),
          Constant::getNullValue(LHSI->getOperand(0)->getType()));
    break;

  case Instruction::Load:
    // Try to optimize things like "A[i] > 4" to index computations.
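    // Worked example (illustrative): if the load reads from a constant global
    // array {1, 2, 3, 4, 5}, then 'A[i] > 4' only holds at index 4, so the
    // compare can become 'icmp eq i, 4'.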
    if (GetElementPtrInst *GEP =
            dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
        if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
            !cast<LoadInst>(LHSI)->isVolatile())
          if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
            return Res;
    }
    break;
  }

  return nullptr;
}

/// Try to fold icmp (binop), X or icmp X, (binop).
/// TODO: A large part of this logic is duplicated in InstSimplify's
/// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
/// duplication.
Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // Special logic for binary operators.
  BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
  BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
  if (!BO0 && !BO1)
    return nullptr;

  const CmpInst::Predicate Pred = I.getPredicate();
  bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
  if (BO0 && isa<OverflowingBinaryOperator>(BO0))
    NoOp0WrapProblem =
        ICmpInst::isEquality(Pred) ||
        (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
        (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
  if (BO1 && isa<OverflowingBinaryOperator>(BO1))
    NoOp1WrapProblem =
        ICmpInst::isEquality(Pred) ||
        (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
        (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());

  // Analyze the case when either Op0 or Op1 is an add instruction.
  // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
  if (BO0 && BO0->getOpcode() == Instruction::Add) {
    A = BO0->getOperand(0);
    B = BO0->getOperand(1);
  }
  if (BO1 && BO1->getOpcode() == Instruction::Add) {
    C = BO1->getOperand(0);
    D = BO1->getOperand(1);
  }

  // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
  if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
    return new ICmpInst(Pred, A == Op1 ? B : A,
                        Constant::getNullValue(Op1->getType()));

  // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
  if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
    return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
                        C == Op0 ? D : C);

  // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
  if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
      NoOp1WrapProblem &&
      // Try not to increase register pressure.
      BO0->hasOneUse() && BO1->hasOneUse()) {
    // Determine Y and Z in the form icmp (X+Y), (X+Z).
    Value *Y, *Z;
    if (A == C) {
      // C + B == C + D -> B == D
      Y = B;
      Z = D;
    } else if (A == D) {
      // D + B == C + D -> B == C
      Y = B;
      Z = C;
    } else if (B == C) {
      // A + C == C + D -> A == D
      Y = A;
      Z = D;
    } else {
      assert(B == D);
      // A + D == C + D -> A == C
      Y = A;
      Z = C;
    }
    return new ICmpInst(Pred, Y, Z);
  }

  // icmp slt (X + -1), Y -> icmp sle X, Y
  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
      match(B, m_AllOnes()))
    return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);

  // icmp sge (X + -1), Y -> icmp sgt X, Y
  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
      match(B, m_AllOnes()))
    return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);

  // icmp sle (X + 1), Y -> icmp slt X, Y
  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
    return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);

  // icmp sgt (X + 1), Y -> icmp sge X, Y
  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
    return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);

  // icmp sgt X, (Y + -1) -> icmp sge X, Y
  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
      match(D, m_AllOnes()))
    return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);

  // icmp sle X, (Y + -1) -> icmp slt X, Y
  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
      match(D, m_AllOnes()))
    return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);

  // icmp sge X, (Y + 1) -> icmp sgt X, Y
  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
    return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);

  // icmp slt X, (Y + 1) -> icmp sle X, Y
  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
    return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);

  // TODO: The subtraction-related identities shown below also hold, but
  // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
  // wouldn't happen even if they were implemented.
  //
  // icmp ult (X - 1), Y -> icmp ule X, Y
  // icmp uge (X - 1), Y -> icmp ugt X, Y
  // icmp ugt X, (Y - 1) -> icmp uge X, Y
  // icmp ule X, (Y - 1) -> icmp ult X, Y

  // icmp ule (X + 1), Y -> icmp ult X, Y
  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
    return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);

  // icmp ugt (X + 1), Y -> icmp uge X, Y
  if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
    return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);

  // icmp uge X, (Y + 1) -> icmp ugt X, Y
  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
    return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);

  // icmp ult X, (Y + 1) -> icmp ule X, Y
  if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
    return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);

  // if C1 has greater magnitude than C2:
  //   icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
  //   s.t. C3 = C1 - C2
  //
  // if C2 has greater magnitude than C1:
  //   icmp (X + C1), (Y + C2) -> icmp X, (Y + C3)
C3 = C2 - C1 2933 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem && 2934 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) 2935 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B)) 2936 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) { 2937 const APInt &AP1 = C1->getValue(); 2938 const APInt &AP2 = C2->getValue(); 2939 if (AP1.isNegative() == AP2.isNegative()) { 2940 APInt AP1Abs = C1->getValue().abs(); 2941 APInt AP2Abs = C2->getValue().abs(); 2942 if (AP1Abs.uge(AP2Abs)) { 2943 ConstantInt *C3 = Builder->getInt(AP1 - AP2); 2944 Value *NewAdd = Builder->CreateNSWAdd(A, C3); 2945 return new ICmpInst(Pred, NewAdd, C); 2946 } else { 2947 ConstantInt *C3 = Builder->getInt(AP2 - AP1); 2948 Value *NewAdd = Builder->CreateNSWAdd(C, C3); 2949 return new ICmpInst(Pred, A, NewAdd); 2950 } 2951 } 2952 } 2953 2954 // Analyze the case when either Op0 or Op1 is a sub instruction. 2955 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null). 2956 A = nullptr; 2957 B = nullptr; 2958 C = nullptr; 2959 D = nullptr; 2960 if (BO0 && BO0->getOpcode() == Instruction::Sub) { 2961 A = BO0->getOperand(0); 2962 B = BO0->getOperand(1); 2963 } 2964 if (BO1 && BO1->getOpcode() == Instruction::Sub) { 2965 C = BO1->getOperand(0); 2966 D = BO1->getOperand(1); 2967 } 2968 2969 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow. 2970 if (A == Op1 && NoOp0WrapProblem) 2971 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B); 2972 2973 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow. 2974 if (C == Op0 && NoOp1WrapProblem) 2975 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType())); 2976 2977 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow. 2978 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem && 2979 // Try not to increase register pressure. 2980 BO0->hasOneUse() && BO1->hasOneUse()) 2981 return new ICmpInst(Pred, A, C); 2982 2983 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow. 2984 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem && 2985 // Try not to increase register pressure. 2986 BO0->hasOneUse() && BO1->hasOneUse()) 2987 return new ICmpInst(Pred, D, B); 2988 2989 // icmp (0-X) < cst --> x > -cst 2990 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) { 2991 Value *X; 2992 if (match(BO0, m_Neg(m_Value(X)))) 2993 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) 2994 if (!RHSC->isMinValue(/*isSigned=*/true)) 2995 return new ICmpInst(I.getSwappedPredicate(), X, 2996 ConstantExpr::getNeg(RHSC)); 2997 } 2998 2999 BinaryOperator *SRem = nullptr; 3000 // icmp (srem X, Y), Y 3001 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1)) 3002 SRem = BO0; 3003 // icmp Y, (srem X, Y) 3004 else if (BO1 && BO1->getOpcode() == Instruction::SRem && 3005 Op0 == BO1->getOperand(1)) 3006 SRem = BO1; 3007 if (SRem) { 3008 // We don't check hasOneUse to avoid increasing register pressure because 3009 // the value we use is the same value this instruction was already using. 3010 switch (SRem == BO0 ? 
ICmpInst::getSwappedPredicate(Pred) : Pred) { 3011 default: 3012 break; 3013 case ICmpInst::ICMP_EQ: 3014 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 3015 case ICmpInst::ICMP_NE: 3016 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 3017 case ICmpInst::ICMP_SGT: 3018 case ICmpInst::ICMP_SGE: 3019 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1), 3020 Constant::getAllOnesValue(SRem->getType())); 3021 case ICmpInst::ICMP_SLT: 3022 case ICmpInst::ICMP_SLE: 3023 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1), 3024 Constant::getNullValue(SRem->getType())); 3025 } 3026 } 3027 3028 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() && 3029 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) { 3030 switch (BO0->getOpcode()) { 3031 default: 3032 break; 3033 case Instruction::Add: 3034 case Instruction::Sub: 3035 case Instruction::Xor: { 3036 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b 3037 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3038 3039 const APInt *C; 3040 if (match(BO0->getOperand(1), m_APInt(C))) { 3041 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b 3042 if (C->isSignMask()) { 3043 ICmpInst::Predicate NewPred = 3044 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3045 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3046 } 3047 3048 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b 3049 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) { 3050 ICmpInst::Predicate NewPred = 3051 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3052 NewPred = I.getSwappedPredicate(NewPred); 3053 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3054 } 3055 } 3056 break; 3057 } 3058 case Instruction::Mul: { 3059 if (!I.isEquality()) 3060 break; 3061 3062 const APInt *C; 3063 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() && 3064 !C->isOneValue()) { 3065 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask) 3066 // Mask = -1 >> count-trailing-zeros(C). 
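// For example, for i8 operands and C = 12 (0b1100, two trailing zeros),
// Mask = 0x3F: X*12 == Y*12 (mod 256) holds exactly when the low six bits
// of X and Y agree, because the remaining odd factor 3 is invertible mod 64.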
3067 if (unsigned TZs = C->countTrailingZeros()) { 3068 Constant *Mask = ConstantInt::get( 3069 BO0->getType(), 3070 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs)); 3071 Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask); 3072 Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask); 3073 return new ICmpInst(Pred, And1, And2); 3074 } 3075 // If there are no trailing zeros in the multiplier, just eliminate 3076 // the multiplies (no masking is needed): 3077 // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y 3078 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3079 } 3080 break; 3081 } 3082 case Instruction::UDiv: 3083 case Instruction::LShr: 3084 if (I.isSigned() || !BO0->isExact() || !BO1->isExact()) 3085 break; 3086 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3087 3088 case Instruction::SDiv: 3089 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact()) 3090 break; 3091 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3092 3093 case Instruction::AShr: 3094 if (!BO0->isExact() || !BO1->isExact()) 3095 break; 3096 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3097 3098 case Instruction::Shl: { 3099 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap(); 3100 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap(); 3101 if (!NUW && !NSW) 3102 break; 3103 if (!NSW && I.isSigned()) 3104 break; 3105 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3106 } 3107 } 3108 } 3109 3110 if (BO0) { 3111 // Transform A & (L - 1) `ult` L --> L != 0 3112 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes()); 3113 auto BitwiseAnd = 3114 m_CombineOr(m_And(m_Value(), LSubOne), m_And(LSubOne, m_Value())); 3115 3116 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) { 3117 auto *Zero = Constant::getNullValue(BO0->getType()); 3118 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero); 3119 } 3120 } 3121 3122 return nullptr; 3123 } 3124 3125 /// Fold icmp Pred min|max(X, Y), X. 3126 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) { 3127 ICmpInst::Predicate Pred = Cmp.getPredicate(); 3128 Value *Op0 = Cmp.getOperand(0); 3129 Value *X = Cmp.getOperand(1); 3130 3131 // Canonicalize minimum or maximum operand to LHS of the icmp. 
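// For example, "icmp sgt %x, smin(%x, %y)" is swapped to
// "icmp slt smin(%x, %y), %x" so the folds below only need to match Op0.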
3132 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) || 3133 match(X, m_c_SMax(m_Specific(Op0), m_Value())) || 3134 match(X, m_c_UMin(m_Specific(Op0), m_Value())) || 3135 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) { 3136 std::swap(Op0, X); 3137 Pred = Cmp.getSwappedPredicate(); 3138 } 3139 3140 Value *Y; 3141 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) { 3142 // smin(X, Y) == X --> X s<= Y 3143 // smin(X, Y) s>= X --> X s<= Y 3144 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE) 3145 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 3146 3147 // smin(X, Y) != X --> X s> Y 3148 // smin(X, Y) s< X --> X s> Y 3149 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT) 3150 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 3151 3152 // These cases should be handled in InstSimplify: 3153 // smin(X, Y) s<= X --> true 3154 // smin(X, Y) s> X --> false 3155 return nullptr; 3156 } 3157 3158 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) { 3159 // smax(X, Y) == X --> X s>= Y 3160 // smax(X, Y) s<= X --> X s>= Y 3161 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE) 3162 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 3163 3164 // smax(X, Y) != X --> X s< Y 3165 // smax(X, Y) s> X --> X s< Y 3166 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT) 3167 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 3168 3169 // These cases should be handled in InstSimplify: 3170 // smax(X, Y) s>= X --> true 3171 // smax(X, Y) s< X --> false 3172 return nullptr; 3173 } 3174 3175 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) { 3176 // umin(X, Y) == X --> X u<= Y 3177 // umin(X, Y) u>= X --> X u<= Y 3178 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE) 3179 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y); 3180 3181 // umin(X, Y) != X --> X u> Y 3182 // umin(X, Y) u< X --> X u> Y 3183 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT) 3184 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y); 3185 3186 // These cases should be handled in InstSimplify: 3187 // umin(X, Y) u<= X --> true 3188 // umin(X, Y) u> X --> false 3189 return nullptr; 3190 } 3191 3192 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) { 3193 // umax(X, Y) == X --> X u>= Y 3194 // umax(X, Y) u<= X --> X u>= Y 3195 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE) 3196 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 3197 3198 // umax(X, Y) != X --> X u< Y 3199 // umax(X, Y) u> X --> X u< Y 3200 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT) 3201 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 3202 3203 // These cases should be handled in InstSimplify: 3204 // umax(X, Y) u>= X --> true 3205 // umax(X, Y) u< X --> false 3206 return nullptr; 3207 } 3208 3209 return nullptr; 3210 } 3211 3212 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) { 3213 if (!I.isEquality()) 3214 return nullptr; 3215 3216 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3217 Value *A, *B, *C, *D; 3218 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) { 3219 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0 3220 Value *OtherVal = A == Op1 ? 
B : A; 3221 return new ICmpInst(I.getPredicate(), OtherVal, 3222 Constant::getNullValue(A->getType())); 3223 } 3224 3225 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) { 3226 // A^c1 == C^c2 --> A == C^(c1^c2) 3227 ConstantInt *C1, *C2; 3228 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) && 3229 Op1->hasOneUse()) { 3230 Constant *NC = Builder->getInt(C1->getValue() ^ C2->getValue()); 3231 Value *Xor = Builder->CreateXor(C, NC); 3232 return new ICmpInst(I.getPredicate(), A, Xor); 3233 } 3234 3235 // A^B == A^D -> B == D 3236 if (A == C) 3237 return new ICmpInst(I.getPredicate(), B, D); 3238 if (A == D) 3239 return new ICmpInst(I.getPredicate(), B, C); 3240 if (B == C) 3241 return new ICmpInst(I.getPredicate(), A, D); 3242 if (B == D) 3243 return new ICmpInst(I.getPredicate(), A, C); 3244 } 3245 } 3246 3247 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) { 3248 // A == (A^B) -> B == 0 3249 Value *OtherVal = A == Op0 ? B : A; 3250 return new ICmpInst(I.getPredicate(), OtherVal, 3251 Constant::getNullValue(A->getType())); 3252 } 3253 3254 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0 3255 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) && 3256 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) { 3257 Value *X = nullptr, *Y = nullptr, *Z = nullptr; 3258 3259 if (A == C) { 3260 X = B; 3261 Y = D; 3262 Z = A; 3263 } else if (A == D) { 3264 X = B; 3265 Y = C; 3266 Z = A; 3267 } else if (B == C) { 3268 X = A; 3269 Y = D; 3270 Z = B; 3271 } else if (B == D) { 3272 X = A; 3273 Y = C; 3274 Z = B; 3275 } 3276 3277 if (X) { // Build (X^Y) & Z 3278 Op1 = Builder->CreateXor(X, Y); 3279 Op1 = Builder->CreateAnd(Op1, Z); 3280 I.setOperand(0, Op1); 3281 I.setOperand(1, Constant::getNullValue(Op1->getType())); 3282 return &I; 3283 } 3284 } 3285 3286 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B) 3287 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B) 3288 ConstantInt *Cst1; 3289 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) && 3290 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) || 3291 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) && 3292 match(Op1, m_ZExt(m_Value(A))))) { 3293 APInt Pow2 = Cst1->getValue() + 1; 3294 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) && 3295 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth()) 3296 return new ICmpInst(I.getPredicate(), A, 3297 Builder->CreateTrunc(B, A->getType())); 3298 } 3299 3300 // (A >> C) == (B >> C) --> (A^B) u< (1 << C) 3301 // For lshr and ashr pairs. 3302 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) && 3303 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) || 3304 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) && 3305 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) { 3306 unsigned TypeBits = Cst1->getBitWidth(); 3307 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3308 if (ShAmt < TypeBits && ShAmt != 0) { 3309 ICmpInst::Predicate Pred = I.getPredicate() == ICmpInst::ICMP_NE 3310 ? 
ICmpInst::ICMP_UGE 3311 : ICmpInst::ICMP_ULT; 3312 Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted"); 3313 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt); 3314 return new ICmpInst(Pred, Xor, Builder->getInt(CmpVal)); 3315 } 3316 } 3317 3318 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0 3319 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) && 3320 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) { 3321 unsigned TypeBits = Cst1->getBitWidth(); 3322 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3323 if (ShAmt < TypeBits && ShAmt != 0) { 3324 Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted"); 3325 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt); 3326 Value *And = Builder->CreateAnd(Xor, Builder->getInt(AndVal), 3327 I.getName() + ".mask"); 3328 return new ICmpInst(I.getPredicate(), And, 3329 Constant::getNullValue(Cst1->getType())); 3330 } 3331 } 3332 3333 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to 3334 // "icmp (and X, mask), cst" 3335 uint64_t ShAmt = 0; 3336 if (Op0->hasOneUse() && 3337 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) && 3338 match(Op1, m_ConstantInt(Cst1)) && 3339 // Only do this when A has multiple uses. This is most important to do 3340 // when it exposes other optimizations. 3341 !A->hasOneUse()) { 3342 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits(); 3343 3344 if (ShAmt < ASize) { 3345 APInt MaskV = 3346 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits()); 3347 MaskV <<= ShAmt; 3348 3349 APInt CmpV = Cst1->getValue().zext(ASize); 3350 CmpV <<= ShAmt; 3351 3352 Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV)); 3353 return new ICmpInst(I.getPredicate(), Mask, Builder->getInt(CmpV)); 3354 } 3355 } 3356 3357 return nullptr; 3358 } 3359 3360 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so 3361 /// far. 3362 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) { 3363 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0)); 3364 Value *LHSCIOp = LHSCI->getOperand(0); 3365 Type *SrcTy = LHSCIOp->getType(); 3366 Type *DestTy = LHSCI->getType(); 3367 Value *RHSCIOp; 3368 3369 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the 3370 // integer type is the same size as the pointer type. 3371 if (LHSCI->getOpcode() == Instruction::PtrToInt && 3372 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) { 3373 Value *RHSOp = nullptr; 3374 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) { 3375 Value *RHSCIOp = RHSC->getOperand(0); 3376 if (RHSCIOp->getType()->getPointerAddressSpace() == 3377 LHSCIOp->getType()->getPointerAddressSpace()) { 3378 RHSOp = RHSC->getOperand(0); 3379 // If the pointer types don't match, insert a bitcast. 3380 if (LHSCIOp->getType() != RHSOp->getType()) 3381 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType()); 3382 } 3383 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) { 3384 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy); 3385 } 3386 3387 if (RHSOp) 3388 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp); 3389 } 3390 3391 // The code below only handles extension cast instructions, so far. 3392 // Enforce this. 
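// For example, "icmp ult (zext i8 %a to i32), (zext i8 %b to i32)" can be
// narrowed to "icmp ult i8 %a, %b"; truncating casts are not handled here.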
3393 if (LHSCI->getOpcode() != Instruction::ZExt && 3394 LHSCI->getOpcode() != Instruction::SExt) 3395 return nullptr; 3396 3397 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt; 3398 bool isSignedCmp = ICmp.isSigned(); 3399 3400 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) { 3401 // Not an extension from the same type? 3402 RHSCIOp = CI->getOperand(0); 3403 if (RHSCIOp->getType() != LHSCIOp->getType()) 3404 return nullptr; 3405 3406 // If the signedness of the two casts doesn't agree (i.e. one is a sext 3407 // and the other is a zext), then we can't handle this. 3408 if (CI->getOpcode() != LHSCI->getOpcode()) 3409 return nullptr; 3410 3411 // Deal with equality cases early. 3412 if (ICmp.isEquality()) 3413 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3414 3415 // A signed comparison of sign extended values simplifies into a 3416 // signed comparison. 3417 if (isSignedCmp && isSignedExt) 3418 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3419 3420 // The other three cases all fold into an unsigned comparison. 3421 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp); 3422 } 3423 3424 // If we aren't dealing with a constant on the RHS, exit early. 3425 auto *C = dyn_cast<Constant>(ICmp.getOperand(1)); 3426 if (!C) 3427 return nullptr; 3428 3429 // Compute the constant that would happen if we truncated to SrcTy then 3430 // re-extended to DestTy. 3431 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy); 3432 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy); 3433 3434 // If the re-extended constant didn't change... 3435 if (Res2 == C) { 3436 // Deal with equality cases early. 3437 if (ICmp.isEquality()) 3438 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3439 3440 // A signed comparison of sign extended values simplifies into a 3441 // signed comparison. 3442 if (isSignedExt && isSignedCmp) 3443 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3444 3445 // The other three cases all fold into an unsigned comparison. 3446 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1); 3447 } 3448 3449 // The re-extended constant changed, partly changed (in the case of a vector), 3450 // or could not be determined to be equal (in the case of a constant 3451 // expression), so the constant cannot be represented in the shorter type. 3452 // Consequently, we cannot emit a simple comparison. 3453 // All the cases that fold to true or false will have already been handled 3454 // by SimplifyICmpInst, so only deal with the tricky case. 3455 3456 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C)) 3457 return nullptr; 3458 3459 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases 3460 // should have been folded away previously and not enter in here. 3461 3462 // We're performing an unsigned comp with a sign extended value. 3463 // This is true if the input is >= 0. [aka >s -1] 3464 Constant *NegOne = Constant::getAllOnesValue(SrcTy); 3465 Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName()); 3466 3467 // Finally, return the value computed. 
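// For example, "icmp ult (sext i8 %x to i32), 1000" becomes
// "icmp sgt i8 %x, -1"; the ugt form returns the negation of that value.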
3468 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT) 3469 return replaceInstUsesWith(ICmp, Result); 3470 3471 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!"); 3472 return BinaryOperator::CreateNot(Result); 3473 } 3474 3475 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, 3476 Value *RHS, Instruction &OrigI, 3477 Value *&Result, Constant *&Overflow) { 3478 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS)) 3479 std::swap(LHS, RHS); 3480 3481 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) { 3482 Result = OpResult; 3483 Overflow = OverflowVal; 3484 if (ReuseName) 3485 Result->takeName(&OrigI); 3486 return true; 3487 }; 3488 3489 // If the overflow check was an add followed by a compare, the insertion point 3490 // may be pointing to the compare. We want to insert the new instructions 3491 // before the add in case there are uses of the add between the add and the 3492 // compare. 3493 Builder->SetInsertPoint(&OrigI); 3494 3495 switch (OCF) { 3496 case OCF_INVALID: 3497 llvm_unreachable("bad overflow check kind!"); 3498 3499 case OCF_UNSIGNED_ADD: { 3500 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI); 3501 if (OR == OverflowResult::NeverOverflows) 3502 return SetResult(Builder->CreateNUWAdd(LHS, RHS), Builder->getFalse(), 3503 true); 3504 3505 if (OR == OverflowResult::AlwaysOverflows) 3506 return SetResult(Builder->CreateAdd(LHS, RHS), Builder->getTrue(), true); 3507 3508 // Fall through uadd into sadd 3509 LLVM_FALLTHROUGH; 3510 } 3511 case OCF_SIGNED_ADD: { 3512 // X + 0 -> {X, false} 3513 if (match(RHS, m_Zero())) 3514 return SetResult(LHS, Builder->getFalse(), false); 3515 3516 // We can strength reduce this signed add into a regular add if we can prove 3517 // that it will never overflow. 
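// For example, if both operands are known non-negative i32 values below
// INT_MAX/2, their sum cannot overflow, so {add nsw, false} is returned;
// the actual proof is delegated to willNotOverflowSignedAdd below.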
3518 if (OCF == OCF_SIGNED_ADD)
3519 if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
3520 return SetResult(Builder->CreateNSWAdd(LHS, RHS), Builder->getFalse(),
3521 true);
3522 break;
3523 }
3524
3525 case OCF_UNSIGNED_SUB:
3526 case OCF_SIGNED_SUB: {
3527 // X - 0 -> {X, false}
3528 if (match(RHS, m_Zero()))
3529 return SetResult(LHS, Builder->getFalse(), false);
3530
3531 if (OCF == OCF_SIGNED_SUB) {
3532 if (willNotOverflowSignedSub(LHS, RHS, OrigI))
3533 return SetResult(Builder->CreateNSWSub(LHS, RHS), Builder->getFalse(),
3534 true);
3535 } else {
3536 if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
3537 return SetResult(Builder->CreateNUWSub(LHS, RHS), Builder->getFalse(),
3538 true);
3539 }
3540 break;
3541 }
3542
3543 case OCF_UNSIGNED_MUL: {
3544 OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
3545 if (OR == OverflowResult::NeverOverflows)
3546 return SetResult(Builder->CreateNUWMul(LHS, RHS), Builder->getFalse(),
3547 true);
3548 if (OR == OverflowResult::AlwaysOverflows)
3549 return SetResult(Builder->CreateMul(LHS, RHS), Builder->getTrue(), true);
3550 LLVM_FALLTHROUGH;
3551 }
3552 case OCF_SIGNED_MUL:
3553 // X * undef -> undef
3554 if (isa<UndefValue>(RHS))
3555 return SetResult(RHS, UndefValue::get(Builder->getInt1Ty()), false);
3556
3557 // X * 0 -> {0, false}
3558 if (match(RHS, m_Zero()))
3559 return SetResult(RHS, Builder->getFalse(), false);
3560
3561 // X * 1 -> {X, false}
3562 if (match(RHS, m_One()))
3563 return SetResult(LHS, Builder->getFalse(), false);
3564
3565 if (OCF == OCF_SIGNED_MUL)
3566 if (willNotOverflowSignedMul(LHS, RHS, OrigI))
3567 return SetResult(Builder->CreateNSWMul(LHS, RHS), Builder->getFalse(),
3568 true);
3569 break;
3570 }
3571
3572 return false;
3573 }
3574
3575 /// \brief Recognize and process an idiom involving a test for multiplication
3576 /// overflow.
3577 ///
3578 /// The caller has matched a pattern of the form:
3579 /// I = cmp u (mul(zext A, zext B), V)
3580 /// The function checks if this is a test for overflow and if so replaces
3581 /// the multiplication with a call to the 'mul.with.overflow' intrinsic.
3582 ///
3583 /// \param I Compare instruction.
3584 /// \param MulVal Result of the 'mul' instruction. It is one of the arguments of
3585 /// the compare instruction. Must be of integer type.
3586 /// \param OtherVal The other argument of the compare instruction.
3587 /// \returns Instruction which must replace the compare instruction, null if no
3588 /// replacement is required.
3589 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
3590 Value *OtherVal, InstCombiner &IC) {
3591 // Don't bother doing this transformation for pointers; don't do it for
3592 // vectors either.
3593 if (!isa<IntegerType>(MulVal->getType()))
3594 return nullptr;
3595
3596 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
3597 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
3598 auto *MulInstr = dyn_cast<Instruction>(MulVal);
3599 if (!MulInstr)
3600 return nullptr;
3601 assert(MulInstr->getOpcode() == Instruction::Mul);
3602
3603 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
3604 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
3605 assert(LHS->getOpcode() == Instruction::ZExt);
3606 assert(RHS->getOpcode() == Instruction::ZExt);
3607 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
3608
3609 // Calculate the type and width of the result produced by mul.with.overflow.
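// For example, for mul(zext i8 %a to i32, zext i16 %b to i32) the wider
// source type wins: MulWidth = 16 and MulType = i16.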
3610 Type *TyA = A->getType(), *TyB = B->getType();
3611 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
3612 WidthB = TyB->getPrimitiveSizeInBits();
3613 unsigned MulWidth;
3614 Type *MulType;
3615 if (WidthB > WidthA) {
3616 MulWidth = WidthB;
3617 MulType = TyB;
3618 } else {
3619 MulWidth = WidthA;
3620 MulType = TyA;
3621 }
3622
3623 // In order to replace the original mul with a narrower mul.with.overflow,
3624 // all uses must ignore upper bits of the product. The number of used low
3625 // bits must not be greater than the width of mul.with.overflow.
3626 if (MulVal->hasNUsesOrMore(2))
3627 for (User *U : MulVal->users()) {
3628 if (U == &I)
3629 continue;
3630 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3631 // Check if truncation ignores bits above MulWidth.
3632 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
3633 if (TruncWidth > MulWidth)
3634 return nullptr;
3635 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3636 // Check if AND ignores bits above MulWidth.
3637 if (BO->getOpcode() != Instruction::And)
3638 return nullptr;
3639 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
3640 const APInt &CVal = CI->getValue();
3641 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
3642 return nullptr;
3643 }
3644 } else {
3645 // Other uses prohibit this transformation.
3646 return nullptr;
3647 }
3648 }
3649
3650 // Recognize patterns.
3651 switch (I.getPredicate()) {
3652 case ICmpInst::ICMP_EQ:
3653 case ICmpInst::ICMP_NE:
3654 // Recognize pattern:
3655 // mulval = mul(zext A, zext B)
3656 // cmp eq/ne mulval, zext trunc mulval
3657 if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
3658 if (Zext->hasOneUse()) {
3659 Value *ZextArg = Zext->getOperand(0);
3660 if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
3661 if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
3662 break; // Recognized
3663 }
3664
3665 // Recognize pattern:
3666 // mulval = mul(zext A, zext B)
3667 // cmp eq/ne mulval, and(mulval, mask), mask selects low MulWidth bits.
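// For example, with MulWidth == 16 the overflow check looks like:
//   %and = and i32 %mulval, 65535
//   %cmp = icmp ne i32 %mulval, %and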
3668 ConstantInt *CI;
3669 Value *ValToMask;
3670 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
3671 if (ValToMask != MulVal)
3672 return nullptr;
3673 const APInt &CVal = CI->getValue() + 1;
3674 if (CVal.isPowerOf2()) {
3675 unsigned MaskWidth = CVal.logBase2();
3676 if (MaskWidth == MulWidth)
3677 break; // Recognized
3678 }
3679 }
3680 return nullptr;
3681
3682 case ICmpInst::ICMP_UGT:
3683 // Recognize pattern:
3684 // mulval = mul(zext A, zext B)
3685 // cmp ugt mulval, max
3686 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3687 APInt MaxVal = APInt::getMaxValue(MulWidth);
3688 MaxVal = MaxVal.zext(CI->getBitWidth());
3689 if (MaxVal.eq(CI->getValue()))
3690 break; // Recognized
3691 }
3692 return nullptr;
3693
3694 case ICmpInst::ICMP_UGE:
3695 // Recognize pattern:
3696 // mulval = mul(zext A, zext B)
3697 // cmp uge mulval, max+1
3698 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3699 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3700 if (MaxVal.eq(CI->getValue()))
3701 break; // Recognized
3702 }
3703 return nullptr;
3704
3705 case ICmpInst::ICMP_ULE:
3706 // Recognize pattern:
3707 // mulval = mul(zext A, zext B)
3708 // cmp ule mulval, max
3709 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3710 APInt MaxVal = APInt::getMaxValue(MulWidth);
3711 MaxVal = MaxVal.zext(CI->getBitWidth());
3712 if (MaxVal.eq(CI->getValue()))
3713 break; // Recognized
3714 }
3715 return nullptr;
3716
3717 case ICmpInst::ICMP_ULT:
3718 // Recognize pattern:
3719 // mulval = mul(zext A, zext B)
3720 // cmp ult mulval, max + 1
3721 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3722 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3723 if (MaxVal.eq(CI->getValue()))
3724 break; // Recognized
3725 }
3726 return nullptr;
3727
3728 default:
3729 return nullptr;
3730 }
3731
3732 InstCombiner::BuilderTy *Builder = IC.Builder;
3733 Builder->SetInsertPoint(MulInstr);
3734
3735 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
3736 Value *MulA = A, *MulB = B;
3737 if (WidthA < MulWidth)
3738 MulA = Builder->CreateZExt(A, MulType);
3739 if (WidthB < MulWidth)
3740 MulB = Builder->CreateZExt(B, MulType);
3741 Value *F = Intrinsic::getDeclaration(I.getModule(),
3742 Intrinsic::umul_with_overflow, MulType);
3743 CallInst *Call = Builder->CreateCall(F, {MulA, MulB}, "umul");
3744 IC.Worklist.Add(MulInstr);
3745
3746 // If there are uses of the mul result other than the comparison, we know
3747 // that they are truncations or binary ANDs. Change them to use the result
3748 // of mul.with.overflow and properly adjust the mask/size.
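// For example, "trunc i32 %mulval to i16" is replaced by the i16 value of
// the intrinsic, and "and i32 %mulval, 255" becomes
// "zext (and i16 %umul.value, 255) to i32".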
3749 if (MulVal->hasNUsesOrMore(2)) { 3750 Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value"); 3751 for (User *U : MulVal->users()) { 3752 if (U == &I || U == OtherVal) 3753 continue; 3754 if (TruncInst *TI = dyn_cast<TruncInst>(U)) { 3755 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth) 3756 IC.replaceInstUsesWith(*TI, Mul); 3757 else 3758 TI->setOperand(0, Mul); 3759 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) { 3760 assert(BO->getOpcode() == Instruction::And); 3761 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask) 3762 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1)); 3763 APInt ShortMask = CI->getValue().trunc(MulWidth); 3764 Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask); 3765 Instruction *Zext = 3766 cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType())); 3767 IC.Worklist.Add(Zext); 3768 IC.replaceInstUsesWith(*BO, Zext); 3769 } else { 3770 llvm_unreachable("Unexpected Binary operation"); 3771 } 3772 IC.Worklist.Add(cast<Instruction>(U)); 3773 } 3774 } 3775 if (isa<Instruction>(OtherVal)) 3776 IC.Worklist.Add(cast<Instruction>(OtherVal)); 3777 3778 // The original icmp gets replaced with the overflow value, maybe inverted 3779 // depending on predicate. 3780 bool Inverse = false; 3781 switch (I.getPredicate()) { 3782 case ICmpInst::ICMP_NE: 3783 break; 3784 case ICmpInst::ICMP_EQ: 3785 Inverse = true; 3786 break; 3787 case ICmpInst::ICMP_UGT: 3788 case ICmpInst::ICMP_UGE: 3789 if (I.getOperand(0) == MulVal) 3790 break; 3791 Inverse = true; 3792 break; 3793 case ICmpInst::ICMP_ULT: 3794 case ICmpInst::ICMP_ULE: 3795 if (I.getOperand(1) == MulVal) 3796 break; 3797 Inverse = true; 3798 break; 3799 default: 3800 llvm_unreachable("Unexpected predicate"); 3801 } 3802 if (Inverse) { 3803 Value *Res = Builder->CreateExtractValue(Call, 1); 3804 return BinaryOperator::CreateNot(Res); 3805 } 3806 3807 return ExtractValueInst::Create(Call, 1); 3808 } 3809 3810 /// When performing a comparison against a constant, it is possible that not all 3811 /// the bits in the LHS are demanded. This helper method computes the mask that 3812 /// IS demanded. 3813 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth, 3814 bool isSignCheck) { 3815 if (isSignCheck) 3816 return APInt::getSignMask(BitWidth); 3817 3818 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1)); 3819 if (!CI) return APInt::getAllOnesValue(BitWidth); 3820 const APInt &RHS = CI->getValue(); 3821 3822 switch (I.getPredicate()) { 3823 // For a UGT comparison, we don't care about any bits that 3824 // correspond to the trailing ones of the comparand. The value of these 3825 // bits doesn't impact the outcome of the comparison, because any value 3826 // greater than the RHS must differ in a bit higher than these due to carry. 3827 case ICmpInst::ICMP_UGT: { 3828 unsigned trailingOnes = RHS.countTrailingOnes(); 3829 return APInt::getBitsSetFrom(BitWidth, trailingOnes); 3830 } 3831 3832 // Similarly, for a ULT comparison, we don't care about the trailing zeros. 3833 // Any value less than the RHS must differ in a higher bit because of carries. 3834 case ICmpInst::ICMP_ULT: { 3835 unsigned trailingZeros = RHS.countTrailingZeros(); 3836 return APInt::getBitsSetFrom(BitWidth, trailingZeros); 3837 } 3838 3839 default: 3840 return APInt::getAllOnesValue(BitWidth); 3841 } 3842 } 3843 3844 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst 3845 /// should be swapped. 
3846 /// The decision is based on how many times these two operands are reused
3847 /// as subtract operands and their positions in those instructions.
3848 /// The rationale is that several architectures use the same instruction for
3849 /// both subtract and cmp, so it is better if the order of those operands
3850 /// matches.
3851 /// \return true if Op0 and Op1 should be swapped.
3852 static bool swapMayExposeCSEOpportunities(const Value *Op0,
3853 const Value *Op1) {
3854 // Filter out pointer values, as those cannot appear directly in a subtract.
3855 // FIXME: we may want to go through inttoptrs or bitcasts.
3856 if (Op0->getType()->isPointerTy())
3857 return false;
3858 // Count every use of both Op0 and Op1 in a subtract.
3859 // Each time Op0 is the first operand, count -1: swapping is bad, the
3860 // subtract already has the same layout as the compare.
3861 // Each time Op0 is the second operand, count +1: swapping is good, the
3862 // subtract has a different layout than the compare.
3863 // At the end, if the benefit is greater than 0, Op0 should come second to
3864 // expose more CSE opportunities.
3865 int GlobalSwapBenefits = 0;
3866 for (const User *U : Op0->users()) {
3867 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
3868 if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
3869 continue;
3870 // If Op0 is the first argument, it is not beneficial to swap the
3871 // arguments.
3872 int LocalSwapBenefits = -1;
3873 unsigned Op1Idx = 1;
3874 if (BinOp->getOperand(Op1Idx) == Op0) {
3875 Op1Idx = 0;
3876 LocalSwapBenefits = 1;
3877 }
3878 if (BinOp->getOperand(Op1Idx) != Op1)
3879 continue;
3880 GlobalSwapBenefits += LocalSwapBenefits;
3881 }
3882 return GlobalSwapBenefits > 0;
3883 }
3884
3885 /// \brief Check that one use is in the same block as the definition and all
3886 /// other uses are in blocks dominated by a given block.
3887 ///
3888 /// \param DI Definition
3889 /// \param UI Use
3890 /// \param DB Block that must dominate all uses of \p DI outside
3891 /// the parent block
3892 /// \return true when \p UI is the only use of \p DI in the parent block
3893 /// and all other uses of \p DI are in blocks dominated by \p DB.
3894 ///
3895 bool InstCombiner::dominatesAllUses(const Instruction *DI,
3896 const Instruction *UI,
3897 const BasicBlock *DB) const {
3898 assert(DI && UI && "Instruction not defined\n");
3899 // Ignore incomplete definitions.
3900 if (!DI->getParent())
3901 return false;
3902 // DI and UI must be in the same block.
3903 if (DI->getParent() != UI->getParent())
3904 return false;
3905 // Protect from self-referencing blocks.
3906 if (DI->getParent() == DB)
3907 return false;
3908 for (const User *U : DI->users()) {
3909 auto *Usr = cast<Instruction>(U);
3910 if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
3911 return false;
3912 }
3913 return true;
3914 }
3915
3916 /// Return true when the instruction sequence within a block is select-cmp-br.
3917 static bool isChainSelectCmpBranch(const SelectInst *SI) {
3918 const BasicBlock *BB = SI->getParent();
3919 if (!BB)
3920 return false;
3921 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
3922 if (!BI || BI->getNumSuccessors() != 2)
3923 return false;
3924 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
3925 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
3926 return false;
3927 return true;
3928 }
3929
3930 /// \brief True when a select result is replaced by one of its operands
3931 /// in a select-icmp sequence. This will eventually result in the elimination
3932 /// of the select.
3933 ///
3934 /// \param SI Select instruction
3935 /// \param Icmp Compare instruction
3936 /// \param SIOpd Operand that replaces the select
3937 ///
3938 /// Notes:
3939 /// - The replacement is global and requires dominator information
3940 /// - The caller is responsible for the actual replacement
3941 ///
3942 /// Example:
3943 ///
3944 /// entry:
3945 /// %4 = select i1 %3, %C* %0, %C* null
3946 /// %5 = icmp eq %C* %4, null
3947 /// br i1 %5, label %9, label %7
3948 /// ...
3949 /// ; <label>:7 ; preds = %entry
3950 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
3951 /// ...
3952 ///
3953 /// can be transformed to
3954 ///
3955 /// %5 = icmp eq %C* %0, null
3956 /// %6 = select i1 %3, i1 %5, i1 true
3957 /// br i1 %6, label %9, label %7
3958 /// ...
3959 /// ; <label>:7 ; preds = %entry
3960 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
3961 ///
3962 /// The same applies when the first operand of the select is a constant and/or
3963 /// the compare is a not-equal rather than an equal compare.
3964 ///
3965 /// NOTE: The function is only called when the select and compare constants
3966 /// are equal, so the optimization can work only for EQ predicates. This is
3967 /// not a major restriction since a NE compare should be 'normalized' to an
3968 /// equal compare, which usually happens in the combiner; the test case
3969 /// select-cmp-br.ll checks for it.
3970 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
3971 const ICmpInst *Icmp,
3972 const unsigned SIOpd) {
3973 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
3974 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
3975 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
3976 // The check for the single predecessor is not the best that can be
3977 // done. But it protects efficiently against cases like when SI's
3978 // home block has two successors, Succ and Succ1, and Succ1 is a
3979 // predecessor of Succ. Then SI can't be replaced by SIOpd because the
3980 // use that gets replaced can be reached on either path. So the
3981 // uniqueness check guarantees that the path containing all uses of SI
3982 // (outside SI's parent) is disjoint from all other paths out of SI.
3983 // But that information is more expensive to compute, and the trade-off
3984 // here is in favor of compile time. Note also that we check for a
3985 // single predecessor, and not just uniqueness, to handle the situation
3986 // when Succ and Succ1 point to the same basic block.
3987 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
3988 NumSel++;
3989 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
3990 return true;
3991 }
3992 }
3993 return false;
3994 }
3995
3996 /// Try to fold the comparison based on range information we can get by checking
3997 /// whether bits are known to be zero or one in the inputs.
3998 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
3999 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4000 Type *Ty = Op0->getType();
4001 ICmpInst::Predicate Pred = I.getPredicate();
4002
4003 // Get scalar or pointer size.
4004 unsigned BitWidth = Ty->isIntOrIntVectorTy()
4005 ? Ty->getScalarSizeInBits()
4006 : DL.getTypeSizeInBits(Ty->getScalarType());
4007
4008 if (!BitWidth)
4009 return nullptr;
4010
4011 // If this is a normal comparison, it demands all bits.
If it is a sign bit 4012 // comparison, it only demands the sign bit. 4013 bool IsSignBit = false; 4014 const APInt *CmpC; 4015 if (match(Op1, m_APInt(CmpC))) { 4016 bool UnusedBit; 4017 IsSignBit = isSignBitCheck(Pred, *CmpC, UnusedBit); 4018 } 4019 4020 KnownBits Op0Known(BitWidth); 4021 KnownBits Op1Known(BitWidth); 4022 4023 if (SimplifyDemandedBits(&I, 0, 4024 getDemandedBitsLHSMask(I, BitWidth, IsSignBit), 4025 Op0Known, 0)) 4026 return &I; 4027 4028 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth), 4029 Op1Known, 0)) 4030 return &I; 4031 4032 // Given the known and unknown bits, compute a range that the LHS could be 4033 // in. Compute the Min, Max and RHS values based on the known bits. For the 4034 // EQ and NE we use unsigned values. 4035 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0); 4036 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0); 4037 if (I.isSigned()) { 4038 computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4039 computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4040 } else { 4041 computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4042 computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4043 } 4044 4045 // If Min and Max are known to be the same, then SimplifyDemandedBits 4046 // figured out that the LHS is a constant. Constant fold this now, so that 4047 // code below can assume that Min != Max. 4048 if (!isa<Constant>(Op0) && Op0Min == Op0Max) 4049 return new ICmpInst(Pred, ConstantInt::get(Op0->getType(), Op0Min), Op1); 4050 if (!isa<Constant>(Op1) && Op1Min == Op1Max) 4051 return new ICmpInst(Pred, Op0, ConstantInt::get(Op1->getType(), Op1Min)); 4052 4053 // Based on the range information we know about the LHS, see if we can 4054 // simplify this comparison. For example, (x&4) < 8 is always true. 4055 switch (Pred) { 4056 default: 4057 llvm_unreachable("Unknown icmp opcode!"); 4058 case ICmpInst::ICMP_EQ: 4059 case ICmpInst::ICMP_NE: { 4060 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) { 4061 return Pred == CmpInst::ICMP_EQ 4062 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())) 4063 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4064 } 4065 4066 // If all bits are known zero except for one, then we know at most one bit 4067 // is set. If the comparison is against zero, then this is a check to see if 4068 // *that* bit is set. 4069 APInt Op0KnownZeroInverted = ~Op0Known.Zero; 4070 if (Op1Known.isZero()) { 4071 // If the LHS is an AND with the same constant, look through it. 4072 Value *LHS = nullptr; 4073 const APInt *LHSC; 4074 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) || 4075 *LHSC != Op0KnownZeroInverted) 4076 LHS = Op0; 4077 4078 Value *X; 4079 if (match(LHS, m_Shl(m_One(), m_Value(X)))) { 4080 APInt ValToCheck = Op0KnownZeroInverted; 4081 Type *XTy = X->getType(); 4082 if (ValToCheck.isPowerOf2()) { 4083 // ((1 << X) & 8) == 0 -> X != 3 4084 // ((1 << X) & 8) != 0 -> X == 3 4085 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4086 auto NewPred = ICmpInst::getInversePredicate(Pred); 4087 return new ICmpInst(NewPred, X, CmpC); 4088 } else if ((++ValToCheck).isPowerOf2()) { 4089 // ((1 << X) & 7) == 0 -> X >= 3 4090 // ((1 << X) & 7) != 0 -> X < 3 4091 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4092 auto NewPred = 4093 Pred == CmpInst::ICMP_EQ ? 
CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
4094 return new ICmpInst(NewPred, X, CmpC);
4095 }
4096 }
4097
4098 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1.
4099 const APInt *CI;
4100 if (Op0KnownZeroInverted.isOneValue() &&
4101 match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
4102 // ((8 >>u X) & 1) == 0 -> X != 3
4103 // ((8 >>u X) & 1) != 0 -> X == 3
4104 unsigned CmpVal = CI->countTrailingZeros();
4105 auto NewPred = ICmpInst::getInversePredicate(Pred);
4106 return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
4107 }
4108 }
4109 break;
4110 }
4111 case ICmpInst::ICMP_ULT: {
4112 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
4113 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4114 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
4115 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4116 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
4117 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4118
4119 const APInt *CmpC;
4120 if (match(Op1, m_APInt(CmpC))) {
4121 // A <u C -> A == C-1 if min(A)+1 == C
4122 if (Op1Max == Op0Min + 1) {
4123 Constant *CMinus1 = ConstantInt::get(Op0->getType(), *CmpC - 1);
4124 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, CMinus1);
4125 }
4126 }
4127 break;
4128 }
4129 case ICmpInst::ICMP_UGT: {
4130 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
4131 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4132
4133 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
4134 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4135
4136 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
4137 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4138
4139 const APInt *CmpC;
4140 if (match(Op1, m_APInt(CmpC))) {
4141 // A >u C -> A == C+1 if max(A)-1 == C
4142 if (*CmpC == Op0Max - 1)
4143 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4144 ConstantInt::get(Op1->getType(), *CmpC + 1));
4145 }
4146 break;
4147 }
4148 case ICmpInst::ICMP_SLT:
4149 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
4150 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4151 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
4152 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4153 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
4154 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4155 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4156 if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
4157 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4158 Builder->getInt(CI->getValue() - 1));
4159 }
4160 break;
4161 case ICmpInst::ICMP_SGT:
4162 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
4163 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4164 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
4165 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4166
4167 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
4168 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4169 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4170 if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
4171 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4172 Builder->getInt(CI->getValue() + 1));
4173 }
4174 break;
4175 case ICmpInst::ICMP_SGE:
4176 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
4177 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
4178 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4179 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B) 4180 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4181 break; 4182 case ICmpInst::ICMP_SLE: 4183 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!"); 4184 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B) 4185 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4186 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B) 4187 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4188 break; 4189 case ICmpInst::ICMP_UGE: 4190 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!"); 4191 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B) 4192 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4193 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B) 4194 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4195 break; 4196 case ICmpInst::ICMP_ULE: 4197 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!"); 4198 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B) 4199 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4200 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B) 4201 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4202 break; 4203 } 4204 4205 // Turn a signed comparison into an unsigned one if both operands are known to 4206 // have the same sign. 4207 if (I.isSigned() && 4208 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) || 4209 (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) 4210 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1); 4211 4212 return nullptr; 4213 } 4214 4215 /// If we have an icmp le or icmp ge instruction with a constant operand, turn 4216 /// it into the appropriate icmp lt or icmp gt instruction. This transform 4217 /// allows them to be folded in visitICmpInst. 4218 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) { 4219 ICmpInst::Predicate Pred = I.getPredicate(); 4220 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE && 4221 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE) 4222 return nullptr; 4223 4224 Value *Op0 = I.getOperand(0); 4225 Value *Op1 = I.getOperand(1); 4226 auto *Op1C = dyn_cast<Constant>(Op1); 4227 if (!Op1C) 4228 return nullptr; 4229 4230 // Check if the constant operand can be safely incremented/decremented without 4231 // overflowing/underflowing. For scalars, SimplifyICmpInst has already handled 4232 // the edge cases for us, so we just assert on them. For vectors, we must 4233 // handle the edge cases. 4234 Type *Op1Type = Op1->getType(); 4235 bool IsSigned = I.isSigned(); 4236 bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE); 4237 auto *CI = dyn_cast<ConstantInt>(Op1C); 4238 if (CI) { 4239 // A <= MAX -> TRUE ; A >= MIN -> TRUE 4240 assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned)); 4241 } else if (Op1Type->isVectorTy()) { 4242 // TODO? If the edge cases for vectors were guaranteed to be handled as they 4243 // are for scalar, we could remove the min/max checks. However, to do that, 4244 // we would have to use insertelement/shufflevector to replace edge values. 
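// For example, "icmp sle %x, <i32 5, i32 7>" becomes
// "icmp slt %x, <i32 6, i32 8>", but a vector containing INT_MAX would be
// rejected below because incrementing that element would wrap.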
4245 unsigned NumElts = Op1Type->getVectorNumElements(); 4246 for (unsigned i = 0; i != NumElts; ++i) { 4247 Constant *Elt = Op1C->getAggregateElement(i); 4248 if (!Elt) 4249 return nullptr; 4250 4251 if (isa<UndefValue>(Elt)) 4252 continue; 4253 4254 // Bail out if we can't determine if this constant is min/max or if we 4255 // know that this constant is min/max. 4256 auto *CI = dyn_cast<ConstantInt>(Elt); 4257 if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned))) 4258 return nullptr; 4259 } 4260 } else { 4261 // ConstantExpr? 4262 return nullptr; 4263 } 4264 4265 // Increment or decrement the constant and set the new comparison predicate: 4266 // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT 4267 Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true); 4268 CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT: ICmpInst::ICMP_UGT; 4269 NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred; 4270 return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne)); 4271 } 4272 4273 /// Integer compare with boolean values can always be turned into bitwise ops. 4274 static Instruction *canonicalizeICmpBool(ICmpInst &I, 4275 InstCombiner::BuilderTy &Builder) { 4276 Value *A = I.getOperand(0), *B = I.getOperand(1); 4277 assert(A->getType()->getScalarType()->isIntegerTy(1) && "Bools only"); 4278 4279 // A boolean compared to true/false can be simplified to Op0/true/false in 4280 // 14 out of the 20 (10 predicates * 2 constants) possible combinations. 4281 // Cases not handled by InstSimplify are always 'not' of Op0. 4282 if (match(B, m_Zero())) { 4283 switch (I.getPredicate()) { 4284 case CmpInst::ICMP_EQ: // A == 0 -> !A 4285 case CmpInst::ICMP_ULE: // A <=u 0 -> !A 4286 case CmpInst::ICMP_SGE: // A >=s 0 -> !A 4287 return BinaryOperator::CreateNot(A); 4288 default: 4289 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 4290 } 4291 } else if (match(B, m_One())) { 4292 switch (I.getPredicate()) { 4293 case CmpInst::ICMP_NE: // A != 1 -> !A 4294 case CmpInst::ICMP_ULT: // A <u 1 -> !A 4295 case CmpInst::ICMP_SGT: // A >s -1 -> !A 4296 return BinaryOperator::CreateNot(A); 4297 default: 4298 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 4299 } 4300 } 4301 4302 switch (I.getPredicate()) { 4303 default: 4304 llvm_unreachable("Invalid icmp instruction!"); 4305 case ICmpInst::ICMP_EQ: 4306 // icmp eq i1 A, B -> ~(A ^ B) 4307 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 4308 4309 case ICmpInst::ICMP_NE: 4310 // icmp ne i1 A, B -> A ^ B 4311 return BinaryOperator::CreateXor(A, B); 4312 4313 case ICmpInst::ICMP_UGT: 4314 // icmp ugt -> icmp ult 4315 std::swap(A, B); 4316 LLVM_FALLTHROUGH; 4317 case ICmpInst::ICMP_ULT: 4318 // icmp ult i1 A, B -> ~A & B 4319 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 4320 4321 case ICmpInst::ICMP_SGT: 4322 // icmp sgt -> icmp slt 4323 std::swap(A, B); 4324 LLVM_FALLTHROUGH; 4325 case ICmpInst::ICMP_SLT: 4326 // icmp slt i1 A, B -> A & ~B 4327 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A); 4328 4329 case ICmpInst::ICMP_UGE: 4330 // icmp uge -> icmp ule 4331 std::swap(A, B); 4332 LLVM_FALLTHROUGH; 4333 case ICmpInst::ICMP_ULE: 4334 // icmp ule i1 A, B -> ~A | B 4335 return BinaryOperator::CreateOr(Builder.CreateNot(A), B); 4336 4337 case ICmpInst::ICMP_SGE: 4338 // icmp sge -> icmp sle 4339 std::swap(A, B); 4340 LLVM_FALLTHROUGH; 4341 case ICmpInst::ICMP_SLE: 4342 // icmp sle i1 A, B -> A | ~B 4343 return 
BinaryOperator::CreateOr(Builder.CreateNot(B), A);
4344 }
4345 }
4346
4347 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
4348 bool Changed = false;
4349 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4350 unsigned Op0Cplxity = getComplexity(Op0);
4351 unsigned Op1Cplxity = getComplexity(Op1);
4352
4353 /// Orders the operands of the compare so that they are listed from most
4354 /// complex to least complex: binary operators rank above unary operators,
4355 /// which rank above constants, so constants end up on the RHS.
4356 if (Op0Cplxity < Op1Cplxity ||
4357 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
4358 I.swapOperands();
4359 std::swap(Op0, Op1);
4360 Changed = true;
4361 }
4362
4363 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1,
4364 SQ.getWithInstruction(&I)))
4365 return replaceInstUsesWith(I, V);
4366
4367 // Comparing -val or val with non-zero is the same as just comparing val,
4368 // i.e., abs(val) != 0 -> val != 0.
4369 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
4370 Value *Cond, *SelectTrue, *SelectFalse;
4371 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
4372 m_Value(SelectFalse)))) {
4373 if (Value *V = dyn_castNegVal(SelectTrue)) {
4374 if (V == SelectFalse)
4375 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4376 }
4377 else if (Value *V = dyn_castNegVal(SelectFalse)) {
4378 if (V == SelectTrue)
4379 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
4380 }
4381 }
4382 }
4383
4384 if (Op0->getType()->getScalarType()->isIntegerTy(1))
4385 if (Instruction *Res = canonicalizeICmpBool(I, *Builder))
4386 return Res;
4387
4388 if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
4389 return NewICmp;
4390
4391 if (Instruction *Res = foldICmpWithConstant(I))
4392 return Res;
4393
4394 if (Instruction *Res = foldICmpUsingKnownBits(I))
4395 return Res;
4396
4397 // Test if the ICmpInst instruction is used exclusively by a select as
4398 // part of a minimum or maximum operation. If so, refrain from doing
4399 // any other folding. This helps out other analyses which understand
4400 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
4401 // and CodeGen. And in this case, at least one of the comparison
4402 // operands has at least one user besides the compare (the select),
4403 // which would often largely negate the benefit of folding anyway.
4404 if (I.hasOneUse())
4405 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
4406 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
4407 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
4408 return nullptr;
4409
4410 // FIXME: We only do this after checking for min/max to prevent infinite
4411 // looping caused by a reverse canonicalization of these patterns for min/max.
4412 // FIXME: The organization of folds is a mess. These would naturally go into
4413 // canonicalizeCmpWithConstant(), but we can't move all of the above folds
4414 // down here after the min/max restriction.

  // FIXME: We only do this after checking for min/max to prevent infinite
  // looping caused by a reverse canonicalization of these patterns for min/max.
  // FIXME: The organization of folds is a mess. These would naturally go into
  // canonicalizeCmpWithConstant(), but we can't move all of the above folds
  // down here after the min/max restriction.
  ICmpInst::Predicate Pred = I.getPredicate();
  const APInt *C;
  if (match(Op1, m_APInt(C))) {
    // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
    if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
      Constant *Zero = Constant::getNullValue(Op0->getType());
      return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
    }

    // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
    if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
      Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
      return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
    }
  }

  if (Instruction *Res = foldICmpInstWithConstant(I))
    return Res;

  if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
    return Res;

  // If we can optimize an 'icmp GEP, P' or 'icmp P, GEP', do so now.
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
    if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
      return NI;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
    if (Instruction *NI = foldGEPICmp(
            GEP, Op0, ICmpInst::getSwappedPredicate(I.getPredicate()), I))
      return NI;

  // Try to optimize equality comparisons against alloca-based pointers.
  if (Op0->getType()->isPointerTy() && I.isEquality()) {
    assert(Op1->getType()->isPointerTy() &&
           "Comparing pointer with non-pointer?");
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
        return New;
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
        return New;
  }

  // Test to see if the operands of the icmp are casted versions of other
  // values. If the ptr->ptr cast can be stripped off both arguments, we do so
  // now.
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
    if (Op0->getType()->isPointerTy() &&
        (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
      // We keep moving the cast from the left operand over to the right
      // operand, where it can often be eliminated completely.
      Op0 = CI->getOperand(0);

      // If operand #1 is a bitcast instruction, it must also be a ptr->ptr
      // cast, so eliminate it as well.
      if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
        Op1 = CI2->getOperand(0);

      // If Op1 is a constant, we can fold the cast into the constant.
      if (Op0->getType() != Op1->getType()) {
        if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
          Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
        } else {
          // Otherwise, cast the RHS right before the icmp.
          Op1 = Builder->CreateBitCast(Op1, Op0->getType());
        }
      }
      return new ICmpInst(I.getPredicate(), Op0, Op1);
    }
  }
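
  // The cast-stripping above, e.g. (illustrative IR):
  //   %pa = bitcast i32* %a to i8*
  //   %pb = bitcast i32* %b to i8*
  //   %c  = icmp eq i8* %pa, %pb
  // becomes:
  //   %c  = icmp eq i32* %a, %b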

  if (isa<CastInst>(Op0)) {
    // Handle the special case of: icmp (cast bool to X), <cst>
    // This comes up when you have code like
    //   int X = A < B;
    //   if (X) ...
    // For generality, we handle any zero-extension of any operand comparison
    // with a constant or another cast from the same type.
    if (isa<Constant>(Op1) || isa<CastInst>(Op1))
      if (Instruction *R = foldICmpWithCastAndCast(I))
        return R;
  }

  if (Instruction *Res = foldICmpBinOp(I))
    return Res;

  if (Instruction *Res = foldICmpWithMinMax(I))
    return Res;

  {
    Value *A, *B;
    // Transform (A & ~B) == 0 --> (A & B) != 0
    // and       (A & ~B) != 0 --> (A & B) == 0
    // if A is a power of 2.
    if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
        match(Op1, m_Zero()) &&
        isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
      return new ICmpInst(I.getInversePredicate(),
                          Builder->CreateAnd(A, B),
                          Op1);

    // ~X < ~Y --> Y < X
    // ~X < C  --> X > ~C
    if (match(Op0, m_Not(m_Value(A)))) {
      if (match(Op1, m_Not(m_Value(B))))
        return new ICmpInst(I.getPredicate(), B, A);

      const APInt *C;
      if (match(Op1, m_APInt(C)))
        return new ICmpInst(I.getSwappedPredicate(), A,
                            ConstantInt::get(Op1->getType(), ~(*C)));
    }

    Instruction *AddI = nullptr;
    if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
                                     m_Instruction(AddI))) &&
        isa<IntegerType>(A->getType())) {
      Value *Result;
      Constant *Overflow;
      if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
                                Overflow)) {
        replaceInstUsesWith(*AddI, Result);
        return replaceInstUsesWith(I, Overflow);
      }
    }

    // (zext a) * (zext b) --> llvm.umul.with.overflow.
    if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
        return R;
    }
    if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
        return R;
    }
  }

  if (Instruction *Res = foldICmpEquality(I))
    return Res;

  // The 'cmpxchg' instruction returns an aggregate containing the old value
  // and an i1 which indicates whether or not we successfully did the swap.
  //
  // Replace comparisons between the old value and the expected value with the
  // indicator that 'cmpxchg' returns.
  //
  // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
  // spuriously fail. In those cases, the old value may equal the expected
  // value but it is possible for the swap to not occur.
  if (I.getPredicate() == ICmpInst::ICMP_EQ)
    if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
      if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
        if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
            !ACXI->isWeak())
          return ExtractValueInst::Create(ACXI, 1);

  {
    Value *X; ConstantInt *Cst;
    // icmp X+Cst, X
    if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
      return foldICmpAddOpConst(I, X, Cst, I.getPredicate());

    // icmp X, X+Cst
    if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
      return foldICmpAddOpConst(I, X, Cst, I.getSwappedPredicate());
  }
  return Changed ? &I : nullptr;
}

/// Fold fcmp ([us]itofp x, cst) if possible.
Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                                Constant *RHSC) {
  if (!isa<ConstantFP>(RHSC)) return nullptr;
  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
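
  // For example (illustrative), 'fcmp oeq (sitofp i8 %x to float), 4.5'
  // folds to 'false' below: no i8 value converts to exactly 4.5.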
  // Get the width of the mantissa. We don't want to hack on conversions that
  // might lose information from the integer, e.g. "i64 -> float".
  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
  if (MantissaWidth == -1) return nullptr; // Unknown.

  IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

  bool LHSUnsigned = isa<UIToFPInst>(LHSI);

  if (I.isEquality()) {
    FCmpInst::Predicate P = I.getPredicate();
    bool IsExact = false;
    APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
    RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);

    // If the floating-point constant isn't an integer value, then we know the
    // result of any equality comparison against it: no integer converts to it
    // exactly.
    if (!IsExact) {
      // TODO: Can never be -0.0 and other non-representable values
      APFloat RHSRoundInt(RHS);
      RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
      if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
        if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
          return replaceInstUsesWith(I, Builder->getFalse());

        assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
        return replaceInstUsesWith(I, Builder->getTrue());
      }
    }

    // TODO: If the constant is exactly representable, is it always OK to do
    // equality compares as integer?
  }

  // Check to see that the input is converted from an integer type that is
  // small enough that all of its bits are preserved.
  // TODO: Check here for "known" sign bits. This would allow us to handle
  // (sitofp (x >>s 62) to float) when x is i64, for example.
  unsigned InputSize = IntTy->getScalarSizeInBits();

  // The following test does NOT adjust InputSize downwards for signed inputs,
  // because the most negative value still requires all the mantissa bits
  // to distinguish it from one less than that value.
  if ((int)InputSize > MantissaWidth) {
    // Conversion would lose accuracy. Check if loss can impact comparison.
    int Exp = ilogb(RHS);
    if (Exp == APFloat::IEK_Inf) {
      int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
      if (MaxExponent < (int)InputSize - !LHSUnsigned)
        // Conversion could create infinity.
        return nullptr;
    } else {
      // Note that if RHS is zero or NaN, then Exp is negative
      // and the first condition is trivially false.
      if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
        // Conversion could affect comparison.
        return nullptr;
    }
  }

  // Otherwise, we can potentially simplify the comparison. We know that it
  // will always come through as an integer value and we know the constant is
  // not a NaN (it would have been previously simplified).
  assert(!RHS.isNaN() && "NaN comparison not already folded!");
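
  // Map the FP predicate onto the integer predicate matching the conversion's
  // signedness. Ordered and unordered forms coincide here because neither
  // operand can be NaN at this point.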
  ICmpInst::Predicate Pred;
  switch (I.getPredicate()) {
  default: llvm_unreachable("Unexpected predicate!");
  case FCmpInst::FCMP_UEQ:
  case FCmpInst::FCMP_OEQ:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_OGT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
    break;
  case FCmpInst::FCMP_UGE:
  case FCmpInst::FCMP_OGE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    break;
  case FCmpInst::FCMP_ULT:
  case FCmpInst::FCMP_OLT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
    break;
  case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_OLE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
    break;
  case FCmpInst::FCMP_UNE:
  case FCmpInst::FCMP_ONE:
    Pred = ICmpInst::ICMP_NE;
    break;
  case FCmpInst::FCMP_ORD:
    return replaceInstUsesWith(I, Builder->getTrue());
  case FCmpInst::FCMP_UNO:
    return replaceInstUsesWith(I, Builder->getFalse());
  }

  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer. For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison. This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics());
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return replaceInstUsesWith(I, Builder->getTrue());
      return replaceInstUsesWith(I, Builder->getFalse());
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics());
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return replaceInstUsesWith(I, Builder->getTrue());
      return replaceInstUsesWith(I, Builder->getFalse());
    }
  }

  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return replaceInstUsesWith(I, Builder->getTrue());
      return replaceInstUsesWith(I, Builder->getFalse());
    }
  } else {
    // See if the RHS value is < UnsignedMin (i.e. negative). Note that
    // converting zero is signedness-agnostic.
    APFloat UMin(RHS.getSemantics());
    UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
          Pred == ICmpInst::ICMP_UGE)
        return replaceInstUsesWith(I, Builder->getTrue());
      return replaceInstUsesWith(I, Builder->getFalse());
    }
  }
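
  // For example (illustrative), with i8 and RHS == 300.0 the checks above
  // already folded the compare: 300.0 exceeds SMax (127) for the signed case
  // and UMax (255) for the unsigned one.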

  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional. See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
                         ? ConstantExpr::getFPToUI(RHSC, IntTy)
                         : ConstantExpr::getFPToSI(RHSC, IntTy);
  if (!RHS.isZero()) {
    bool Equal = LHSUnsigned
        ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
        : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value. RHSC is rounded towards
      // zero at this point.
      switch (Pred) {
      default: llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
        return replaceInstUsesWith(I, Builder->getTrue());
      case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
        return replaceInstUsesWith(I, Builder->getFalse());
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4  --> int <= 4
        // (float)int <= -4.4 --> false
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder->getFalse());
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4  --> int <= 4
        // (float)int <= -4.4 --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4 --> false
        // (float)int < 4.4  --> int <= 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder->getFalse());
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4 --> int < -4
        // (float)int < 4.4  --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4  --> int > 4
        // (float)int > -4.4 --> true
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder->getTrue());
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4  --> int > 4
        // (float)int > -4.4 --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4 --> true
        // (float)int >= 4.4  --> int > 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder->getTrue());
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4 --> int >= -4
        // (float)int >= 4.4  --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}
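
// Illustrative end-to-end result of the fold above, assuming an i32 input:
//   fcmp olt (sitofp i32 %x to double), 4.4  -->  icmp sle i32 %x, 4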

Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  // Order the operands of the compare so that they are listed from most
  // complex to least complex. This puts constants before unary operators,
  // before binary operators.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V =
          SimplifyFCmpInst(I.getPredicate(), Op0, Op1, I.getFastMathFlags(),
                           SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Simplify 'fcmp pred X, X'.
  if (Op0 == Op1) {
    switch (I.getPredicate()) {
    default: llvm_unreachable("Unknown predicate!");
    case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT: // True if unordered or less than
    case FCmpInst::FCMP_UGT: // True if unordered or greater than
    case FCmpInst::FCMP_UNE: // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD: // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ: // True if ordered and equal
    case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return nullptr;

  // Handle fcmp with constant RHS.
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
        FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
        ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
        if (!RHSF)
          break;

        const fltSemantics *Sem;
        // FIXME: This shouldn't be here.
        if (LHSExt->getSrcTy()->isHalfTy())
          Sem = &APFloat::IEEEhalf();
        else if (LHSExt->getSrcTy()->isFloatTy())
          Sem = &APFloat::IEEEsingle();
        else if (LHSExt->getSrcTy()->isDoubleTy())
          Sem = &APFloat::IEEEdouble();
        else if (LHSExt->getSrcTy()->isFP128Ty())
          Sem = &APFloat::IEEEquad();
        else if (LHSExt->getSrcTy()->isX86_FP80Ty())
          Sem = &APFloat::x87DoubleExtended();
        else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
          Sem = &APFloat::PPCDoubleDouble();
        else
          break;

        bool Lossy;
        APFloat F = RHSF->getValueAPF();
        F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

        // Avoid lossy conversions and denormals. Zero is a special case
        // that's OK to convert.
        APFloat Fabs = F;
        Fabs.clearSign();
        if (!Lossy &&
            ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
              APFloat::cmpLessThan) || Fabs.isZero()))
          return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                              ConstantFP::get(RHSC->getContext(), F));
        break;
      }
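      // The fpext fold above, e.g. (illustrative):
      //   fcmp olt (fpext float %x to double), 1.0
      // becomes 'fcmp olt float %x, 1.0', since 1.0 converts to float
      // losslessly and is not denormal.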
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
        break;
      }
      case Instruction::Load:
        if (GetElementPtrInst *GEP =
                dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
        if (IID != Intrinsic::fabs)
          break;

        // Various optimizations for fabs compared with zero.
        switch (I.getPredicate()) {
        default:
          break;
        // fabs(x) < 0 --> false
        case FCmpInst::FCMP_OLT:
          llvm_unreachable("handled by SimplifyFCmpInst");
        // fabs(x) > 0 --> x != 0
        case FCmpInst::FCMP_OGT:
          return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
        // fabs(x) <= 0 --> x == 0
        case FCmpInst::FCMP_OLE:
          return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
        // fabs(x) >= 0 --> !isnan(x)
        case FCmpInst::FCMP_OGE:
          return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
        // fabs(x) == 0 --> x == 0
        // fabs(x) != 0 --> x != 0
        case FCmpInst::FCMP_OEQ:
        case FCmpInst::FCMP_UEQ:
        case FCmpInst::FCMP_ONE:
        case FCmpInst::FCMP_UNE:
          return new FCmpInst(I.getPredicate(), CI->getArgOperand(0), RHSC);
        }
      }
      }
  }

  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
                            RHSExt->getOperand(0));

  return Changed ? &I : nullptr;
}