//===- InstCombineCompares.cpp --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");

static ConstantInt *extractElement(Constant *V, Constant *Idx) {
  return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
}

static bool hasAddOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ult(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().sgt(In1->getValue());
  return Result->getValue().slt(In1->getValue());
}

/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getAdd(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasAddOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasAddOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}
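// A minimal usage sketch of the helper above (illustrative only, assuming an
// LLVMContext `Ctx` is in scope): for i8 unsigned addition, 200 + 100 wraps
// to 44, and overflow is reported because the wrapped result 44 u< 200.
//   Type *I8 = Type::getInt8Ty(Ctx);
//   Constant *Sum;
//   bool Ov = addWithOverflow(Sum, ConstantInt::get(I8, 200),
//                             ConstantInt::get(I8, 100)); // Ov == true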
static bool hasSubOverflow(ConstantInt *Result,
                           ConstantInt *In1, ConstantInt *In2,
                           bool IsSigned) {
  if (!IsSigned)
    return Result->getValue().ugt(In1->getValue());

  if (In2->isNegative())
    return Result->getValue().slt(In1->getValue());

  return Result->getValue().sgt(In1->getValue());
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(Constant *&Result, Constant *In1,
                            Constant *In2, bool IsSigned = false) {
  Result = ConstantExpr::getSub(In1, In2);

  if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
    for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
      Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
      if (hasSubOverflow(extractElement(Result, Idx),
                         extractElement(In1, Idx),
                         extractElement(In2, Idx),
                         IsSigned))
        return true;
    }
    return false;
  }

  return hasSubOverflow(cast<ConstantInt>(Result),
                        cast<ConstantInt>(In1), cast<ConstantInt>(In2),
                        IsSigned);
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch on sign bit comparison.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Given an exploded icmp instruction, return true if the comparison only
/// checks the sign bit. If it only checks the sign bit, set TrueIfSigned if the
/// result of the comparison is true when the input value is signed.
static bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS,
                           bool &TrueIfSigned) {
  switch (Pred) {
  case ICmpInst::ICMP_SLT:   // True if LHS s< 0
    TrueIfSigned = true;
    return RHS.isNullValue();
  case ICmpInst::ICMP_SLE:   // True if LHS s<= RHS and RHS == -1
    TrueIfSigned = true;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_SGT:   // True if LHS s> -1
    TrueIfSigned = false;
    return RHS.isAllOnesValue();
  case ICmpInst::ICMP_UGT:
    // True if LHS u> RHS and RHS == high-bit-mask - 1
    TrueIfSigned = true;
    return RHS.isMaxSignedValue();
  case ICmpInst::ICMP_UGE:
    // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
    TrueIfSigned = true;
    return RHS.isSignMask();
  default:
    return false;
  }
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}
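// For example, "icmp slt X, 1" tests the same sign condition as
// "icmp sle X, 0", and "icmp sgt X, -1" the same as "icmp sge X, 0";
// isSignTest rewrites the predicate so the constant can become zero.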
/// Given a signed integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                   APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
  // bit if it is unknown.
  Min = Known.One;
  Max = Known.One|UnknownBits;

  if (UnknownBits.isNegative()) { // Sign bit is unknown
    Min.setSignBit();
    Max.clearSignBit();
  }
}

/// Given an unsigned integer type and a set of known zero and one bits, compute
/// the maximum and minimum values that could have the specified known zero and
/// known one bits, returning them in Min/Max.
/// TODO: Move to method on KnownBits struct?
static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
                                                     APInt &Min, APInt &Max) {
  assert(Known.getBitWidth() == Min.getBitWidth() &&
         Known.getBitWidth() == Max.getBitWidth() &&
         "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
  APInt UnknownBits = ~(Known.Zero|Known.One);

  // The minimum value is when the unknown bits are all zeros.
  Min = Known.One;
  // The maximum value is when the unknown bits are all ones.
  Max = Known.One|UnknownBits;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                                        GlobalVariable *GV,
                                                        CmpInst &ICI,
                                                        ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle, for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index. Collect the indices. This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr;  // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr;  // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.
  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index. FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element. SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87". Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive). We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'. If there are 64 elements or fewer in
  // the array, this will fully represent all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element,
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i; // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early. Since the
    // predicate is expensive, only check it every 8 elements. This is only
    // really useful for really huge arrays.
    if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s). We
  // order the state machines in order of the complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly. We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
      Idx = Builder.CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(), SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
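  // For example, "abbbbc"[i] == 'b' is true exactly for i in [1, 4], so with
  // FirstTrueElement == 1 and TrueRangeEnd == 4 the block below emits
  // "(i-1) <u 4".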
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type if we have a DataLayout
    // - Default to i32
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}
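// An illustrative IR sketch of the fold above (names hypothetical): given
//   @foo = constant [4 x i32] [i32 1, i32 2, i32 3, i32 9]
//   %p = getelementptr inbounds [4 x i32], [4 x i32]* @foo, i32 0, i32 %i
//   %v = load i32, i32* %p
//   %c = icmp eq i32 %v, 9
// only element 3 compares equal, so %c folds to "icmp eq i32 %i, 3".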
/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index. If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index. For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset, just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element. For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices. If so, emit the hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index. If there is no offset, life is simple, return
  // the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to IntPtrTy in case a truncation occurs. If an extension is needed,
    // we don't need to bother extending: the extension won't affect where the
    // computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
      VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is an index. The computation we will do will be modulo
  // the pointer size, so get it.
  uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);

  Offset &= PtrSizeMask;
  VariableScale &= PtrSizeMask;

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation. Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
                                           true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}
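// For example (a sketch, with i32 elements so the scale is 4): a GEP whose
// offset expression is "12 + i*4" is evaluated as "i + 3" because 12 is a
// multiple of the scale, while "10 + i*4" returns null since 10 is not.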
/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.count(V) != 0) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = dyn_cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node, mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (Value *Use : Val->uses()) {

      auto *PHI = dyn_cast<PHINode>(Use);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}
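// For example, a chain like
//   %g = getelementptr inbounds i8, i8* %phi, i64 %n
// where %phi merges other Base-derived pointers is accepted by the walk
// above, while a bitcast, a non-inbounds GEP, or a multi-index GEP in the
// chain makes it return false.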
// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getPointerTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {

    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      NewInsts[CI] = NewInsts[CI->getOperand(0)];
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && dyn_cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created, we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEP's Pointer and Index.
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getPointerTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}
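// For example (a sketch): starting from
//   %g1 = getelementptr inbounds i8, i8* %base, i64 2
//   %g2 = getelementptr inbounds i8, i8* %g1, i64 3
// getAsConstantIndexedAddress(%g2, DL) walks both GEPs and returns
// {%base, 5}, accumulating the constant indices.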
/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                       ICmpInst::Predicate Cond,
                                       Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  if (PtrBase == RHS && GEPLHS->isInBounds()) {
    // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds. See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      if (IndicesTheSame)
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits() <
              RHSIndexTy->getPrimitiveSizeInBits()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try to convert this to an indexed compare by looking
      // through PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    if (GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    if (GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
              GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          } else {
            if (NumDifferences++) break;
            DiffOperand = i;
          }
        }

      if (NumDifferences == 0)   // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
            Builder.getInt1(ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts
  // as a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
                                         const AllocaInst *Alloca,
                                         const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
      // These intrinsics don't escape or compare the pointer. Memset is safe
      // because we don't allow ptrtoint. Memcpy and memmove are safe because
      // we don't allow stores, so src cannot point to V.
      case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
      case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
        continue;
      default:
        return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}
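// For example, given
//   %a = alloca i32
//   %c = icmp eq i32* %a, %other
// where %a is only loaded from, stored to, and compared this one time, the
// fold above replaces %c with false (and an "icmp ne" with true), since the
// address of a non-escaping alloca cannot be guessed.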
/// Fold "icmp pred (X+CI), X".
Instruction *InstCombiner::foldICmpAddOpConst(Value *X, ConstantInt *CI,
                                              ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal. Similarly for all other "or equals"
  // operators.

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Value *R =
        ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));

  unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
  ConstantInt *SMax = ConstantInt::get(X->getContext(),
                                       APInt::getSignedMaxValue(BitWidth));

  // (X+ 1) <s X       --> X >s (MAXSINT-1)        --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)        --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)  --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)  --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)      --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)      --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  Constant *C = Builder.getInt(CI->getValue() - 1);
  return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
}
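// A worked i8 instance of the fold above: for "(X+1) <u X" the ULT branch
// computes R = 255 - 1 = 254 and emits "X >u 254", which is equivalent to
// "X == 255", matching the first line of the table in the comments.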
/// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
///   (icmp eq/ne A, Log2(AP2/AP1)) ->
///   (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
///   (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                 const APInt &AP1,
                                                 const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}
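// For example, "icmp eq i8 (shl 2, %A), 16" has AP2 = 2 (one trailing zero)
// and AP1 = 16 (four trailing zeros), so Shift = 3 and 2 << 3 == 16; the fold
// above therefore produces "icmp eq i8 %A, 3".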
/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombiner &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow. To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare can be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add. Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now. We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but which goes down the use-def
    // chain to see which bits of a value are actually demanded. If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
    TruncInst *TI = dyn_cast<TruncInst>(U);
    if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
      return nullptr;
  }

  // If the pattern matches, truncate the inputs to the narrower type and
  // use the sadd_with_overflow intrinsic to efficiently compute both the
  // result and the overflow bit.
  Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
  Value *F = Intrinsic::getDeclaration(I.getModule(),
                                       Intrinsic::sadd_with_overflow, NewType);

  InstCombiner::BuilderTy &Builder = IC.Builder;

  // Put the new code above the original add, in case there are any uses of the
  // add between the add and the compare.
  Builder.SetInsertPoint(OrigAdd);

  Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
  Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
  CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
  Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
  Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());

  // The inner add was the result of the narrow add, zero extended to the
  // wider type. Replace it with the result computed by the intrinsic.
  IC.replaceInstUsesWith(*OrigAdd, ZExt);

  // The original icmp gets replaced with the overflow value.
  return ExtractValueInst::Create(Call, 1, "sadd.overflow");
}
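// For example (a sketch of the i8 case from the comment above): with %a and
// %b sign-extended from i8 to i32,
//   %sum = add i32 %a, %b
//   %off = add i32 %sum, 128
//   %cmp = icmp ugt i32 %off, 255
// becomes a call to llvm.sadd.with.overflow.i8 on the truncated operands;
// %sum is rewritten as a zext of the narrow result and %cmp becomes the
// extracted overflow bit.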
// Fold icmp Pred X, C.
Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
  CmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Cmp.getOperand(0);

  const APInt *C;
  if (!match(Cmp.getOperand(1), m_APInt(C)))
    return nullptr;

  Value *A = nullptr, *B = nullptr;

  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an addition
  // in a wider type and explicitly checks for overflow using comparisons
  // against INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow
  // intrinsic.
  //
  // TODO: This could probably be generalized to handle other overflow-safe
  // operations if we worked out the formulas to compute the appropriate magic
  // constants.
  //
  // sum = a + b
  // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
  {
    ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
    if (Pred == ICmpInst::ICMP_UGT &&
        match(X, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
      if (Instruction *Res = processUGT_ADDCST_ADD(
              Cmp, A, B, CI2, cast<ConstantInt>(Cmp.getOperand(1)), *this))
        return Res;
  }

  // (icmp sgt smin(PosA, B), 0) -> (icmp sgt B, 0)
  if (C->isNullValue() && Pred == ICmpInst::ICMP_SGT) {
    SelectPatternResult SPR = matchSelectPattern(X, A, B);
    if (SPR.Flavor == SPF_SMIN) {
      if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, B, Cmp.getOperand(1));
      if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
        return new ICmpInst(Pred, A, Cmp.getOperand(1));
    }
  }

  // FIXME: Use m_APInt to allow folds for splat constants.
  ConstantInt *CI = dyn_cast<ConstantInt>(Cmp.getOperand(1));
  if (!CI)
    return nullptr;

  // Canonicalize icmp instructions based on dominating conditions.
  BasicBlock *Parent = Cmp.getParent();
  BasicBlock *Dom = Parent->getSinglePredecessor();
  auto *BI = Dom ? dyn_cast<BranchInst>(Dom->getTerminator()) : nullptr;
  ICmpInst::Predicate Pred2;
  BasicBlock *TrueBB, *FalseBB;
  ConstantInt *CI2;
  if (BI && match(BI, m_Br(m_ICmp(Pred2, m_Specific(X), m_ConstantInt(CI2)),
                           TrueBB, FalseBB)) &&
      TrueBB != FalseBB) {
    ConstantRange CR =
        ConstantRange::makeAllowedICmpRegion(Pred, CI->getValue());
    ConstantRange DominatingCR =
        (Parent == TrueBB)
            ? ConstantRange::makeExactICmpRegion(Pred2, CI2->getValue())
            : ConstantRange::makeExactICmpRegion(
                  CmpInst::getInversePredicate(Pred2), CI2->getValue());
    ConstantRange Intersection = DominatingCR.intersectWith(CR);
    ConstantRange Difference = DominatingCR.difference(CR);
    if (Intersection.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getFalse());
    if (Difference.isEmptySet())
      return replaceInstUsesWith(Cmp, Builder.getTrue());

    // If this is a normal comparison, it demands all bits. If it is a sign
    // bit comparison, it only demands the sign bit.
    bool UnusedBit;
    bool IsSignBit = isSignBitCheck(Pred, CI->getValue(), UnusedBit);

    // Canonicalizing a sign bit comparison that gets used in a branch
    // pessimizes codegen by generating a branch-on-zero instruction instead
    // of a test-and-branch. So we avoid canonicalizing in such situations,
    // because a test-and-branch instruction has better branch displacement
    // than a compare-and-branch instruction.
    if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
      return nullptr;

    if (auto *AI = Intersection.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*AI));
    if (auto *AD = Difference.getSingleElement())
      return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*AD));
  }

  return nullptr;
}

/// Fold icmp (trunc X, Y), C.
Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
                                                 TruncInst *Trunc,
                                                 const APInt *C) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *X = Trunc->getOperand(0);
  if (C->isOneValue() && C->getBitWidth() > 1) {
    // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
    Value *V = nullptr;
    if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
      return new ICmpInst(ICmpInst::ICMP_SLT, V,
                          ConstantInt::get(V->getType(), 1));
  }

  if (Cmp.isEquality() && Trunc->hasOneUse()) {
    // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
    // of the high bits truncated out of x are known.
    unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
             SrcBits = X->getType()->getScalarSizeInBits();
    KnownBits Known = computeKnownBits(X, 0, &Cmp);

    // If all the high bits are known, we can do this xform.
    if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
      // Pull in the high bits from known-ones set.
      APInt NewRHS = C->zext(SrcBits);
      NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
    }
  }

  return nullptr;
}

/// Fold icmp (xor X, Y), C.
Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
                                               BinaryOperator *Xor,
                                               const APInt *C) {
  Value *X = Xor->getOperand(0);
  Value *Y = Xor->getOperand(1);
  const APInt *XorC;
  if (!match(Y, m_APInt(XorC)))
    return nullptr;

  // If this is a comparison that tests the signbit (X < 0) or (X > -1),
  // fold the xor.
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  if ((Pred == ICmpInst::ICMP_SLT && C->isNullValue()) ||
      (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue())) {

    // If the sign bit of the XorCst is not set, there is no change to
    // the operation, just stop using the Xor.
    if (!XorC->isNegative()) {
      Cmp.setOperand(0, X);
      Worklist.Add(Xor);
      return &Cmp;
    }

    // Was the old condition true if the operand is positive?
    bool isTrueIfPositive = Pred == ICmpInst::ICMP_SGT;

    // If so, the new one isn't.
    isTrueIfPositive ^= true;

    Constant *CmpConstant = cast<Constant>(Cmp.getOperand(1));
    if (isTrueIfPositive)
      return new ICmpInst(ICmpInst::ICMP_SGT, X, SubOne(CmpConstant));
    else
      return new ICmpInst(ICmpInst::ICMP_SLT, X, AddOne(CmpConstant));
  }

  if (Xor->hasOneUse()) {
    // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
    if (!Cmp.isEquality() && XorC->isSignMask()) {
      Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
                            : Cmp.getSignedPredicate();
      return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC));
    }

    // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
    if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
      Pred = Cmp.isSigned() ?
Cmp.getUnsignedPredicate() 1542 : Cmp.getSignedPredicate(); 1543 Pred = Cmp.getSwappedPredicate(Pred); 1544 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), *C ^ *XorC)); 1545 } 1546 } 1547 1548 // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C) 1549 // iff -C is a power of 2 1550 if (Pred == ICmpInst::ICMP_UGT && *XorC == ~(*C) && (*C + 1).isPowerOf2()) 1551 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 1552 1553 // (icmp ult (xor X, C), -C) -> (icmp uge X, C) 1554 // iff -C is a power of 2 1555 if (Pred == ICmpInst::ICMP_ULT && *XorC == -(*C) && C->isPowerOf2()) 1556 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 1557 1558 return nullptr; 1559 } 1560 1561 /// Fold icmp (and (sh X, Y), C2), C1. 1562 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And, 1563 const APInt *C1, const APInt *C2) { 1564 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0)); 1565 if (!Shift || !Shift->isShift()) 1566 return nullptr; 1567 1568 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could 1569 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in 1570 // code produced by the clang front-end, for bitfield access. 1571 // This seemingly simple opportunity to fold away a shift turns out to be 1572 // rather complicated. See PR17827 for details. 1573 unsigned ShiftOpcode = Shift->getOpcode(); 1574 bool IsShl = ShiftOpcode == Instruction::Shl; 1575 const APInt *C3; 1576 if (match(Shift->getOperand(1), m_APInt(C3))) { 1577 bool CanFold = false; 1578 if (ShiftOpcode == Instruction::AShr) { 1579 // There may be some constraints that make this possible, but nothing 1580 // simple has been discovered yet. 1581 CanFold = false; 1582 } else if (ShiftOpcode == Instruction::Shl) { 1583 // For a left shift, we can fold if the comparison is not signed. We can 1584 // also fold a signed comparison if the mask value and comparison value 1585 // are not negative. These constraints may not be obvious, but we can 1586 // prove that they are correct using an SMT solver. 1587 if (!Cmp.isSigned() || (!C2->isNegative() && !C1->isNegative())) 1588 CanFold = true; 1589 } else if (ShiftOpcode == Instruction::LShr) { 1590 // For a logical right shift, we can fold if the comparison is not signed. 1591 // We can also fold a signed comparison if the shifted mask value and the 1592 // shifted comparison value are not negative. These constraints may not be 1593 // obvious, but we can prove that they are correct using an SMT solver. 1594 if (!Cmp.isSigned() || 1595 (!C2->shl(*C3).isNegative() && !C1->shl(*C3).isNegative())) 1596 CanFold = true; 1597 } 1598 1599 if (CanFold) { 1600 APInt NewCst = IsShl ? C1->lshr(*C3) : C1->shl(*C3); 1601 APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3); 1602 // Check to see if we are shifting out any of the bits being compared. 1603 if (SameAsC1 != *C1) { 1604 // If we shifted bits out, the fold is not going to work out. As a 1605 // special case, check to see if this means that the result is always 1606 // true or false now. 1607 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ) 1608 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType())); 1609 if (Cmp.getPredicate() == ICmpInst::ICMP_NE) 1610 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType())); 1611 } else { 1612 Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst)); 1613 APInt NewAndCst = IsShl ? 
C2->lshr(*C3) : C2->shl(*C3); 1614 And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst)); 1615 And->setOperand(0, Shift->getOperand(0)); 1616 Worklist.Add(Shift); // Shift is dead. 1617 return &Cmp; 1618 } 1619 } 1620 } 1621 1622 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is 1623 // preferable because it allows the C2 << Y expression to be hoisted out of a 1624 // loop if Y is invariant and X is not. 1625 if (Shift->hasOneUse() && C1->isNullValue() && Cmp.isEquality() && 1626 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) { 1627 // Compute C2 << Y. 1628 Value *NewShift = 1629 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1)) 1630 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1)); 1631 1632 // Compute X & (C2 << Y). 1633 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift); 1634 Cmp.setOperand(0, NewAnd); 1635 return &Cmp; 1636 } 1637 1638 return nullptr; 1639 } 1640 1641 /// Fold icmp (and X, C2), C1. 1642 Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp, 1643 BinaryOperator *And, 1644 const APInt *C1) { 1645 const APInt *C2; 1646 if (!match(And->getOperand(1), m_APInt(C2))) 1647 return nullptr; 1648 1649 if (!And->hasOneUse() || !And->getOperand(0)->hasOneUse()) 1650 return nullptr; 1651 1652 // If the LHS is an 'and' of a truncate and we can widen the and/compare to 1653 // the input width without changing the value produced, eliminate the cast: 1654 // 1655 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1' 1656 // 1657 // We can do this transformation if the constants do not have their sign bits 1658 // set or if it is an equality comparison. Extending a relational comparison 1659 // when we're checking the sign bit would not work. 1660 Value *W; 1661 if (match(And->getOperand(0), m_Trunc(m_Value(W))) && 1662 (Cmp.isEquality() || (!C1->isNegative() && !C2->isNegative()))) { 1663 // TODO: Is this a good transform for vectors? Wider types may reduce 1664 // throughput. Should this transform be limited (even for scalars) by using 1665 // shouldChangeType()? 
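// As an illustrative sketch of the widening below (the value names are made
// up): for an i32 %w,
//   icmp ult (and (trunc i32 %w to i8), 15), 10
// can become
//   icmp ult (and i32 %w, 15), 10
// because both constants are non-negative, so zero-extending them preserves
// the unsigned (and equality) semantics of the compare.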
1666 if (!Cmp.getType()->isVectorTy()) {
1667 Type *WideType = W->getType();
1668 unsigned WideScalarBits = WideType->getScalarSizeInBits();
1669 Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits));
1670 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1671 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1672 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1673 }
1674 }
1675
1676 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, C2))
1677 return I;
1678
1679 // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1680 // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1681 //
1682 // iff pred isn't signed
1683 if (!Cmp.isSigned() && C1->isNullValue() &&
1684 match(And->getOperand(1), m_One())) {
1685 Constant *One = cast<Constant>(And->getOperand(1));
1686 Value *Or = And->getOperand(0);
1687 Value *A, *B, *LShr;
1688 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1689 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1690 unsigned UsesRemoved = 0;
1691 if (And->hasOneUse())
1692 ++UsesRemoved;
1693 if (Or->hasOneUse())
1694 ++UsesRemoved;
1695 if (LShr->hasOneUse())
1696 ++UsesRemoved;
1697
1698 // Compute A & ((1 << B) | 1)
1699 Value *NewOr = nullptr;
1700 if (auto *C = dyn_cast<Constant>(B)) {
1701 if (UsesRemoved >= 1)
1702 NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1703 } else {
1704 if (UsesRemoved >= 3)
1705 NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1706 /*HasNUW=*/true),
1707 One, Or->getName());
1708 }
1709 if (NewOr) {
1710 Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1711 Cmp.setOperand(0, NewAnd);
1712 return &Cmp;
1713 }
1714 }
1715 }
1716
1717 // (X & C2) > C1 --> (X & C2) != 0, if any bit set in (X & C2) will produce a
1718 // result greater than C1.
1719 unsigned NumTZ = C2->countTrailingZeros();
1720 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && NumTZ < C2->getBitWidth() &&
1721 APInt::getOneBitSet(C2->getBitWidth(), NumTZ).ugt(*C1)) {
1722 Constant *Zero = Constant::getNullValue(And->getType());
1723 return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
1724 }
1725
1726 return nullptr;
1727 }
1728
1729 /// Fold icmp (and X, Y), C.
1730 Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
1731 BinaryOperator *And,
1732 const APInt *C) {
1733 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1734 return I;
1735
1736 // TODO: These all require that Y is constant too, so refactor with the above.
1737
1738 // Try to optimize things like "A[i] & 42 == 0" to index computations.
1739 Value *X = And->getOperand(0);
1740 Value *Y = And->getOperand(1);
1741 if (auto *LI = dyn_cast<LoadInst>(X))
1742 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1743 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1744 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1745 !LI->isVolatile() && isa<ConstantInt>(Y)) {
1746 ConstantInt *C2 = cast<ConstantInt>(Y);
1747 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1748 return Res;
1749 }
1750
1751 if (!Cmp.isEquality())
1752 return nullptr;
1753
1754 // X & -C == -C -> X >u ~C
1755 // X & -C != -C -> X <=u ~C
1756 // iff C is a power of 2
1757 if (Cmp.getOperand(1) == Y && (-(*C)).isPowerOf2()) {
1758 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ?
CmpInst::ICMP_UGT 1759 : CmpInst::ICMP_ULE; 1760 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1)))); 1761 } 1762 1763 // (X & C2) == 0 -> (trunc X) >= 0 1764 // (X & C2) != 0 -> (trunc X) < 0 1765 // iff C2 is a power of 2 and it masks the sign bit of a legal integer type. 1766 const APInt *C2; 1767 if (And->hasOneUse() && C->isNullValue() && match(Y, m_APInt(C2))) { 1768 int32_t ExactLogBase2 = C2->exactLogBase2(); 1769 if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) { 1770 Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); 1771 if (And->getType()->isVectorTy()) 1772 NTy = VectorType::get(NTy, And->getType()->getVectorNumElements()); 1773 Value *Trunc = Builder.CreateTrunc(X, NTy); 1774 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE 1775 : CmpInst::ICMP_SLT; 1776 return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy)); 1777 } 1778 } 1779 1780 return nullptr; 1781 } 1782 1783 /// Fold icmp (or X, Y), C. 1784 Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or, 1785 const APInt *C) { 1786 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1787 if (C->isOneValue()) { 1788 // icmp slt signum(V) 1 --> icmp slt V, 1 1789 Value *V = nullptr; 1790 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V)))) 1791 return new ICmpInst(ICmpInst::ICMP_SLT, V, 1792 ConstantInt::get(V->getType(), 1)); 1793 } 1794 1795 // X | C == C --> X <=u C 1796 // X | C != C --> X >u C 1797 // iff C+1 is a power of 2 (C is a bitmask of the low bits) 1798 if (Cmp.isEquality() && Cmp.getOperand(1) == Or->getOperand(1) && 1799 (*C + 1).isPowerOf2()) { 1800 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT; 1801 return new ICmpInst(Pred, Or->getOperand(0), Or->getOperand(1)); 1802 } 1803 1804 if (!Cmp.isEquality() || !C->isNullValue() || !Or->hasOneUse()) 1805 return nullptr; 1806 1807 Value *P, *Q; 1808 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) { 1809 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0 1810 // -> and (icmp eq P, null), (icmp eq Q, null). 1811 Value *CmpP = 1812 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType())); 1813 Value *CmpQ = 1814 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType())); 1815 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1816 return BinaryOperator::Create(BOpc, CmpP, CmpQ); 1817 } 1818 1819 // Are we using xors to bitwise check for a pair of (in)equalities? Convert to 1820 // a shorter form that has more potential to be folded even further. 1821 Value *X1, *X2, *X3, *X4; 1822 if (match(Or->getOperand(0), m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) && 1823 match(Or->getOperand(1), m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) { 1824 // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4) 1825 // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4) 1826 Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2); 1827 Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4); 1828 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1829 return BinaryOperator::Create(BOpc, Cmp12, Cmp34); 1830 } 1831 1832 return nullptr; 1833 } 1834 1835 /// Fold icmp (mul X, Y), C. 
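/// e.g. (an illustrative sketch): a sign test such as
///   icmp sgt (mul nsw i32 %x, 5), -1
/// becomes
///   icmp sge i32 %x, 0
/// because multiplying by a positive constant under nsw preserves the sign;
/// a negative multiplier would swap the predicate instead.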
1836 Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp, 1837 BinaryOperator *Mul, 1838 const APInt *C) { 1839 const APInt *MulC; 1840 if (!match(Mul->getOperand(1), m_APInt(MulC))) 1841 return nullptr; 1842 1843 // If this is a test of the sign bit and the multiply is sign-preserving with 1844 // a constant operand, use the multiply LHS operand instead. 1845 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1846 if (isSignTest(Pred, *C) && Mul->hasNoSignedWrap()) { 1847 if (MulC->isNegative()) 1848 Pred = ICmpInst::getSwappedPredicate(Pred); 1849 return new ICmpInst(Pred, Mul->getOperand(0), 1850 Constant::getNullValue(Mul->getType())); 1851 } 1852 1853 return nullptr; 1854 } 1855 1856 /// Fold icmp (shl 1, Y), C. 1857 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl, 1858 const APInt *C) { 1859 Value *Y; 1860 if (!match(Shl, m_Shl(m_One(), m_Value(Y)))) 1861 return nullptr; 1862 1863 Type *ShiftType = Shl->getType(); 1864 uint32_t TypeBits = C->getBitWidth(); 1865 bool CIsPowerOf2 = C->isPowerOf2(); 1866 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1867 if (Cmp.isUnsigned()) { 1868 // (1 << Y) pred C -> Y pred Log2(C) 1869 if (!CIsPowerOf2) { 1870 // (1 << Y) < 30 -> Y <= 4 1871 // (1 << Y) <= 30 -> Y <= 4 1872 // (1 << Y) >= 30 -> Y > 4 1873 // (1 << Y) > 30 -> Y > 4 1874 if (Pred == ICmpInst::ICMP_ULT) 1875 Pred = ICmpInst::ICMP_ULE; 1876 else if (Pred == ICmpInst::ICMP_UGE) 1877 Pred = ICmpInst::ICMP_UGT; 1878 } 1879 1880 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31 1881 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31 1882 unsigned CLog2 = C->logBase2(); 1883 if (CLog2 == TypeBits - 1) { 1884 if (Pred == ICmpInst::ICMP_UGE) 1885 Pred = ICmpInst::ICMP_EQ; 1886 else if (Pred == ICmpInst::ICMP_ULT) 1887 Pred = ICmpInst::ICMP_NE; 1888 } 1889 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2)); 1890 } else if (Cmp.isSigned()) { 1891 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1); 1892 if (C->isAllOnesValue()) { 1893 // (1 << Y) <= -1 -> Y == 31 1894 if (Pred == ICmpInst::ICMP_SLE) 1895 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 1896 1897 // (1 << Y) > -1 -> Y != 31 1898 if (Pred == ICmpInst::ICMP_SGT) 1899 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 1900 } else if (!(*C)) { 1901 // (1 << Y) < 0 -> Y == 31 1902 // (1 << Y) <= 0 -> Y == 31 1903 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 1904 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 1905 1906 // (1 << Y) >= 0 -> Y != 31 1907 // (1 << Y) > 0 -> Y != 31 1908 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 1909 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 1910 } 1911 } else if (Cmp.isEquality() && CIsPowerOf2) { 1912 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C->logBase2())); 1913 } 1914 1915 return nullptr; 1916 } 1917 1918 /// Fold icmp (shl X, Y), C. 1919 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp, 1920 BinaryOperator *Shl, 1921 const APInt *C) { 1922 const APInt *ShiftVal; 1923 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal))) 1924 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), *C, *ShiftVal); 1925 1926 const APInt *ShiftAmt; 1927 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt))) 1928 return foldICmpShlOne(Cmp, Shl, C); 1929 1930 // Check that the shift amount is in range. If not, don't perform undefined 1931 // shifts. When the shift is visited, it will be simplified. 
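// e.g. (shl i8 %x, 8) has an undefined result in IR, so we make no attempt
// to reason about it here; visiting the shift itself will clean it up.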
1932 unsigned TypeBits = C->getBitWidth(); 1933 if (ShiftAmt->uge(TypeBits)) 1934 return nullptr; 1935 1936 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1937 Value *X = Shl->getOperand(0); 1938 Type *ShType = Shl->getType(); 1939 1940 // NSW guarantees that we are only shifting out sign bits from the high bits, 1941 // so we can ASHR the compare constant without needing a mask and eliminate 1942 // the shift. 1943 if (Shl->hasNoSignedWrap()) { 1944 if (Pred == ICmpInst::ICMP_SGT) { 1945 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt) 1946 APInt ShiftedC = C->ashr(*ShiftAmt); 1947 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1948 } 1949 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) { 1950 // This is the same code as the SGT case, but assert the pre-condition 1951 // that is needed for this to work with equality predicates. 1952 assert(C->ashr(*ShiftAmt).shl(*ShiftAmt) == *C && 1953 "Compare known true or false was not folded"); 1954 APInt ShiftedC = C->ashr(*ShiftAmt); 1955 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1956 } 1957 if (Pred == ICmpInst::ICMP_SLT) { 1958 // SLE is the same as above, but SLE is canonicalized to SLT, so convert: 1959 // (X << S) <=s C is equiv to X <=s (C >> S) for all C 1960 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX 1961 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN 1962 assert(!C->isMinSignedValue() && "Unexpected icmp slt"); 1963 APInt ShiftedC = (*C - 1).ashr(*ShiftAmt) + 1; 1964 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1965 } 1966 // If this is a signed comparison to 0 and the shift is sign preserving, 1967 // use the shift LHS operand instead; isSignTest may change 'Pred', so only 1968 // do that if we're sure to not continue on in this function. 1969 if (isSignTest(Pred, *C)) 1970 return new ICmpInst(Pred, X, Constant::getNullValue(ShType)); 1971 } 1972 1973 // NUW guarantees that we are only shifting out zero bits from the high bits, 1974 // so we can LSHR the compare constant without needing a mask and eliminate 1975 // the shift. 1976 if (Shl->hasNoUnsignedWrap()) { 1977 if (Pred == ICmpInst::ICMP_UGT) { 1978 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt) 1979 APInt ShiftedC = C->lshr(*ShiftAmt); 1980 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1981 } 1982 if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) { 1983 // This is the same code as the UGT case, but assert the pre-condition 1984 // that is needed for this to work with equality predicates. 1985 assert(C->lshr(*ShiftAmt).shl(*ShiftAmt) == *C && 1986 "Compare known true or false was not folded"); 1987 APInt ShiftedC = C->lshr(*ShiftAmt); 1988 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1989 } 1990 if (Pred == ICmpInst::ICMP_ULT) { 1991 // ULE is the same as above, but ULE is canonicalized to ULT, so convert: 1992 // (X << S) <=u C is equiv to X <=u (C >> S) for all C 1993 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u 1994 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0 1995 assert(C->ugt(0) && "ult 0 should have been eliminated"); 1996 APInt ShiftedC = (*C - 1).lshr(*ShiftAmt) + 1; 1997 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 1998 } 1999 } 2000 2001 if (Cmp.isEquality() && Shl->hasOneUse()) { 2002 // Strength-reduce the shift into an 'and'. 
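// e.g. (illustrative, i8): icmp eq (shl i8 %x, 3), 88 becomes
//   icmp eq (and i8 %x, 31), 11
// where 31 masks the 5 bits that survive the shift and 11 == 88 >>u 3.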
2003 Constant *Mask = ConstantInt::get(
2004 ShType,
2005 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2006 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2007 Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt));
2008 return new ICmpInst(Pred, And, LShrC);
2009 }
2010
2011 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2012 bool TrueIfSigned = false;
2013 if (Shl->hasOneUse() && isSignBitCheck(Pred, *C, TrueIfSigned)) {
2014 // (X << 31) <s 0 --> (X & 1) != 0
2015 Constant *Mask = ConstantInt::get(
2016 ShType,
2017 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2018 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2019 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2020 And, Constant::getNullValue(ShType));
2021 }
2022
2023 // Transform (icmp pred iM (shl iM %v, N), C)
2024 // -> (icmp pred i(M-N) (trunc iM %v to i(M-N)), (trunc (C>>N)))
2025 // if (trunc (C>>N)) loses no information and i(M-N) is a legal integer type.
2026 // This enables us to get rid of the shift in favor of a trunc that may be
2027 // free on the target. It has the additional benefit of comparing to a
2028 // smaller constant that may be more target-friendly.
2029 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2030 if (Shl->hasOneUse() && Amt != 0 && C->countTrailingZeros() >= Amt &&
2031 DL.isLegalInteger(TypeBits - Amt)) {
2032 Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2033 if (ShType->isVectorTy())
2034 TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
2035 Constant *NewC =
2036 ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt));
2037 return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
2038 }
2039
2040 return nullptr;
2041 }
2042
2043 /// Fold icmp ({al}shr X, Y), C.
2044 Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
2045 BinaryOperator *Shr,
2046 const APInt *C) {
2047 // An exact shr only shifts out zero bits, so:
2048 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2049 Value *X = Shr->getOperand(0);
2050 CmpInst::Predicate Pred = Cmp.getPredicate();
2051 if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
2052 C->isNullValue())
2053 return new ICmpInst(Pred, X, Cmp.getOperand(1));
2054
2055 const APInt *ShiftVal;
2056 if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2057 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), *C, *ShiftVal);
2058
2059 const APInt *ShiftAmt;
2060 if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2061 return nullptr;
2062
2063 // Check that the shift amount is in range. If not, don't perform undefined
2064 // shifts. When the shift is visited, it will be simplified.
2065 unsigned TypeBits = C->getBitWidth();
2066 unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2067 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2068 return nullptr;
2069
2070 bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2071 if (!Cmp.isEquality()) {
2072 // If we have an unsigned comparison and an ashr, we can't simplify this.
2073 // Similarly for signed comparisons with lshr.
2074 if (Cmp.isSigned() != IsAShr)
2075 return nullptr;
2076
2077 // Otherwise, all lshr and most exact ashrs are equivalent to a udiv/sdiv
2078 // by a power of 2. Since we already have logic to simplify these,
2079 // transform to div and then simplify the resultant comparison.
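// e.g. (illustrative): icmp ult (lshr i8 %x, 2), 5 becomes
//   icmp ult (udiv i8 %x, 4), 5
// and the udiv logic below then turns that into the range check %x u< 20.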
2080 if (IsAShr && (!Shr->isExact() || ShAmtVal == TypeBits - 1)) 2081 return nullptr; 2082 2083 // Revisit the shift (to delete it). 2084 Worklist.Add(Shr); 2085 2086 Constant *DivCst = ConstantInt::get( 2087 Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal)); 2088 2089 Value *Tmp = IsAShr ? Builder.CreateSDiv(X, DivCst, "", Shr->isExact()) 2090 : Builder.CreateUDiv(X, DivCst, "", Shr->isExact()); 2091 2092 Cmp.setOperand(0, Tmp); 2093 2094 // If the builder folded the binop, just return it. 2095 BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp); 2096 if (!TheDiv) 2097 return &Cmp; 2098 2099 // Otherwise, fold this div/compare. 2100 assert(TheDiv->getOpcode() == Instruction::SDiv || 2101 TheDiv->getOpcode() == Instruction::UDiv); 2102 2103 Instruction *Res = foldICmpDivConstant(Cmp, TheDiv, C); 2104 assert(Res && "This div/cst should have folded!"); 2105 return Res; 2106 } 2107 2108 // Handle equality comparisons of shift-by-constant. 2109 2110 // If the comparison constant changes with the shift, the comparison cannot 2111 // succeed (bits of the comparison constant cannot match the shifted value). 2112 // This should be known by InstSimplify and already be folded to true/false. 2113 assert(((IsAShr && C->shl(ShAmtVal).ashr(ShAmtVal) == *C) || 2114 (!IsAShr && C->shl(ShAmtVal).lshr(ShAmtVal) == *C)) && 2115 "Expected icmp+shr simplify did not occur."); 2116 2117 // Check if the bits shifted out are known to be zero. If so, we can compare 2118 // against the unshifted value: 2119 // (X & 4) >> 1 == 2 --> (X & 4) == 4. 2120 Constant *ShiftedCmpRHS = ConstantInt::get(Shr->getType(), *C << ShAmtVal); 2121 if (Shr->hasOneUse()) { 2122 if (Shr->isExact()) 2123 return new ICmpInst(Pred, X, ShiftedCmpRHS); 2124 2125 // Otherwise strength reduce the shift into an 'and'. 2126 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal)); 2127 Constant *Mask = ConstantInt::get(Shr->getType(), Val); 2128 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask"); 2129 return new ICmpInst(Pred, And, ShiftedCmpRHS); 2130 } 2131 2132 return nullptr; 2133 } 2134 2135 /// Fold icmp (udiv X, Y), C. 2136 Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp, 2137 BinaryOperator *UDiv, 2138 const APInt *C) { 2139 const APInt *C2; 2140 if (!match(UDiv->getOperand(0), m_APInt(C2))) 2141 return nullptr; 2142 2143 assert(*C2 != 0 && "udiv 0, X should have been simplified already."); 2144 2145 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1)) 2146 Value *Y = UDiv->getOperand(1); 2147 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) { 2148 assert(!C->isMaxValue() && 2149 "icmp ugt X, UINT_MAX should have been simplified already."); 2150 return new ICmpInst(ICmpInst::ICMP_ULE, Y, 2151 ConstantInt::get(Y->getType(), C2->udiv(*C + 1))); 2152 } 2153 2154 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C) 2155 if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) { 2156 assert(*C != 0 && "icmp ult X, 0 should have been simplified already."); 2157 return new ICmpInst(ICmpInst::ICMP_UGT, Y, 2158 ConstantInt::get(Y->getType(), C2->udiv(*C))); 2159 } 2160 2161 return nullptr; 2162 } 2163 2164 /// Fold icmp ({su}div X, Y), C. 2165 Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp, 2166 BinaryOperator *Div, 2167 const APInt *C) { 2168 // Fold: icmp pred ([us]div X, C2), C -> range test 2169 // Fold this div into the comparison, producing a range check. 2170 // Determine, based on the divide type, what the range is being 2171 // checked. 
If there is an overflow on the low or high side, remember
2172 // it; otherwise compute the range [low, hi) bounding the new value.
2173 // See InsertRangeTest above for the kinds of replacements possible.
2174 const APInt *C2;
2175 if (!match(Div->getOperand(1), m_APInt(C2)))
2176 return nullptr;
2177
2178 // FIXME: If the operand types don't match the type of the divide
2179 // then don't attempt this transform. The code below doesn't have the
2180 // logic to deal with a signed divide and an unsigned compare (and
2181 // vice versa). This is because (x /s C2) <s C produces different
2182 // results than (x /s C2) <u C or (x /u C2) <s C or even
2183 // (x /u C2) <u C. Simply casting the operands and result won't
2184 // work. :( The if statement below tests that condition and bails
2185 // if it finds it.
2186 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2187 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2188 return nullptr;
2189
2190 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2191 // INT_MIN will also fail if the divisor is 1. Although folds of all these
2192 // division-by-constant cases should be present, we can not assert that they
2193 // have happened before we reach this icmp instruction.
2194 if (C2->isNullValue() || C2->isOneValue() ||
2195 (DivIsSigned && C2->isAllOnesValue()))
2196 return nullptr;
2197
2198 // TODO: We could do all of the computations below using APInt.
2199 Constant *CmpRHS = cast<Constant>(Cmp.getOperand(1));
2200 Constant *DivRHS = cast<Constant>(Div->getOperand(1));
2201
2202 // Compute Prod = CmpRHS * DivRHS. We are essentially solving an equation of
2203 // the form X / C2 = C. We solve for X by multiplying C2 (DivRHS) and C (CmpRHS).
2204 // By solving for X, we can turn this into a range check instead of computing
2205 // a divide.
2206 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
2207
2208 // Determine if the product overflows by checking that dividing it back by
2209 // DivRHS does not recover CmpRHS. Make sure we do the same kind of divide as
2210 // in the LHS instruction that we're folding.
2211 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS)
2212 : ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
2213
2214 ICmpInst::Predicate Pred = Cmp.getPredicate();
2215
2216 // If the division is known to be exact, then there is no remainder from the
2217 // divide, so the covered range size is one; otherwise it is the divisor.
2218 Constant *RangeSize =
2219 Div->isExact() ? ConstantInt::get(Div->getType(), 1) : DivRHS;
2220
2221 // Figure out the interval that is being checked. For example, a comparison
2222 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2223 // Compute this interval based on the constants involved and the signedness of
2224 // the compare/divide. This computes a half-open interval, keeping track of
2225 // whether either value in the interval overflows. After analysis, each
2226 // overflow variable is set to 0 if its corresponding bound variable is valid,
2227 // -1 if it overflowed off the bottom end, or +1 if it overflowed off the top end.
2228 int LoOverflow = 0, HiOverflow = 0;
2229 Constant *LoBound = nullptr, *HiBound = nullptr;
2230
2231 if (!DivIsSigned) { // udiv
2232 // e.g. X/5 op 3 --> [15, 20)
2233 LoBound = Prod;
2234 HiOverflow = LoOverflow = ProdOV;
2235 if (!HiOverflow) {
2236 // If this is not an exact divide, then many values in the range collapse
2237 // to the same result value.
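// e.g. X /u 5 == 3 holds for every X in [15, 20), i.e. 15 through 19,
// so HiBound becomes LoBound + 5 here.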
2238 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false); 2239 } 2240 } else if (C2->isStrictlyPositive()) { // Divisor is > 0. 2241 if (C->isNullValue()) { // (X / pos) op 0 2242 // Can't overflow. e.g. X/2 op 0 --> [-1, 2) 2243 LoBound = ConstantExpr::getNeg(SubOne(RangeSize)); 2244 HiBound = RangeSize; 2245 } else if (C->isStrictlyPositive()) { // (X / pos) op pos 2246 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20) 2247 HiOverflow = LoOverflow = ProdOV; 2248 if (!HiOverflow) 2249 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true); 2250 } else { // (X / pos) op neg 2251 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14) 2252 HiBound = AddOne(Prod); 2253 LoOverflow = HiOverflow = ProdOV ? -1 : 0; 2254 if (!LoOverflow) { 2255 Constant *DivNeg = ConstantExpr::getNeg(RangeSize); 2256 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0; 2257 } 2258 } 2259 } else if (C2->isNegative()) { // Divisor is < 0. 2260 if (Div->isExact()) 2261 RangeSize = ConstantExpr::getNeg(RangeSize); 2262 if (C->isNullValue()) { // (X / neg) op 0 2263 // e.g. X/-5 op 0 --> [-4, 5) 2264 LoBound = AddOne(RangeSize); 2265 HiBound = ConstantExpr::getNeg(RangeSize); 2266 if (HiBound == DivRHS) { // -INTMIN = INTMIN 2267 HiOverflow = 1; // [INTMIN+1, overflow) 2268 HiBound = nullptr; // e.g. X/INTMIN = 0 --> X > INTMIN 2269 } 2270 } else if (C->isStrictlyPositive()) { // (X / neg) op pos 2271 // e.g. X/-5 op 3 --> [-19, -14) 2272 HiBound = AddOne(Prod); 2273 HiOverflow = LoOverflow = ProdOV ? -1 : 0; 2274 if (!LoOverflow) 2275 LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0; 2276 } else { // (X / neg) op neg 2277 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20) 2278 LoOverflow = HiOverflow = ProdOV; 2279 if (!HiOverflow) 2280 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true); 2281 } 2282 2283 // Dividing by a negative swaps the condition. LT <-> GT 2284 Pred = ICmpInst::getSwappedPredicate(Pred); 2285 } 2286 2287 Value *X = Div->getOperand(0); 2288 switch (Pred) { 2289 default: llvm_unreachable("Unhandled icmp opcode!"); 2290 case ICmpInst::ICMP_EQ: 2291 if (LoOverflow && HiOverflow) 2292 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2293 if (HiOverflow) 2294 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : 2295 ICmpInst::ICMP_UGE, X, LoBound); 2296 if (LoOverflow) 2297 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : 2298 ICmpInst::ICMP_ULT, X, HiBound); 2299 return replaceInstUsesWith( 2300 Cmp, insertRangeTest(X, LoBound->getUniqueInteger(), 2301 HiBound->getUniqueInteger(), DivIsSigned, true)); 2302 case ICmpInst::ICMP_NE: 2303 if (LoOverflow && HiOverflow) 2304 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2305 if (HiOverflow) 2306 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : 2307 ICmpInst::ICMP_ULT, X, LoBound); 2308 if (LoOverflow) 2309 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : 2310 ICmpInst::ICMP_UGE, X, HiBound); 2311 return replaceInstUsesWith(Cmp, 2312 insertRangeTest(X, LoBound->getUniqueInteger(), 2313 HiBound->getUniqueInteger(), 2314 DivIsSigned, false)); 2315 case ICmpInst::ICMP_ULT: 2316 case ICmpInst::ICMP_SLT: 2317 if (LoOverflow == +1) // Low bound is greater than input range. 2318 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2319 if (LoOverflow == -1) // Low bound is less than input range. 
2320 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2321 return new ICmpInst(Pred, X, LoBound); 2322 case ICmpInst::ICMP_UGT: 2323 case ICmpInst::ICMP_SGT: 2324 if (HiOverflow == +1) // High bound greater than input range. 2325 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2326 if (HiOverflow == -1) // High bound less than input range. 2327 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2328 if (Pred == ICmpInst::ICMP_UGT) 2329 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound); 2330 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound); 2331 } 2332 2333 return nullptr; 2334 } 2335 2336 /// Fold icmp (sub X, Y), C. 2337 Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp, 2338 BinaryOperator *Sub, 2339 const APInt *C) { 2340 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1); 2341 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2342 2343 // The following transforms are only worth it if the only user of the subtract 2344 // is the icmp. 2345 if (!Sub->hasOneUse()) 2346 return nullptr; 2347 2348 if (Sub->hasNoSignedWrap()) { 2349 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y) 2350 if (Pred == ICmpInst::ICMP_SGT && C->isAllOnesValue()) 2351 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 2352 2353 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y) 2354 if (Pred == ICmpInst::ICMP_SGT && C->isNullValue()) 2355 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 2356 2357 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y) 2358 if (Pred == ICmpInst::ICMP_SLT && C->isNullValue()) 2359 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 2360 2361 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y) 2362 if (Pred == ICmpInst::ICMP_SLT && C->isOneValue()) 2363 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 2364 } 2365 2366 const APInt *C2; 2367 if (!match(X, m_APInt(C2))) 2368 return nullptr; 2369 2370 // C2 - Y <u C -> (Y | (C - 1)) == C2 2371 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2 2372 if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && 2373 (*C2 & (*C - 1)) == (*C - 1)) 2374 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, *C - 1), X); 2375 2376 // C2 - Y >u C -> (Y | C) != C2 2377 // iff C2 & C == C and C + 1 is a power of 2 2378 if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C) 2379 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, *C), X); 2380 2381 return nullptr; 2382 } 2383 2384 /// Fold icmp (add X, Y), C. 2385 Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp, 2386 BinaryOperator *Add, 2387 const APInt *C) { 2388 Value *Y = Add->getOperand(1); 2389 const APInt *C2; 2390 if (Cmp.isEquality() || !match(Y, m_APInt(C2))) 2391 return nullptr; 2392 2393 // Fold icmp pred (add X, C2), C. 2394 Value *X = Add->getOperand(0); 2395 Type *Ty = Add->getType(); 2396 CmpInst::Predicate Pred = Cmp.getPredicate(); 2397 2398 // If the add does not wrap, we can always adjust the compare by subtracting 2399 // the constants. Equality comparisons are handled elsewhere. SGE/SLE are 2400 // canonicalized to SGT/SLT. 2401 if (Add->hasNoSignedWrap() && 2402 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) { 2403 bool Overflow; 2404 APInt NewC = C->ssub_ov(*C2, Overflow); 2405 // If there is overflow, the result must be true or false. 2406 // TODO: Can we assert there is no overflow because InstSimplify always 2407 // handles those cases? 
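// e.g. (illustrative): icmp sgt (add nsw i32 %x, 5), 20 --> icmp sgt i32 %x, 15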
2408 if (!Overflow)
2409 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2410 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2411 }
2412
2413 auto CR = ConstantRange::makeExactICmpRegion(Pred, *C).subtract(*C2);
2414 const APInt &Upper = CR.getUpper();
2415 const APInt &Lower = CR.getLower();
2416 if (Cmp.isSigned()) {
2417 if (Lower.isSignMask())
2418 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2419 if (Upper.isSignMask())
2420 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2421 } else {
2422 if (Lower.isMinValue())
2423 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2424 if (Upper.isMinValue())
2425 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2426 }
2427
2428 if (!Add->hasOneUse())
2429 return nullptr;
2430
2431 // X+C <u C2 -> (X & -C2) == -C
2432 // iff C & (C2-1) == 0
2433 // and C2 is a power of 2
2434 if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0)
2435 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -(*C)),
2436 ConstantExpr::getNeg(cast<Constant>(Y)));
2437
2438 // X+C >u C2 -> (X & ~C2) != -C
2439 // iff C & C2 == 0
2440 // and C2+1 is a power of 2
2441 if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0)
2442 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~(*C)),
2443 ConstantExpr::getNeg(cast<Constant>(Y)));
2444
2445 return nullptr;
2446 }
2447
2448 bool InstCombiner::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2449 Value *&RHS, ConstantInt *&Less,
2450 ConstantInt *&Equal,
2451 ConstantInt *&Greater) {
2452 // TODO: Generalize this to work with other comparison idioms or ensure
2453 // they get canonicalized into this form.
2454
2455 // select i1 (a == b), i32 Equal, i32 (select i1 (a < b), i32 Less, i32
2456 // Greater), where Equal, Less and Greater are placeholders for any three
2457 // constants.
2458 ICmpInst::Predicate PredA, PredB;
2459 if (match(SI->getTrueValue(), m_ConstantInt(Equal)) &&
2460 match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) &&
2461 PredA == ICmpInst::ICMP_EQ &&
2462 match(SI->getFalseValue(),
2463 m_Select(m_ICmp(PredB, m_Specific(LHS), m_Specific(RHS)),
2464 m_ConstantInt(Less), m_ConstantInt(Greater))) &&
2465 PredB == ICmpInst::ICMP_SLT) {
2466 return true;
2467 }
2468 return false;
2469 }
2470
2471 Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp,
2472 SelectInst *Select,
2473 ConstantInt *C) {
2474
2475 assert(C && "Cmp RHS should be a constant int!");
2476 // If we're testing a constant value against the result of a three-way
2477 // comparison, the result can be expressed directly in terms of the
2478 // original values being compared. Note: We could possibly be more
2479 // aggressive here and remove the hasOneUse test. The original select is
2480 // really likely to simplify or sink when we remove a test of the result.
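// Schematically (an illustrative sketch):
//   %s = select (icmp eq %a, %b), 0, (select (icmp slt %a, %b), -1, 1)
//   icmp sgt %s, 0
// is true exactly when %a s> %b, so the code below rebuilds it as
// (icmp sgt %a, %b).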
2481 Value *OrigLHS, *OrigRHS; 2482 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan; 2483 if (Cmp.hasOneUse() && 2484 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal, 2485 C3GreaterThan)) { 2486 assert(C1LessThan && C2Equal && C3GreaterThan); 2487 2488 bool TrueWhenLessThan = 2489 ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C) 2490 ->isAllOnesValue(); 2491 bool TrueWhenEqual = 2492 ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C) 2493 ->isAllOnesValue(); 2494 bool TrueWhenGreaterThan = 2495 ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C) 2496 ->isAllOnesValue(); 2497 2498 // This generates the new instruction that will replace the original Cmp 2499 // Instruction. Instead of enumerating the various combinations when 2500 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus 2501 // false, we rely on chaining of ORs and future passes of InstCombine to 2502 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b). 2503 2504 // When none of the three constants satisfy the predicate for the RHS (C), 2505 // the entire original Cmp can be simplified to a false. 2506 Value *Cond = Builder.getFalse(); 2507 if (TrueWhenLessThan) 2508 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS)); 2509 if (TrueWhenEqual) 2510 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS)); 2511 if (TrueWhenGreaterThan) 2512 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS)); 2513 2514 return replaceInstUsesWith(Cmp, Cond); 2515 } 2516 return nullptr; 2517 } 2518 2519 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C 2520 /// where X is some kind of instruction. 2521 Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) { 2522 const APInt *C; 2523 if (!match(Cmp.getOperand(1), m_APInt(C))) 2524 return nullptr; 2525 2526 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) { 2527 switch (BO->getOpcode()) { 2528 case Instruction::Xor: 2529 if (Instruction *I = foldICmpXorConstant(Cmp, BO, C)) 2530 return I; 2531 break; 2532 case Instruction::And: 2533 if (Instruction *I = foldICmpAndConstant(Cmp, BO, C)) 2534 return I; 2535 break; 2536 case Instruction::Or: 2537 if (Instruction *I = foldICmpOrConstant(Cmp, BO, C)) 2538 return I; 2539 break; 2540 case Instruction::Mul: 2541 if (Instruction *I = foldICmpMulConstant(Cmp, BO, C)) 2542 return I; 2543 break; 2544 case Instruction::Shl: 2545 if (Instruction *I = foldICmpShlConstant(Cmp, BO, C)) 2546 return I; 2547 break; 2548 case Instruction::LShr: 2549 case Instruction::AShr: 2550 if (Instruction *I = foldICmpShrConstant(Cmp, BO, C)) 2551 return I; 2552 break; 2553 case Instruction::UDiv: 2554 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C)) 2555 return I; 2556 LLVM_FALLTHROUGH; 2557 case Instruction::SDiv: 2558 if (Instruction *I = foldICmpDivConstant(Cmp, BO, C)) 2559 return I; 2560 break; 2561 case Instruction::Sub: 2562 if (Instruction *I = foldICmpSubConstant(Cmp, BO, C)) 2563 return I; 2564 break; 2565 case Instruction::Add: 2566 if (Instruction *I = foldICmpAddConstant(Cmp, BO, C)) 2567 return I; 2568 break; 2569 default: 2570 break; 2571 } 2572 // TODO: These folds could be refactored to be part of the above calls. 2573 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C)) 2574 return I; 2575 } 2576 2577 // Match against CmpInst LHS being instructions other than binary operators. 
2578
2579 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) {
2580 // For now, we only support constant integers while folding the
2581 // icmp(select) pattern. We can extend this to support vectors of integers
2582 // similar to the cases handled by binary ops above.
2583 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
2584 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
2585 return I;
2586 }
2587
2588 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) {
2589 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, C))
2590 return I;
2591 }
2592
2593 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, C))
2594 return I;
2595
2596 return nullptr;
2597 }
2598
2599 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2600 /// icmp eq/ne BO, C.
2601 Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
2602 BinaryOperator *BO,
2603 const APInt *C) {
2604 // TODO: Some of these folds could work with arbitrary constants, but this
2605 // function is limited to scalar and vector splat constants.
2606 if (!Cmp.isEquality())
2607 return nullptr;
2608
2609 ICmpInst::Predicate Pred = Cmp.getPredicate();
2610 bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2611 Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2612 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2613
2614 switch (BO->getOpcode()) {
2615 case Instruction::SRem:
2616 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
2617 if (C->isNullValue() && BO->hasOneUse()) {
2618 const APInt *BOC;
2619 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2620 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
2621 return new ICmpInst(Pred, NewRem,
2622 Constant::getNullValue(BO->getType()));
2623 }
2624 }
2625 break;
2626 case Instruction::Add: {
2627 // Replace ((add A, B) != C) with (A != C-B) if B and C are constants.
2628 const APInt *BOC;
2629 if (match(BOp1, m_APInt(BOC))) {
2630 if (BO->hasOneUse()) {
2631 Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
2632 return new ICmpInst(Pred, BOp0, SubC);
2633 }
2634 } else if (C->isNullValue()) {
2635 // Replace ((add A, B) != 0) with (A != -B) if A or B is
2636 // efficiently invertible, or if the add has just this one use.
2637 if (Value *NegVal = dyn_castNegVal(BOp1))
2638 return new ICmpInst(Pred, BOp0, NegVal);
2639 if (Value *NegVal = dyn_castNegVal(BOp0))
2640 return new ICmpInst(Pred, NegVal, BOp1);
2641 if (BO->hasOneUse()) {
2642 Value *Neg = Builder.CreateNeg(BOp1);
2643 Neg->takeName(BO);
2644 return new ICmpInst(Pred, BOp0, Neg);
2645 }
2646 }
2647 break;
2648 }
2649 case Instruction::Xor:
2650 if (BO->hasOneUse()) {
2651 if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
2652 // For the xor case, we can xor two constants together, eliminating
2653 // the explicit xor.
2654 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
2655 } else if (C->isNullValue()) {
2656 // Replace ((xor A, B) != 0) with (A != B)
2657 return new ICmpInst(Pred, BOp0, BOp1);
2658 }
2659 }
2660 break;
2661 case Instruction::Sub:
2662 if (BO->hasOneUse()) {
2663 const APInt *BOC;
2664 if (match(BOp0, m_APInt(BOC))) {
2665 // Replace ((sub BOC, B) != C) with (B != BOC-C).
2666 Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
2667 return new ICmpInst(Pred, BOp1, SubC);
2668 } else if (C->isNullValue()) {
2669 // Replace ((sub A, B) != 0) with (A != B).
2670 return new ICmpInst(Pred, BOp0, BOp1); 2671 } 2672 } 2673 break; 2674 case Instruction::Or: { 2675 const APInt *BOC; 2676 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) { 2677 // Comparing if all bits outside of a constant mask are set? 2678 // Replace (X | C) == -1 with (X & ~C) == ~C. 2679 // This removes the -1 constant. 2680 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1)); 2681 Value *And = Builder.CreateAnd(BOp0, NotBOC); 2682 return new ICmpInst(Pred, And, NotBOC); 2683 } 2684 break; 2685 } 2686 case Instruction::And: { 2687 const APInt *BOC; 2688 if (match(BOp1, m_APInt(BOC))) { 2689 // If we have ((X & C) == C), turn it into ((X & C) != 0). 2690 if (C == BOC && C->isPowerOf2()) 2691 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, 2692 BO, Constant::getNullValue(RHS->getType())); 2693 2694 // Don't perform the following transforms if the AND has multiple uses 2695 if (!BO->hasOneUse()) 2696 break; 2697 2698 // Replace (and X, (1 << size(X)-1) != 0) with x s< 0 2699 if (BOC->isSignMask()) { 2700 Constant *Zero = Constant::getNullValue(BOp0->getType()); 2701 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE; 2702 return new ICmpInst(NewPred, BOp0, Zero); 2703 } 2704 2705 // ((X & ~7) == 0) --> X < 8 2706 if (C->isNullValue() && (~(*BOC) + 1).isPowerOf2()) { 2707 Constant *NegBOC = ConstantExpr::getNeg(cast<Constant>(BOp1)); 2708 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 2709 return new ICmpInst(NewPred, BOp0, NegBOC); 2710 } 2711 } 2712 break; 2713 } 2714 case Instruction::Mul: 2715 if (C->isNullValue() && BO->hasNoSignedWrap()) { 2716 const APInt *BOC; 2717 if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) { 2718 // The trivial case (mul X, 0) is handled by InstSimplify. 2719 // General case : (mul X, C) != 0 iff X != 0 2720 // (mul X, C) == 0 iff X == 0 2721 return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType())); 2722 } 2723 } 2724 break; 2725 case Instruction::UDiv: 2726 if (C->isNullValue()) { 2727 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A) 2728 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT; 2729 return new ICmpInst(NewPred, BOp1, BOp0); 2730 } 2731 break; 2732 default: 2733 break; 2734 } 2735 return nullptr; 2736 } 2737 2738 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C. 2739 Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp, 2740 const APInt *C) { 2741 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)); 2742 if (!II || !Cmp.isEquality()) 2743 return nullptr; 2744 2745 // Handle icmp {eq|ne} <intrinsic>, Constant. 
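// e.g. (illustrative): icmp eq (bswap i32 %x), 0x12345678
// --> icmp eq i32 %x, 0x78563412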
2746 Type *Ty = II->getType(); 2747 switch (II->getIntrinsicID()) { 2748 case Intrinsic::bswap: 2749 Worklist.Add(II); 2750 Cmp.setOperand(0, II->getArgOperand(0)); 2751 Cmp.setOperand(1, ConstantInt::get(Ty, C->byteSwap())); 2752 return &Cmp; 2753 2754 case Intrinsic::ctlz: 2755 case Intrinsic::cttz: 2756 // ctz(A) == bitwidth(A) -> A == 0 and likewise for != 2757 if (*C == C->getBitWidth()) { 2758 Worklist.Add(II); 2759 Cmp.setOperand(0, II->getArgOperand(0)); 2760 Cmp.setOperand(1, ConstantInt::getNullValue(Ty)); 2761 return &Cmp; 2762 } 2763 break; 2764 2765 case Intrinsic::ctpop: { 2766 // popcount(A) == 0 -> A == 0 and likewise for != 2767 // popcount(A) == bitwidth(A) -> A == -1 and likewise for != 2768 bool IsZero = C->isNullValue(); 2769 if (IsZero || *C == C->getBitWidth()) { 2770 Worklist.Add(II); 2771 Cmp.setOperand(0, II->getArgOperand(0)); 2772 auto *NewOp = 2773 IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty); 2774 Cmp.setOperand(1, NewOp); 2775 return &Cmp; 2776 } 2777 break; 2778 } 2779 default: 2780 break; 2781 } 2782 2783 return nullptr; 2784 } 2785 2786 /// Handle icmp with constant (but not simple integer constant) RHS. 2787 Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) { 2788 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2789 Constant *RHSC = dyn_cast<Constant>(Op1); 2790 Instruction *LHSI = dyn_cast<Instruction>(Op0); 2791 if (!RHSC || !LHSI) 2792 return nullptr; 2793 2794 switch (LHSI->getOpcode()) { 2795 case Instruction::GetElementPtr: 2796 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null 2797 if (RHSC->isNullValue() && 2798 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices()) 2799 return new ICmpInst( 2800 I.getPredicate(), LHSI->getOperand(0), 2801 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2802 break; 2803 case Instruction::PHI: 2804 // Only fold icmp into the PHI if the phi and icmp are in the same 2805 // block. If in the same block, we're encouraging jump threading. If 2806 // not, we are just pessimizing the code by making an i1 phi. 2807 if (LHSI->getParent() == I.getParent()) 2808 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI))) 2809 return NV; 2810 break; 2811 case Instruction::Select: { 2812 // If either operand of the select is a constant, we can fold the 2813 // comparison into the select arms, which will cause one to be 2814 // constant folded and the select turned into a bitwise or. 2815 Value *Op1 = nullptr, *Op2 = nullptr; 2816 ConstantInt *CI = nullptr; 2817 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) { 2818 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2819 CI = dyn_cast<ConstantInt>(Op1); 2820 } 2821 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) { 2822 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 2823 CI = dyn_cast<ConstantInt>(Op2); 2824 } 2825 2826 // We only want to perform this transformation if it will not lead to 2827 // additional code. This is true if either both sides of the select 2828 // fold to a constant (in which case the icmp is replaced with a select 2829 // which will usually simplify) or this is the only user of the 2830 // select (in which case we are trading a select+icmp for a simpler 2831 // select+icmp) or all uses of the select can be replaced based on 2832 // dominance information ("Global cases"). 
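// e.g. (an illustrative sketch): for %s = select i1 %c, i32 7, i32 %v,
// the test icmp slt %s, 10 folds its constant arm to 'true', so when the
// select has one use we emit: select i1 %c, i1 true, i1 (icmp slt i32 %v, 10).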
2833 bool Transform = false; 2834 if (Op1 && Op2) 2835 Transform = true; 2836 else if (Op1 || Op2) { 2837 // Local case 2838 if (LHSI->hasOneUse()) 2839 Transform = true; 2840 // Global cases 2841 else if (CI && !CI->isZero()) 2842 // When Op1 is constant try replacing select with second operand. 2843 // Otherwise Op2 is constant and try replacing select with first 2844 // operand. 2845 Transform = 2846 replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1); 2847 } 2848 if (Transform) { 2849 if (!Op1) 2850 Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC, 2851 I.getName()); 2852 if (!Op2) 2853 Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC, 2854 I.getName()); 2855 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); 2856 } 2857 break; 2858 } 2859 case Instruction::IntToPtr: 2860 // icmp pred inttoptr(X), null -> icmp pred X, 0 2861 if (RHSC->isNullValue() && 2862 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType()) 2863 return new ICmpInst( 2864 I.getPredicate(), LHSI->getOperand(0), 2865 Constant::getNullValue(LHSI->getOperand(0)->getType())); 2866 break; 2867 2868 case Instruction::Load: 2869 // Try to optimize things like "A[i] > 4" to index computations. 2870 if (GetElementPtrInst *GEP = 2871 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) { 2872 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 2873 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 2874 !cast<LoadInst>(LHSI)->isVolatile()) 2875 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I)) 2876 return Res; 2877 } 2878 break; 2879 } 2880 2881 return nullptr; 2882 } 2883 2884 /// Try to fold icmp (binop), X or icmp X, (binop). 2885 /// TODO: A large part of this logic is duplicated in InstSimplify's 2886 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code 2887 /// duplication. 2888 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) { 2889 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2890 2891 // Special logic for binary operators. 2892 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0); 2893 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1); 2894 if (!BO0 && !BO1) 2895 return nullptr; 2896 2897 const CmpInst::Predicate Pred = I.getPredicate(); 2898 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false; 2899 if (BO0 && isa<OverflowingBinaryOperator>(BO0)) 2900 NoOp0WrapProblem = 2901 ICmpInst::isEquality(Pred) || 2902 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) || 2903 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap()); 2904 if (BO1 && isa<OverflowingBinaryOperator>(BO1)) 2905 NoOp1WrapProblem = 2906 ICmpInst::isEquality(Pred) || 2907 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) || 2908 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap()); 2909 2910 // Analyze the case when either Op0 or Op1 is an add instruction. 2911 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null). 2912 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 2913 if (BO0 && BO0->getOpcode() == Instruction::Add) { 2914 A = BO0->getOperand(0); 2915 B = BO0->getOperand(1); 2916 } 2917 if (BO1 && BO1->getOpcode() == Instruction::Add) { 2918 C = BO1->getOperand(0); 2919 D = BO1->getOperand(1); 2920 } 2921 2922 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2923 if ((A == Op1 || B == Op1) && NoOp0WrapProblem) 2924 return new ICmpInst(Pred, A == Op1 ? 
B : A, 2925 Constant::getNullValue(Op1->getType())); 2926 2927 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2928 if ((C == Op0 || D == Op0) && NoOp1WrapProblem) 2929 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()), 2930 C == Op0 ? D : C); 2931 2932 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow. 2933 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem && 2934 NoOp1WrapProblem && 2935 // Try not to increase register pressure. 2936 BO0->hasOneUse() && BO1->hasOneUse()) { 2937 // Determine Y and Z in the form icmp (X+Y), (X+Z). 2938 Value *Y, *Z; 2939 if (A == C) { 2940 // C + B == C + D -> B == D 2941 Y = B; 2942 Z = D; 2943 } else if (A == D) { 2944 // D + B == C + D -> B == C 2945 Y = B; 2946 Z = C; 2947 } else if (B == C) { 2948 // A + C == C + D -> A == D 2949 Y = A; 2950 Z = D; 2951 } else { 2952 assert(B == D); 2953 // A + D == C + D -> A == C 2954 Y = A; 2955 Z = C; 2956 } 2957 return new ICmpInst(Pred, Y, Z); 2958 } 2959 2960 // icmp slt (X + -1), Y -> icmp sle X, Y 2961 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT && 2962 match(B, m_AllOnes())) 2963 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1); 2964 2965 // icmp sge (X + -1), Y -> icmp sgt X, Y 2966 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE && 2967 match(B, m_AllOnes())) 2968 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1); 2969 2970 // icmp sle (X + 1), Y -> icmp slt X, Y 2971 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One())) 2972 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1); 2973 2974 // icmp sgt (X + 1), Y -> icmp sge X, Y 2975 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One())) 2976 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1); 2977 2978 // icmp sgt X, (Y + -1) -> icmp sge X, Y 2979 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT && 2980 match(D, m_AllOnes())) 2981 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C); 2982 2983 // icmp sle X, (Y + -1) -> icmp slt X, Y 2984 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE && 2985 match(D, m_AllOnes())) 2986 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C); 2987 2988 // icmp sge X, (Y + 1) -> icmp sgt X, Y 2989 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One())) 2990 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C); 2991 2992 // icmp slt X, (Y + 1) -> icmp sle X, Y 2993 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One())) 2994 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C); 2995 2996 // TODO: The subtraction-related identities shown below also hold, but 2997 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations 2998 // wouldn't happen even if they were implemented. 
2999 // 3000 // icmp ult (X - 1), Y -> icmp ule X, Y 3001 // icmp uge (X - 1), Y -> icmp ugt X, Y 3002 // icmp ugt X, (Y - 1) -> icmp uge X, Y 3003 // icmp ule X, (Y - 1) -> icmp ult X, Y 3004 3005 // icmp ule (X + 1), Y -> icmp ult X, Y 3006 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One())) 3007 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1); 3008 3009 // icmp ugt (X + 1), Y -> icmp uge X, Y 3010 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One())) 3011 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1); 3012 3013 // icmp uge X, (Y + 1) -> icmp ugt X, Y 3014 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One())) 3015 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C); 3016 3017 // icmp ult X, (Y + 1) -> icmp ule X, Y 3018 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One())) 3019 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C); 3020 3021 // if C1 has greater magnitude than C2: 3022 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y 3023 // s.t. C3 = C1 - C2 3024 // 3025 // if C2 has greater magnitude than C1: 3026 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3) 3027 // s.t. C3 = C2 - C1 3028 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem && 3029 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) 3030 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B)) 3031 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) { 3032 const APInt &AP1 = C1->getValue(); 3033 const APInt &AP2 = C2->getValue(); 3034 if (AP1.isNegative() == AP2.isNegative()) { 3035 APInt AP1Abs = C1->getValue().abs(); 3036 APInt AP2Abs = C2->getValue().abs(); 3037 if (AP1Abs.uge(AP2Abs)) { 3038 ConstantInt *C3 = Builder.getInt(AP1 - AP2); 3039 Value *NewAdd = Builder.CreateNSWAdd(A, C3); 3040 return new ICmpInst(Pred, NewAdd, C); 3041 } else { 3042 ConstantInt *C3 = Builder.getInt(AP2 - AP1); 3043 Value *NewAdd = Builder.CreateNSWAdd(C, C3); 3044 return new ICmpInst(Pred, A, NewAdd); 3045 } 3046 } 3047 } 3048 3049 // Analyze the case when either Op0 or Op1 is a sub instruction. 3050 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null). 3051 A = nullptr; 3052 B = nullptr; 3053 C = nullptr; 3054 D = nullptr; 3055 if (BO0 && BO0->getOpcode() == Instruction::Sub) { 3056 A = BO0->getOperand(0); 3057 B = BO0->getOperand(1); 3058 } 3059 if (BO1 && BO1->getOpcode() == Instruction::Sub) { 3060 C = BO1->getOperand(0); 3061 D = BO1->getOperand(1); 3062 } 3063 3064 // icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow. 3065 if (A == Op1 && NoOp0WrapProblem) 3066 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B); 3067 3068 // icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow. 3069 if (C == Op0 && NoOp1WrapProblem) 3070 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType())); 3071 3072 // icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow. 3073 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem && 3074 // Try not to increase register pressure. 3075 BO0->hasOneUse() && BO1->hasOneUse()) 3076 return new ICmpInst(Pred, A, C); 3077 3078 // icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow. 3079 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem && 3080 // Try not to increase register pressure. 
3081 BO0->hasOneUse() && BO1->hasOneUse()) 3082 return new ICmpInst(Pred, D, B); 3083 3084 // icmp (0-X) < cst --> x > -cst 3085 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) { 3086 Value *X; 3087 if (match(BO0, m_Neg(m_Value(X)))) 3088 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) 3089 if (!RHSC->isMinValue(/*isSigned=*/true)) 3090 return new ICmpInst(I.getSwappedPredicate(), X, 3091 ConstantExpr::getNeg(RHSC)); 3092 } 3093 3094 BinaryOperator *SRem = nullptr; 3095 // icmp (srem X, Y), Y 3096 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1)) 3097 SRem = BO0; 3098 // icmp Y, (srem X, Y) 3099 else if (BO1 && BO1->getOpcode() == Instruction::SRem && 3100 Op0 == BO1->getOperand(1)) 3101 SRem = BO1; 3102 if (SRem) { 3103 // We don't check hasOneUse to avoid increasing register pressure because 3104 // the value we use is the same value this instruction was already using. 3105 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) { 3106 default: 3107 break; 3108 case ICmpInst::ICMP_EQ: 3109 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 3110 case ICmpInst::ICMP_NE: 3111 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 3112 case ICmpInst::ICMP_SGT: 3113 case ICmpInst::ICMP_SGE: 3114 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1), 3115 Constant::getAllOnesValue(SRem->getType())); 3116 case ICmpInst::ICMP_SLT: 3117 case ICmpInst::ICMP_SLE: 3118 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1), 3119 Constant::getNullValue(SRem->getType())); 3120 } 3121 } 3122 3123 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() && 3124 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) { 3125 switch (BO0->getOpcode()) { 3126 default: 3127 break; 3128 case Instruction::Add: 3129 case Instruction::Sub: 3130 case Instruction::Xor: { 3131 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b 3132 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3133 3134 const APInt *C; 3135 if (match(BO0->getOperand(1), m_APInt(C))) { 3136 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b 3137 if (C->isSignMask()) { 3138 ICmpInst::Predicate NewPred = 3139 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3140 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3141 } 3142 3143 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b 3144 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) { 3145 ICmpInst::Predicate NewPred = 3146 I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate(); 3147 NewPred = I.getSwappedPredicate(NewPred); 3148 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 3149 } 3150 } 3151 break; 3152 } 3153 case Instruction::Mul: { 3154 if (!I.isEquality()) 3155 break; 3156 3157 const APInt *C; 3158 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() && 3159 !C->isOneValue()) { 3160 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask) 3161 // Mask = -1 >> count-trailing-zeros(C). 
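// Illustrative sketch (example values, not from the original source): for
// i8 and C = 12, which has two trailing zeros, X * 12 == (X * 3) << 2. The
// odd factor 3 is invertible mod 2^8, and the shift discards only the top
// two bits, so:
//   icmp eq (mul i8 %x, 12), (mul i8 %y, 12)
//     --> icmp eq (and i8 %x, 63), (and i8 %y, 63)
// where 63 == -1 u>> 2 keeps exactly the low 6 bits that still matter.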
3162 if (unsigned TZs = C->countTrailingZeros()) { 3163 Constant *Mask = ConstantInt::get( 3164 BO0->getType(), 3165 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs)); 3166 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask); 3167 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask); 3168 return new ICmpInst(Pred, And1, And2); 3169 } 3170 // If there are no trailing zeros in the multiplier, just eliminate 3171 // the multiplies (no masking is needed): 3172 // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y 3173 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3174 } 3175 break; 3176 } 3177 case Instruction::UDiv: 3178 case Instruction::LShr: 3179 if (I.isSigned() || !BO0->isExact() || !BO1->isExact()) 3180 break; 3181 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3182 3183 case Instruction::SDiv: 3184 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact()) 3185 break; 3186 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3187 3188 case Instruction::AShr: 3189 if (!BO0->isExact() || !BO1->isExact()) 3190 break; 3191 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3192 3193 case Instruction::Shl: { 3194 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap(); 3195 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap(); 3196 if (!NUW && !NSW) 3197 break; 3198 if (!NSW && I.isSigned()) 3199 break; 3200 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 3201 } 3202 } 3203 } 3204 3205 if (BO0) { 3206 // Transform A & (L - 1) `ult` L --> L != 0 3207 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes()); 3208 auto BitwiseAnd = m_c_And(m_Value(), LSubOne); 3209 3210 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) { 3211 auto *Zero = Constant::getNullValue(BO0->getType()); 3212 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero); 3213 } 3214 } 3215 3216 return nullptr; 3217 } 3218 3219 /// Fold icmp Pred min|max(X, Y), X. 3220 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) { 3221 ICmpInst::Predicate Pred = Cmp.getPredicate(); 3222 Value *Op0 = Cmp.getOperand(0); 3223 Value *X = Cmp.getOperand(1); 3224 3225 // Canonicalize minimum or maximum operand to LHS of the icmp. 
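// For example (illustrative, writing the matched select idiom as smin):
//   icmp sgt %x, smin(%x, %y) --> icmp slt smin(%x, %y), %x
// so the matchers below only ever see the min/max on the left-hand side.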
3226 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) || 3227 match(X, m_c_SMax(m_Specific(Op0), m_Value())) || 3228 match(X, m_c_UMin(m_Specific(Op0), m_Value())) || 3229 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) { 3230 std::swap(Op0, X); 3231 Pred = Cmp.getSwappedPredicate(); 3232 } 3233 3234 Value *Y; 3235 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) { 3236 // smin(X, Y) == X --> X s<= Y 3237 // smin(X, Y) s>= X --> X s<= Y 3238 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE) 3239 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 3240 3241 // smin(X, Y) != X --> X s> Y 3242 // smin(X, Y) s< X --> X s> Y 3243 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT) 3244 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 3245 3246 // These cases should be handled in InstSimplify: 3247 // smin(X, Y) s<= X --> true 3248 // smin(X, Y) s> X --> false 3249 return nullptr; 3250 } 3251 3252 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) { 3253 // smax(X, Y) == X --> X s>= Y 3254 // smax(X, Y) s<= X --> X s>= Y 3255 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE) 3256 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 3257 3258 // smax(X, Y) != X --> X s< Y 3259 // smax(X, Y) s> X --> X s< Y 3260 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT) 3261 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 3262 3263 // These cases should be handled in InstSimplify: 3264 // smax(X, Y) s>= X --> true 3265 // smax(X, Y) s< X --> false 3266 return nullptr; 3267 } 3268 3269 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) { 3270 // umin(X, Y) == X --> X u<= Y 3271 // umin(X, Y) u>= X --> X u<= Y 3272 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE) 3273 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y); 3274 3275 // umin(X, Y) != X --> X u> Y 3276 // umin(X, Y) u< X --> X u> Y 3277 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT) 3278 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y); 3279 3280 // These cases should be handled in InstSimplify: 3281 // umin(X, Y) u<= X --> true 3282 // umin(X, Y) u> X --> false 3283 return nullptr; 3284 } 3285 3286 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) { 3287 // umax(X, Y) == X --> X u>= Y 3288 // umax(X, Y) u<= X --> X u>= Y 3289 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE) 3290 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 3291 3292 // umax(X, Y) != X --> X u< Y 3293 // umax(X, Y) u> X --> X u< Y 3294 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT) 3295 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 3296 3297 // These cases should be handled in InstSimplify: 3298 // umax(X, Y) u>= X --> true 3299 // umax(X, Y) u< X --> false 3300 return nullptr; 3301 } 3302 3303 return nullptr; 3304 } 3305 3306 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) { 3307 if (!I.isEquality()) 3308 return nullptr; 3309 3310 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3311 const CmpInst::Predicate Pred = I.getPredicate(); 3312 Value *A, *B, *C, *D; 3313 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) { 3314 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0 3315 Value *OtherVal = A == Op1 ? 
B : A; 3316 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 3317 } 3318 3319 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) { 3320 // A^c1 == C^c2 --> A == C^(c1^c2) 3321 ConstantInt *C1, *C2; 3322 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) && 3323 Op1->hasOneUse()) { 3324 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue()); 3325 Value *Xor = Builder.CreateXor(C, NC); 3326 return new ICmpInst(Pred, A, Xor); 3327 } 3328 3329 // A^B == A^D -> B == D 3330 if (A == C) 3331 return new ICmpInst(Pred, B, D); 3332 if (A == D) 3333 return new ICmpInst(Pred, B, C); 3334 if (B == C) 3335 return new ICmpInst(Pred, A, D); 3336 if (B == D) 3337 return new ICmpInst(Pred, A, C); 3338 } 3339 } 3340 3341 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) { 3342 // A == (A^B) -> B == 0 3343 Value *OtherVal = A == Op0 ? B : A; 3344 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 3345 } 3346 3347 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0 3348 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) && 3349 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) { 3350 Value *X = nullptr, *Y = nullptr, *Z = nullptr; 3351 3352 if (A == C) { 3353 X = B; 3354 Y = D; 3355 Z = A; 3356 } else if (A == D) { 3357 X = B; 3358 Y = C; 3359 Z = A; 3360 } else if (B == C) { 3361 X = A; 3362 Y = D; 3363 Z = B; 3364 } else if (B == D) { 3365 X = A; 3366 Y = C; 3367 Z = B; 3368 } 3369 3370 if (X) { // Build (X^Y) & Z 3371 Op1 = Builder.CreateXor(X, Y); 3372 Op1 = Builder.CreateAnd(Op1, Z); 3373 I.setOperand(0, Op1); 3374 I.setOperand(1, Constant::getNullValue(Op1->getType())); 3375 return &I; 3376 } 3377 } 3378 3379 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B) 3380 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B) 3381 ConstantInt *Cst1; 3382 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) && 3383 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) || 3384 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) && 3385 match(Op1, m_ZExt(m_Value(A))))) { 3386 APInt Pow2 = Cst1->getValue() + 1; 3387 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) && 3388 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth()) 3389 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType())); 3390 } 3391 3392 // (A >> C) == (B >> C) --> (A^B) u< (1 << C) 3393 // For lshr and ashr pairs. 3394 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) && 3395 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) || 3396 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) && 3397 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) { 3398 unsigned TypeBits = Cst1->getBitWidth(); 3399 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3400 if (ShAmt < TypeBits && ShAmt != 0) { 3401 ICmpInst::Predicate NewPred = 3402 Pred == ICmpInst::ICMP_NE ? 
ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 3403 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 3404 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt); 3405 return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal)); 3406 } 3407 } 3408 3409 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0 3410 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) && 3411 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) { 3412 unsigned TypeBits = Cst1->getBitWidth(); 3413 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 3414 if (ShAmt < TypeBits && ShAmt != 0) { 3415 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 3416 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt); 3417 Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal), 3418 I.getName() + ".mask"); 3419 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType())); 3420 } 3421 } 3422 3423 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to 3424 // "icmp (and X, mask), cst" 3425 uint64_t ShAmt = 0; 3426 if (Op0->hasOneUse() && 3427 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) && 3428 match(Op1, m_ConstantInt(Cst1)) && 3429 // Only do this when A has multiple uses. This is most important to do 3430 // when it exposes other optimizations. 3431 !A->hasOneUse()) { 3432 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits(); 3433 3434 if (ShAmt < ASize) { 3435 APInt MaskV = 3436 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits()); 3437 MaskV <<= ShAmt; 3438 3439 APInt CmpV = Cst1->getValue().zext(ASize); 3440 CmpV <<= ShAmt; 3441 3442 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV)); 3443 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV)); 3444 } 3445 } 3446 3447 // If both operands are byte-swapped or bit-reversed, just compare the 3448 // original values. 3449 // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant() 3450 // and handle more intrinsics. 3451 if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) || 3452 (match(Op0, m_BitReverse(m_Value(A))) && 3453 match(Op1, m_BitReverse(m_Value(B))))) 3454 return new ICmpInst(Pred, A, B); 3455 3456 return nullptr; 3457 } 3458 3459 /// Handle icmp (cast x to y), (cast/cst). We only handle extending casts so 3460 /// far. 3461 Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) { 3462 const CastInst *LHSCI = cast<CastInst>(ICmp.getOperand(0)); 3463 Value *LHSCIOp = LHSCI->getOperand(0); 3464 Type *SrcTy = LHSCIOp->getType(); 3465 Type *DestTy = LHSCI->getType(); 3466 Value *RHSCIOp; 3467 3468 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the 3469 // integer type is the same size as the pointer type. 3470 if (LHSCI->getOpcode() == Instruction::PtrToInt && 3471 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) { 3472 Value *RHSOp = nullptr; 3473 if (auto *RHSC = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) { 3474 Value *RHSCIOp = RHSC->getOperand(0); 3475 if (RHSCIOp->getType()->getPointerAddressSpace() == 3476 LHSCIOp->getType()->getPointerAddressSpace()) { 3477 RHSOp = RHSC->getOperand(0); 3478 // If the pointer types don't match, insert a bitcast. 
3479 if (LHSCIOp->getType() != RHSOp->getType()) 3480 RHSOp = Builder.CreateBitCast(RHSOp, LHSCIOp->getType()); 3481 } 3482 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) { 3483 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy); 3484 } 3485 3486 if (RHSOp) 3487 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSOp); 3488 } 3489 3490 // The code below only handles extension cast instructions, so far. 3491 // Enforce this. 3492 if (LHSCI->getOpcode() != Instruction::ZExt && 3493 LHSCI->getOpcode() != Instruction::SExt) 3494 return nullptr; 3495 3496 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt; 3497 bool isSignedCmp = ICmp.isSigned(); 3498 3499 if (auto *CI = dyn_cast<CastInst>(ICmp.getOperand(1))) { 3500 // Not an extension from the same type? 3501 RHSCIOp = CI->getOperand(0); 3502 if (RHSCIOp->getType() != LHSCIOp->getType()) 3503 return nullptr; 3504 3505 // If the signedness of the two casts doesn't agree (i.e. one is a sext 3506 // and the other is a zext), then we can't handle this. 3507 if (CI->getOpcode() != LHSCI->getOpcode()) 3508 return nullptr; 3509 3510 // Deal with equality cases early. 3511 if (ICmp.isEquality()) 3512 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3513 3514 // A signed comparison of sign extended values simplifies into a 3515 // signed comparison. 3516 if (isSignedCmp && isSignedExt) 3517 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, RHSCIOp); 3518 3519 // The other three cases all fold into an unsigned comparison. 3520 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, RHSCIOp); 3521 } 3522 3523 // If we aren't dealing with a constant on the RHS, exit early. 3524 auto *C = dyn_cast<Constant>(ICmp.getOperand(1)); 3525 if (!C) 3526 return nullptr; 3527 3528 // Compute the constant that would happen if we truncated to SrcTy then 3529 // re-extended to DestTy. 3530 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy); 3531 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(), Res1, DestTy); 3532 3533 // If the re-extended constant didn't change... 3534 if (Res2 == C) { 3535 // Deal with equality cases early. 3536 if (ICmp.isEquality()) 3537 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3538 3539 // A signed comparison of sign extended values simplifies into a 3540 // signed comparison. 3541 if (isSignedExt && isSignedCmp) 3542 return new ICmpInst(ICmp.getPredicate(), LHSCIOp, Res1); 3543 3544 // The other three cases all fold into an unsigned comparison. 3545 return new ICmpInst(ICmp.getUnsignedPredicate(), LHSCIOp, Res1); 3546 } 3547 3548 // The re-extended constant changed, partly changed (in the case of a vector), 3549 // or could not be determined to be equal (in the case of a constant 3550 // expression), so the constant cannot be represented in the shorter type. 3551 // Consequently, we cannot emit a simple comparison. 3552 // All the cases that fold to true or false will have already been handled 3553 // by SimplifyICmpInst, so only deal with the tricky case. 3554 3555 if (isSignedCmp || !isSignedExt || !isa<ConstantInt>(C)) 3556 return nullptr; 3557 3558 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases 3559 // should have been folded away previously and not enter in here. 3560 3561 // We're performing an unsigned comp with a sign extended value. 3562 // This is true if the input is >= 0. 
[aka >s -1] 3563 Constant *NegOne = Constant::getAllOnesValue(SrcTy); 3564 Value *Result = Builder.CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName()); 3565 3566 // Finally, return the value computed. 3567 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT) 3568 return replaceInstUsesWith(ICmp, Result); 3569 3570 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!"); 3571 return BinaryOperator::CreateNot(Result); 3572 } 3573 3574 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS, 3575 Value *RHS, Instruction &OrigI, 3576 Value *&Result, Constant *&Overflow) { 3577 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS)) 3578 std::swap(LHS, RHS); 3579 3580 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) { 3581 Result = OpResult; 3582 Overflow = OverflowVal; 3583 if (ReuseName) 3584 Result->takeName(&OrigI); 3585 return true; 3586 }; 3587 3588 // If the overflow check was an add followed by a compare, the insertion point 3589 // may be pointing to the compare. We want to insert the new instructions 3590 // before the add in case there are uses of the add between the add and the 3591 // compare. 3592 Builder.SetInsertPoint(&OrigI); 3593 3594 switch (OCF) { 3595 case OCF_INVALID: 3596 llvm_unreachable("bad overflow check kind!"); 3597 3598 case OCF_UNSIGNED_ADD: { 3599 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI); 3600 if (OR == OverflowResult::NeverOverflows) 3601 return SetResult(Builder.CreateNUWAdd(LHS, RHS), Builder.getFalse(), 3602 true); 3603 3604 if (OR == OverflowResult::AlwaysOverflows) 3605 return SetResult(Builder.CreateAdd(LHS, RHS), Builder.getTrue(), true); 3606 3607 // Fall through uadd into sadd 3608 LLVM_FALLTHROUGH; 3609 } 3610 case OCF_SIGNED_ADD: { 3611 // X + 0 -> {X, false} 3612 if (match(RHS, m_Zero())) 3613 return SetResult(LHS, Builder.getFalse(), false); 3614 3615 // We can strength reduce this signed add into a regular add if we can prove 3616 // that it will never overflow. 
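// Illustrative sketch (example bounds, not from the original source): for
// i32 operands whose known bits show both values lie in [0, 2^30), the sum
// is at most 2^31 - 2, so it cannot overflow and the result pair becomes
// { add nsw i32 %a, %b, i1 false }.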
3617     if (OCF == OCF_SIGNED_ADD)
3618       if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
3619         return SetResult(Builder.CreateNSWAdd(LHS, RHS), Builder.getFalse(),
3620                          true);
3621     break;
3622   }
3623
3624   case OCF_UNSIGNED_SUB:
3625   case OCF_SIGNED_SUB: {
3626     // X - 0 -> {X, false}
3627     if (match(RHS, m_Zero()))
3628       return SetResult(LHS, Builder.getFalse(), false);
3629
3630     if (OCF == OCF_SIGNED_SUB) {
3631       if (willNotOverflowSignedSub(LHS, RHS, OrigI))
3632         return SetResult(Builder.CreateNSWSub(LHS, RHS), Builder.getFalse(),
3633                          true);
3634     } else {
3635       if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
3636         return SetResult(Builder.CreateNUWSub(LHS, RHS), Builder.getFalse(),
3637                          true);
3638     }
3639     break;
3640   }
3641
3642   case OCF_UNSIGNED_MUL: {
3643     OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
3644     if (OR == OverflowResult::NeverOverflows)
3645       return SetResult(Builder.CreateNUWMul(LHS, RHS), Builder.getFalse(),
3646                        true);
3647     if (OR == OverflowResult::AlwaysOverflows)
3648       return SetResult(Builder.CreateMul(LHS, RHS), Builder.getTrue(), true);
3649     LLVM_FALLTHROUGH;
3650   }
3651   case OCF_SIGNED_MUL:
3652     // X * undef -> undef
3653     if (isa<UndefValue>(RHS))
3654       return SetResult(RHS, UndefValue::get(Builder.getInt1Ty()), false);
3655
3656     // X * 0 -> {0, false}
3657     if (match(RHS, m_Zero()))
3658       return SetResult(RHS, Builder.getFalse(), false);
3659
3660     // X * 1 -> {X, false}
3661     if (match(RHS, m_One()))
3662       return SetResult(LHS, Builder.getFalse(), false);
3663
3664     if (OCF == OCF_SIGNED_MUL)
3665       if (willNotOverflowSignedMul(LHS, RHS, OrigI))
3666         return SetResult(Builder.CreateNSWMul(LHS, RHS), Builder.getFalse(),
3667                          true);
3668     break;
3669   }
3670
3671   return false;
3672 }
3673
3674 /// \brief Recognize and process an idiom involving a test for multiplication
3675 /// overflow.
3676 ///
3677 /// The caller has matched a pattern of the form:
3678 ///   I = cmp u (mul(zext A, zext B)), V
3679 /// The function checks if this is a test for overflow and, if so, replaces the
3680 /// multiplication with a call to the 'mul.with.overflow' intrinsic.
3681 ///
3682 /// \param I Compare instruction.
3683 /// \param MulVal Result of the 'mul' instruction. It is one of the arguments of
3684 /// the compare instruction. Must be of integer type.
3685 /// \param OtherVal The other argument of the compare instruction.
3686 /// \returns Instruction which must replace the compare instruction, or NULL if
3687 /// no replacement is required.
3688 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
3689                                          Value *OtherVal, InstCombiner &IC) {
3690   // Don't bother doing this transformation for pointers; don't do it for
3691   // vectors.
3692   if (!isa<IntegerType>(MulVal->getType()))
3693     return nullptr;
3694
3695   assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
3696   assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
3697   auto *MulInstr = dyn_cast<Instruction>(MulVal);
3698   if (!MulInstr)
3699     return nullptr;
3700   assert(MulInstr->getOpcode() == Instruction::Mul);
3701
3702   auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
3703        *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
3704   assert(LHS->getOpcode() == Instruction::ZExt);
3705   assert(RHS->getOpcode() == Instruction::ZExt);
3706   Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
3707
3708   // Calculate the type and width of the result produced by mul.with.overflow.
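  // For example (illustrative): with A of type i8 and B of type i16, MulWidth
  // is 16 and MulType is i16, so the candidate intrinsic is
  // llvm.umul.with.overflow.i16 and A must first be zero-extended to i16.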
3709   Type *TyA = A->getType(), *TyB = B->getType();
3710   unsigned WidthA = TyA->getPrimitiveSizeInBits(),
3711            WidthB = TyB->getPrimitiveSizeInBits();
3712   unsigned MulWidth;
3713   Type *MulType;
3714   if (WidthB > WidthA) {
3715     MulWidth = WidthB;
3716     MulType = TyB;
3717   } else {
3718     MulWidth = WidthA;
3719     MulType = TyA;
3720   }
3721
3722   // In order to replace the original mul with a narrower mul.with.overflow,
3723   // all uses must ignore the upper bits of the product. The number of used
3724   // low bits must not be greater than the width of mul.with.overflow.
3725   if (MulVal->hasNUsesOrMore(2))
3726     for (User *U : MulVal->users()) {
3727       if (U == &I)
3728         continue;
3729       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
3730         // Check if the truncation ignores bits above MulWidth.
3731         unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
3732         if (TruncWidth > MulWidth)
3733           return nullptr;
3734       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
3735         // Check if the AND ignores bits above MulWidth.
3736         if (BO->getOpcode() != Instruction::And)
3737           return nullptr;
3738         if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
3739           const APInt &CVal = CI->getValue();
3740           if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
3741             return nullptr;
3742         } else {
3743           // In this case the operand of the binary operation could be defined
3744           // in another block, and performing the replacement could break the
3745           // dominance relation.
3746           return nullptr;
3747         }
3748       } else {
3749         // Other uses prohibit this transformation.
3750         return nullptr;
3751       }
3752     }
3753
3754   // Recognize patterns.
3755   switch (I.getPredicate()) {
3756   case ICmpInst::ICMP_EQ:
3757   case ICmpInst::ICMP_NE:
3758     // Recognize pattern:
3759     // mulval = mul(zext A, zext B)
3760     // cmp eq/ne mulval, zext trunc mulval
3761     if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
3762       if (Zext->hasOneUse()) {
3763         Value *ZextArg = Zext->getOperand(0);
3764         if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
3765           if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
3766             break; // Recognized
3767       }
3768
3769     // Recognize pattern:
3770     // mulval = mul(zext A, zext B)
3771     // cmp eq/ne mulval, and(mulval, mask); the mask selects the low MulWidth bits.
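    // Illustrative instance of the pattern above, with MulWidth == 32:
    //   mulval = mul i64 (zext i32 %a), (zext i32 %b)
    //   masked = and i64 mulval, 4294967295  ; selects the low 32 bits
    //   cmp ne mulval, masked                ; true iff the product does not
    //                                        ; fit in 32 bits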
3772     ConstantInt *CI;
3773     Value *ValToMask;
3774     if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
3775       if (ValToMask != MulVal)
3776         return nullptr;
3777       const APInt &CVal = CI->getValue() + 1;
3778       if (CVal.isPowerOf2()) {
3779         unsigned MaskWidth = CVal.logBase2();
3780         if (MaskWidth == MulWidth)
3781           break; // Recognized
3782       }
3783     }
3784     return nullptr;
3785
3786   case ICmpInst::ICMP_UGT:
3787     // Recognize pattern:
3788     // mulval = mul(zext A, zext B)
3789     // cmp ugt mulval, max
3790     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3791       APInt MaxVal = APInt::getMaxValue(MulWidth);
3792       MaxVal = MaxVal.zext(CI->getBitWidth());
3793       if (MaxVal.eq(CI->getValue()))
3794         break; // Recognized
3795     }
3796     return nullptr;
3797
3798   case ICmpInst::ICMP_UGE:
3799     // Recognize pattern:
3800     // mulval = mul(zext A, zext B)
3801     // cmp uge mulval, max+1
3802     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3803       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3804       if (MaxVal.eq(CI->getValue()))
3805         break; // Recognized
3806     }
3807     return nullptr;
3808
3809   case ICmpInst::ICMP_ULE:
3810     // Recognize pattern:
3811     // mulval = mul(zext A, zext B)
3812     // cmp ule mulval, max
3813     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3814       APInt MaxVal = APInt::getMaxValue(MulWidth);
3815       MaxVal = MaxVal.zext(CI->getBitWidth());
3816       if (MaxVal.eq(CI->getValue()))
3817         break; // Recognized
3818     }
3819     return nullptr;
3820
3821   case ICmpInst::ICMP_ULT:
3822     // Recognize pattern:
3823     // mulval = mul(zext A, zext B)
3824     // cmp ult mulval, max + 1
3825     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
3826       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
3827       if (MaxVal.eq(CI->getValue()))
3828         break; // Recognized
3829     }
3830     return nullptr;
3831
3832   default:
3833     return nullptr;
3834   }
3835
3836   InstCombiner::BuilderTy &Builder = IC.Builder;
3837   Builder.SetInsertPoint(MulInstr);
3838
3839   // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
3840   Value *MulA = A, *MulB = B;
3841   if (WidthA < MulWidth)
3842     MulA = Builder.CreateZExt(A, MulType);
3843   if (WidthB < MulWidth)
3844     MulB = Builder.CreateZExt(B, MulType);
3845   Value *F = Intrinsic::getDeclaration(I.getModule(),
3846                                        Intrinsic::umul_with_overflow, MulType);
3847   CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
3848   IC.Worklist.Add(MulInstr);
3849
3850   // If the mul result has uses other than the comparison, we know that they
3851   // are truncations or binary ANDs. Change them to use the result of
3852   // mul.with.overflow and adjust the mask/size accordingly.
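  // For example (illustrative): a leftover user such as
  //   %low = and i64 %mulval, 255
  // is rewritten below as an AND of the narrow intrinsic result,
  //   %t = and i32 %umul.value, 255
  // followed by a zext of %t back to i64.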
3853 if (MulVal->hasNUsesOrMore(2)) { 3854 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value"); 3855 for (User *U : MulVal->users()) { 3856 if (U == &I || U == OtherVal) 3857 continue; 3858 if (TruncInst *TI = dyn_cast<TruncInst>(U)) { 3859 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth) 3860 IC.replaceInstUsesWith(*TI, Mul); 3861 else 3862 TI->setOperand(0, Mul); 3863 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) { 3864 assert(BO->getOpcode() == Instruction::And); 3865 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask) 3866 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1)); 3867 APInt ShortMask = CI->getValue().trunc(MulWidth); 3868 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask); 3869 Instruction *Zext = 3870 cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType())); 3871 IC.Worklist.Add(Zext); 3872 IC.replaceInstUsesWith(*BO, Zext); 3873 } else { 3874 llvm_unreachable("Unexpected Binary operation"); 3875 } 3876 IC.Worklist.Add(cast<Instruction>(U)); 3877 } 3878 } 3879 if (isa<Instruction>(OtherVal)) 3880 IC.Worklist.Add(cast<Instruction>(OtherVal)); 3881 3882 // The original icmp gets replaced with the overflow value, maybe inverted 3883 // depending on predicate. 3884 bool Inverse = false; 3885 switch (I.getPredicate()) { 3886 case ICmpInst::ICMP_NE: 3887 break; 3888 case ICmpInst::ICMP_EQ: 3889 Inverse = true; 3890 break; 3891 case ICmpInst::ICMP_UGT: 3892 case ICmpInst::ICMP_UGE: 3893 if (I.getOperand(0) == MulVal) 3894 break; 3895 Inverse = true; 3896 break; 3897 case ICmpInst::ICMP_ULT: 3898 case ICmpInst::ICMP_ULE: 3899 if (I.getOperand(1) == MulVal) 3900 break; 3901 Inverse = true; 3902 break; 3903 default: 3904 llvm_unreachable("Unexpected predicate"); 3905 } 3906 if (Inverse) { 3907 Value *Res = Builder.CreateExtractValue(Call, 1); 3908 return BinaryOperator::CreateNot(Res); 3909 } 3910 3911 return ExtractValueInst::Create(Call, 1); 3912 } 3913 3914 /// When performing a comparison against a constant, it is possible that not all 3915 /// the bits in the LHS are demanded. This helper method computes the mask that 3916 /// IS demanded. 3917 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth, 3918 bool isSignCheck) { 3919 if (isSignCheck) 3920 return APInt::getSignMask(BitWidth); 3921 3922 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1)); 3923 if (!CI) return APInt::getAllOnesValue(BitWidth); 3924 const APInt &RHS = CI->getValue(); 3925 3926 switch (I.getPredicate()) { 3927 // For a UGT comparison, we don't care about any bits that 3928 // correspond to the trailing ones of the comparand. The value of these 3929 // bits doesn't impact the outcome of the comparison, because any value 3930 // greater than the RHS must differ in a bit higher than these due to carry. 3931 case ICmpInst::ICMP_UGT: { 3932 unsigned trailingOnes = RHS.countTrailingOnes(); 3933 return APInt::getBitsSetFrom(BitWidth, trailingOnes); 3934 } 3935 3936 // Similarly, for a ULT comparison, we don't care about the trailing zeros. 3937 // Any value less than the RHS must differ in a higher bit because of carries. 3938 case ICmpInst::ICMP_ULT: { 3939 unsigned trailingZeros = RHS.countTrailingZeros(); 3940 return APInt::getBitsSetFrom(BitWidth, trailingZeros); 3941 } 3942 3943 default: 3944 return APInt::getAllOnesValue(BitWidth); 3945 } 3946 } 3947 3948 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst 3949 /// should be swapped. 
3950 /// The decision is based on how many times these two operands are reused
3951 /// as subtract operands and their positions in those instructions.
3952 /// The rationale is that several architectures use the same instruction for
3953 /// both subtract and cmp, so it is better if the order of those operands
3954 /// matches.
3955 /// \return true if Op0 and Op1 should be swapped.
3956 static bool swapMayExposeCSEOpportunities(const Value * Op0,
3957                                           const Value * Op1) {
3958   // Filter out pointer values, as those cannot appear directly in a subtract.
3959   // FIXME: we may want to go through inttoptrs or bitcasts.
3960   if (Op0->getType()->isPointerTy())
3961     return false;
3962   // Count all uses of both Op0 and Op1 in subtracts.
3963   // Each time Op0 is the first operand, count -1: swapping is bad, as the
3964   // subtract already has the same layout as the compare.
3965   // Each time Op0 is the second operand, count +1: swapping is good, as the
3966   // subtract has a different layout than the compare.
3967   // At the end, if the benefit is greater than 0, Op0 should come second to
3968   // expose more CSE opportunities.
3969   int GlobalSwapBenefits = 0;
3970   for (const User *U : Op0->users()) {
3971     const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
3972     if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
3973       continue;
3974     // If Op0 is the first argument, it is not beneficial to swap the
3975     // arguments.
3976     int LocalSwapBenefits = -1;
3977     unsigned Op1Idx = 1;
3978     if (BinOp->getOperand(Op1Idx) == Op0) {
3979       Op1Idx = 0;
3980       LocalSwapBenefits = 1;
3981     }
3982     if (BinOp->getOperand(Op1Idx) != Op1)
3983       continue;
3984     GlobalSwapBenefits += LocalSwapBenefits;
3985   }
3986   return GlobalSwapBenefits > 0;
3987 }
3988
3989 /// \brief Check that one use is in the same block as the definition and all
3990 /// other uses are in blocks dominated by a given block.
3991 ///
3992 /// \param DI Definition
3993 /// \param UI Use
3994 /// \param DB Block that must dominate all uses of \p DI outside
3995 ///           the parent block
3996 /// \return true when \p UI is the only use of \p DI in the parent block
3997 /// and all other uses of \p DI are in blocks dominated by \p DB.
3998 ///
3999 bool InstCombiner::dominatesAllUses(const Instruction *DI,
4000                                     const Instruction *UI,
4001                                     const BasicBlock *DB) const {
4002   assert(DI && UI && "Instruction not defined");
4003   // Ignore incomplete definitions.
4004   if (!DI->getParent())
4005     return false;
4006   // DI and UI must be in the same block.
4007   if (DI->getParent() != UI->getParent())
4008     return false;
4009   // Protect from self-referencing blocks.
4010   if (DI->getParent() == DB)
4011     return false;
4012   for (const User *U : DI->users()) {
4013     auto *Usr = cast<Instruction>(U);
4014     if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
4015       return false;
4016   }
4017   return true;
4018 }
4019
4020 /// Return true when the instruction sequence within a block is select-cmp-br.
4021 static bool isChainSelectCmpBranch(const SelectInst *SI) {
4022   const BasicBlock *BB = SI->getParent();
4023   if (!BB)
4024     return false;
4025   auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
4026   if (!BI || BI->getNumSuccessors() != 2)
4027     return false;
4028   auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
4029   if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
4030     return false;
4031   return true;
4032 }
4033
4034 /// \brief True when a select result is replaced by one of its operands
4035 /// in a select-icmp sequence. This will eventually result in the elimination
4036 /// of the select.
4037 ///
4038 /// \param SI Select instruction
4039 /// \param Icmp Compare instruction
4040 /// \param SIOpd Operand that replaces the select
4041 ///
4042 /// Notes:
4043 /// - The replacement is global and requires dominator information
4044 /// - The caller is responsible for the actual replacement
4045 ///
4046 /// Example:
4047 ///
4048 /// entry:
4049 ///  %4 = select i1 %3, %C* %0, %C* null
4050 ///  %5 = icmp eq %C* %4, null
4051 ///  br i1 %5, label %9, label %7
4052 ///  ...
4053 ///  ; <label>:7                       ; preds = %entry
4054 ///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
4055 ///  ...
4056 ///
4057 /// can be transformed to
4058 ///
4059 ///  %5 = icmp eq %C* %0, null
4060 ///  %6 = select i1 %3, i1 %5, i1 true
4061 ///  br i1 %6, label %9, label %7
4062 ///  ...
4063 ///  ; <label>:7                       ; preds = %entry
4064 ///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replaced by %0!
4065 ///
4066 /// Similarly when the first operand of the select is a constant and/or
4067 /// the compare is for not-equal rather than equal.
4068 ///
4069 /// NOTE: The function is only called when the select and compare constants
4070 /// are equal, so the optimization can work only for EQ predicates. This is
4071 /// not a major restriction since a NE compare should be 'normalized' to an
4072 /// equality compare, which usually happens in the combiner; the test case
4073 /// select-cmp-br.ll checks for it.
4074 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
4075                                              const ICmpInst *Icmp,
4076                                              const unsigned SIOpd) {
4077   assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4078   if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4079     BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
4080     // The check for the single predecessor is not the best that can be
4081     // done. But it protects efficiently against cases such as when SI's
4082     // home block has two successors, Succ and Succ1, and Succ1 is a
4083     // predecessor of Succ. Then SI can't be replaced by SIOpd because the
4084     // use that gets replaced can be reached on either path. So the
485     // uniqueness check guarantees that the path containing all uses of SI
4086     // (outside SI's parent) is disjoint from all other paths out of SI.
4087     // That information is more expensive to compute, however, and the
4088     // trade-off here is in favor of compile time. Note also that we check
4089     // for a single predecessor and not just uniqueness; this handles the
4090     // situation when Succ and Succ1 point to the same basic block.
4091     if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
4092       NumSel++;
4093       SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
4094       return true;
4095     }
4096   }
4097   return false;
4098 }
4099
4100 /// Try to fold the comparison based on range information we can get by
4101 /// checking whether bits are known to be zero or one in the inputs.
4102 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
4103   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4104   Type *Ty = Op0->getType();
4105   ICmpInst::Predicate Pred = I.getPredicate();
4106
4107   // Get the scalar or pointer size.
4108   unsigned BitWidth = Ty->isIntOrIntVectorTy()
4109                           ? Ty->getScalarSizeInBits()
4110                           : DL.getTypeSizeInBits(Ty->getScalarType());
4111
4112   if (!BitWidth)
4113     return nullptr;
4114
4115   // If this is a normal comparison, it demands all bits.
If it is a sign bit 4116 // comparison, it only demands the sign bit. 4117 bool IsSignBit = false; 4118 const APInt *CmpC; 4119 if (match(Op1, m_APInt(CmpC))) { 4120 bool UnusedBit; 4121 IsSignBit = isSignBitCheck(Pred, *CmpC, UnusedBit); 4122 } 4123 4124 KnownBits Op0Known(BitWidth); 4125 KnownBits Op1Known(BitWidth); 4126 4127 if (SimplifyDemandedBits(&I, 0, 4128 getDemandedBitsLHSMask(I, BitWidth, IsSignBit), 4129 Op0Known, 0)) 4130 return &I; 4131 4132 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth), 4133 Op1Known, 0)) 4134 return &I; 4135 4136 // Given the known and unknown bits, compute a range that the LHS could be 4137 // in. Compute the Min, Max and RHS values based on the known bits. For the 4138 // EQ and NE we use unsigned values. 4139 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0); 4140 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0); 4141 if (I.isSigned()) { 4142 computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4143 computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4144 } else { 4145 computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max); 4146 computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max); 4147 } 4148 4149 // If Min and Max are known to be the same, then SimplifyDemandedBits 4150 // figured out that the LHS is a constant. Constant fold this now, so that 4151 // code below can assume that Min != Max. 4152 if (!isa<Constant>(Op0) && Op0Min == Op0Max) 4153 return new ICmpInst(Pred, ConstantInt::get(Op0->getType(), Op0Min), Op1); 4154 if (!isa<Constant>(Op1) && Op1Min == Op1Max) 4155 return new ICmpInst(Pred, Op0, ConstantInt::get(Op1->getType(), Op1Min)); 4156 4157 // Based on the range information we know about the LHS, see if we can 4158 // simplify this comparison. For example, (x&4) < 8 is always true. 4159 switch (Pred) { 4160 default: 4161 llvm_unreachable("Unknown icmp opcode!"); 4162 case ICmpInst::ICMP_EQ: 4163 case ICmpInst::ICMP_NE: { 4164 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) { 4165 return Pred == CmpInst::ICMP_EQ 4166 ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())) 4167 : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4168 } 4169 4170 // If all bits are known zero except for one, then we know at most one bit 4171 // is set. If the comparison is against zero, then this is a check to see if 4172 // *that* bit is set. 4173 APInt Op0KnownZeroInverted = ~Op0Known.Zero; 4174 if (Op1Known.isZero()) { 4175 // If the LHS is an AND with the same constant, look through it. 4176 Value *LHS = nullptr; 4177 const APInt *LHSC; 4178 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) || 4179 *LHSC != Op0KnownZeroInverted) 4180 LHS = Op0; 4181 4182 Value *X; 4183 if (match(LHS, m_Shl(m_One(), m_Value(X)))) { 4184 APInt ValToCheck = Op0KnownZeroInverted; 4185 Type *XTy = X->getType(); 4186 if (ValToCheck.isPowerOf2()) { 4187 // ((1 << X) & 8) == 0 -> X != 3 4188 // ((1 << X) & 8) != 0 -> X == 3 4189 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4190 auto NewPred = ICmpInst::getInversePredicate(Pred); 4191 return new ICmpInst(NewPred, X, CmpC); 4192 } else if ((++ValToCheck).isPowerOf2()) { 4193 // ((1 << X) & 7) == 0 -> X >= 3 4194 // ((1 << X) & 7) != 0 -> X < 3 4195 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 4196 auto NewPred = 4197 Pred == CmpInst::ICMP_EQ ? 
CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
4198         return new ICmpInst(NewPred, X, CmpC);
4199       }
4200     }
4201
4202     // Check if the LHS is (power-of-2 >>u X) and only bit 0 of the result can be set.
4203     const APInt *CI;
4204     if (Op0KnownZeroInverted.isOneValue() &&
4205         match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
4206       // ((8 >>u X) & 1) == 0 -> X != 3
4207       // ((8 >>u X) & 1) != 0 -> X == 3
4208       unsigned CmpVal = CI->countTrailingZeros();
4209       auto NewPred = ICmpInst::getInversePredicate(Pred);
4210       return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
4211     }
4212   }
4213   break;
4214 }
4215 case ICmpInst::ICMP_ULT: {
4216   if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
4217     return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4218   if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
4219     return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4220   if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
4221     return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4222
4223   const APInt *CmpC;
4224   if (match(Op1, m_APInt(CmpC))) {
4225     // A <u C -> A == C-1 if min(A)+1 == C
4226     if (Op1Max == Op0Min + 1) {
4227       Constant *CMinus1 = ConstantInt::get(Op0->getType(), *CmpC - 1);
4228       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, CMinus1);
4229     }
4230   }
4231   break;
4232 }
4233 case ICmpInst::ICMP_UGT: {
4234   if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
4235     return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4236
4237   if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
4238     return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4239
4240   if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
4241     return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4242
4243   const APInt *CmpC;
4244   if (match(Op1, m_APInt(CmpC))) {
4245     // A >u C -> A == C+1 if max(A)-1 == C
4246     if (*CmpC == Op0Max - 1)
4247       return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4248                           ConstantInt::get(Op1->getType(), *CmpC + 1));
4249   }
4250   break;
4251 }
4252 case ICmpInst::ICMP_SLT:
4253   if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
4254     return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4255   if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
4256     return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4257   if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
4258     return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4259   if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4260     if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
4261       return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4262                           Builder.getInt(CI->getValue() - 1));
4263   }
4264   break;
4265 case ICmpInst::ICMP_SGT:
4266   if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
4267     return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4268   if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
4269     return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
4270
4271   if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
4272     return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4273   if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
4274     if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
4275       return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
4276                           Builder.getInt(CI->getValue() + 1));
4277   }
4278   break;
4279 case ICmpInst::ICMP_SGE:
4280   assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
4281   if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
4282 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4283 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B) 4284 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4285 break; 4286 case ICmpInst::ICMP_SLE: 4287 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!"); 4288 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B) 4289 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4290 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B) 4291 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4292 break; 4293 case ICmpInst::ICMP_UGE: 4294 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!"); 4295 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B) 4296 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4297 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B) 4298 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4299 break; 4300 case ICmpInst::ICMP_ULE: 4301 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!"); 4302 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B) 4303 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4304 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B) 4305 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4306 break; 4307 } 4308 4309 // Turn a signed comparison into an unsigned one if both operands are known to 4310 // have the same sign. 4311 if (I.isSigned() && 4312 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) || 4313 (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) 4314 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1); 4315 4316 return nullptr; 4317 } 4318 4319 /// If we have an icmp le or icmp ge instruction with a constant operand, turn 4320 /// it into the appropriate icmp lt or icmp gt instruction. This transform 4321 /// allows them to be folded in visitICmpInst. 4322 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) { 4323 ICmpInst::Predicate Pred = I.getPredicate(); 4324 if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGE && 4325 Pred != ICmpInst::ICMP_ULE && Pred != ICmpInst::ICMP_UGE) 4326 return nullptr; 4327 4328 Value *Op0 = I.getOperand(0); 4329 Value *Op1 = I.getOperand(1); 4330 auto *Op1C = dyn_cast<Constant>(Op1); 4331 if (!Op1C) 4332 return nullptr; 4333 4334 // Check if the constant operand can be safely incremented/decremented without 4335 // overflowing/underflowing. For scalars, SimplifyICmpInst has already handled 4336 // the edge cases for us, so we just assert on them. For vectors, we must 4337 // handle the edge cases. 4338 Type *Op1Type = Op1->getType(); 4339 bool IsSigned = I.isSigned(); 4340 bool IsLE = (Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_ULE); 4341 auto *CI = dyn_cast<ConstantInt>(Op1C); 4342 if (CI) { 4343 // A <= MAX -> TRUE ; A >= MIN -> TRUE 4344 assert(IsLE ? !CI->isMaxValue(IsSigned) : !CI->isMinValue(IsSigned)); 4345 } else if (Op1Type->isVectorTy()) { 4346 // TODO? If the edge cases for vectors were guaranteed to be handled as they 4347 // are for scalar, we could remove the min/max checks. However, to do that, 4348 // we would have to use insertelement/shufflevector to replace edge values. 
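    // Illustrative examples of the rewrite performed at the end of this
    // function (constants chosen for illustration):
    //   icmp ule i8 %x, 7                  --> icmp ult i8 %x, 8
    //   icmp sge <2 x i8> %x, <i8 1, i8 9> --> icmp sgt <2 x i8> %x, <i8 0, i8 8>
    // The element-by-element scan below rejects any vector containing a
    // min/max element, for which adding 1 or -1 would wrap.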
4349 unsigned NumElts = Op1Type->getVectorNumElements(); 4350 for (unsigned i = 0; i != NumElts; ++i) { 4351 Constant *Elt = Op1C->getAggregateElement(i); 4352 if (!Elt) 4353 return nullptr; 4354 4355 if (isa<UndefValue>(Elt)) 4356 continue; 4357 4358 // Bail out if we can't determine if this constant is min/max or if we 4359 // know that this constant is min/max. 4360 auto *CI = dyn_cast<ConstantInt>(Elt); 4361 if (!CI || (IsLE ? CI->isMaxValue(IsSigned) : CI->isMinValue(IsSigned))) 4362 return nullptr; 4363 } 4364 } else { 4365 // ConstantExpr? 4366 return nullptr; 4367 } 4368 4369 // Increment or decrement the constant and set the new comparison predicate: 4370 // ULE -> ULT ; UGE -> UGT ; SLE -> SLT ; SGE -> SGT 4371 Constant *OneOrNegOne = ConstantInt::get(Op1Type, IsLE ? 1 : -1, true); 4372 CmpInst::Predicate NewPred = IsLE ? ICmpInst::ICMP_ULT: ICmpInst::ICMP_UGT; 4373 NewPred = IsSigned ? ICmpInst::getSignedPredicate(NewPred) : NewPred; 4374 return new ICmpInst(NewPred, Op0, ConstantExpr::getAdd(Op1C, OneOrNegOne)); 4375 } 4376 4377 /// Integer compare with boolean values can always be turned into bitwise ops. 4378 static Instruction *canonicalizeICmpBool(ICmpInst &I, 4379 InstCombiner::BuilderTy &Builder) { 4380 Value *A = I.getOperand(0), *B = I.getOperand(1); 4381 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only"); 4382 4383 // A boolean compared to true/false can be simplified to Op0/true/false in 4384 // 14 out of the 20 (10 predicates * 2 constants) possible combinations. 4385 // Cases not handled by InstSimplify are always 'not' of Op0. 4386 if (match(B, m_Zero())) { 4387 switch (I.getPredicate()) { 4388 case CmpInst::ICMP_EQ: // A == 0 -> !A 4389 case CmpInst::ICMP_ULE: // A <=u 0 -> !A 4390 case CmpInst::ICMP_SGE: // A >=s 0 -> !A 4391 return BinaryOperator::CreateNot(A); 4392 default: 4393 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 4394 } 4395 } else if (match(B, m_One())) { 4396 switch (I.getPredicate()) { 4397 case CmpInst::ICMP_NE: // A != 1 -> !A 4398 case CmpInst::ICMP_ULT: // A <u 1 -> !A 4399 case CmpInst::ICMP_SGT: // A >s -1 -> !A 4400 return BinaryOperator::CreateNot(A); 4401 default: 4402 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 4403 } 4404 } 4405 4406 switch (I.getPredicate()) { 4407 default: 4408 llvm_unreachable("Invalid icmp instruction!"); 4409 case ICmpInst::ICMP_EQ: 4410 // icmp eq i1 A, B -> ~(A ^ B) 4411 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 4412 4413 case ICmpInst::ICMP_NE: 4414 // icmp ne i1 A, B -> A ^ B 4415 return BinaryOperator::CreateXor(A, B); 4416 4417 case ICmpInst::ICMP_UGT: 4418 // icmp ugt -> icmp ult 4419 std::swap(A, B); 4420 LLVM_FALLTHROUGH; 4421 case ICmpInst::ICMP_ULT: 4422 // icmp ult i1 A, B -> ~A & B 4423 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 4424 4425 case ICmpInst::ICMP_SGT: 4426 // icmp sgt -> icmp slt 4427 std::swap(A, B); 4428 LLVM_FALLTHROUGH; 4429 case ICmpInst::ICMP_SLT: 4430 // icmp slt i1 A, B -> A & ~B 4431 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A); 4432 4433 case ICmpInst::ICMP_UGE: 4434 // icmp uge -> icmp ule 4435 std::swap(A, B); 4436 LLVM_FALLTHROUGH; 4437 case ICmpInst::ICMP_ULE: 4438 // icmp ule i1 A, B -> ~A | B 4439 return BinaryOperator::CreateOr(Builder.CreateNot(A), B); 4440 4441 case ICmpInst::ICMP_SGE: 4442 // icmp sge -> icmp sle 4443 std::swap(A, B); 4444 LLVM_FALLTHROUGH; 4445 case ICmpInst::ICMP_SLE: 4446 // icmp sle i1 A, B -> A | ~B 4447 return BinaryOperator::CreateOr(Builder.CreateNot(B), A); 
4448 } 4449 } 4450 4451 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) { 4452 bool Changed = false; 4453 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 4454 unsigned Op0Cplxity = getComplexity(Op0); 4455 unsigned Op1Cplxity = getComplexity(Op1); 4456 4457 /// Orders the operands of the compare so that they are listed from most 4458 /// complex to least complex. This puts constants before unary operators, 4459 /// before binary operators. 4460 if (Op0Cplxity < Op1Cplxity || 4461 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) { 4462 I.swapOperands(); 4463 std::swap(Op0, Op1); 4464 Changed = true; 4465 } 4466 4467 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, 4468 SQ.getWithInstruction(&I))) 4469 return replaceInstUsesWith(I, V); 4470 4471 // Comparing -val or val with non-zero is the same as just comparing val 4472 // ie, abs(val) != 0 -> val != 0 4473 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) { 4474 Value *Cond, *SelectTrue, *SelectFalse; 4475 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue), 4476 m_Value(SelectFalse)))) { 4477 if (Value *V = dyn_castNegVal(SelectTrue)) { 4478 if (V == SelectFalse) 4479 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1); 4480 } 4481 else if (Value *V = dyn_castNegVal(SelectFalse)) { 4482 if (V == SelectTrue) 4483 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1); 4484 } 4485 } 4486 } 4487 4488 if (Op0->getType()->isIntOrIntVectorTy(1)) 4489 if (Instruction *Res = canonicalizeICmpBool(I, Builder)) 4490 return Res; 4491 4492 if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I)) 4493 return NewICmp; 4494 4495 if (Instruction *Res = foldICmpWithConstant(I)) 4496 return Res; 4497 4498 if (Instruction *Res = foldICmpUsingKnownBits(I)) 4499 return Res; 4500 4501 // Test if the ICmpInst instruction is used exclusively by a select as 4502 // part of a minimum or maximum operation. If so, refrain from doing 4503 // any other folding. This helps out other analyses which understand 4504 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution 4505 // and CodeGen. And in this case, at least one of the comparison 4506 // operands has at least one user besides the compare (the select), 4507 // which would often largely negate the benefit of folding anyway. 4508 if (I.hasOneUse()) 4509 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin())) 4510 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) || 4511 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1)) 4512 return nullptr; 4513 4514 // FIXME: We only do this after checking for min/max to prevent infinite 4515 // looping caused by a reverse canonicalization of these patterns for min/max. 4516 // FIXME: The organization of folds is a mess. These would naturally go into 4517 // canonicalizeCmpWithConstant(), but we can't move all of the above folds 4518 // down here after the min/max restriction. 
  ICmpInst::Predicate Pred = I.getPredicate();
  const APInt *C;
  if (match(Op1, m_APInt(C))) {
    // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set
    if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
      Constant *Zero = Constant::getNullValue(Op0->getType());
      return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
    }

    // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear
    if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
      Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
      return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
    }
  }

  if (Instruction *Res = foldICmpInstWithConstant(I))
    return Res;

  if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
    return Res;

  // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
    if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
      return NI;
  if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
    if (Instruction *NI = foldGEPICmp(
            GEP, Op0, ICmpInst::getSwappedPredicate(I.getPredicate()), I))
      return NI;

  // Try to optimize equality comparisons against alloca-based pointers.
  if (Op0->getType()->isPointerTy() && I.isEquality()) {
    assert(Op1->getType()->isPointerTy() &&
           "Comparing pointer with non-pointer?");
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
        return New;
    if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
      if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
        return New;
  }

  // Test to see if the operands of the icmp are casted versions of other
  // values. If the ptr->ptr cast can be stripped off both arguments, we do so
  // now.
  if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
    if (Op0->getType()->isPointerTy() &&
        (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
      // We keep moving the cast from the left operand over to the right
      // operand, where it can often be eliminated completely.
      Op0 = CI->getOperand(0);

      // If operand #1 is a bitcast instruction, it must also be a ptr->ptr
      // cast, so eliminate it as well.
      if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
        Op1 = CI2->getOperand(0);

      // If Op1 is a constant, we can fold the cast into the constant.
      if (Op0->getType() != Op1->getType()) {
        if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
          Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
        } else {
          // Otherwise, cast the RHS right before the icmp.
          Op1 = Builder.CreateBitCast(Op1, Op0->getType());
        }
      }
      return new ICmpInst(I.getPredicate(), Op0, Op1);
    }
  }

  if (isa<CastInst>(Op0)) {
    // Handle the special case of: icmp (cast bool to X), <cst>
    // This comes up when you have code like
    //   int X = A < B;
    //   if (X) ...
    // For generality, we handle any zero-extension of any operand comparison
    // with a constant or another cast from the same type.
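    // e.g. (illustrative): %x = zext i1 %c to i32 ; icmp ne i32 %x, 0
    // reduces to just %c once the zero-extension is looked through.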
    if (isa<Constant>(Op1) || isa<CastInst>(Op1))
      if (Instruction *R = foldICmpWithCastAndCast(I))
        return R;
  }

  if (Instruction *Res = foldICmpBinOp(I))
    return Res;

  if (Instruction *Res = foldICmpWithMinMax(I))
    return Res;

  {
    Value *A, *B;
    // Transform (A & ~B) == 0 --> (A & B) != 0
    // and       (A & ~B) != 0 --> (A & B) == 0
    // if A is a power of 2.
    if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
        match(Op1, m_Zero()) &&
        isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
      return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
                          Op1);

    // ~X < ~Y --> Y < X
    // ~X < C  --> X > ~C
    if (match(Op0, m_Not(m_Value(A)))) {
      if (match(Op1, m_Not(m_Value(B))))
        return new ICmpInst(I.getPredicate(), B, A);

      const APInt *C;
      if (match(Op1, m_APInt(C)))
        return new ICmpInst(I.getSwappedPredicate(), A,
                            ConstantInt::get(Op1->getType(), ~(*C)));
    }

    Instruction *AddI = nullptr;
    if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
                                     m_Instruction(AddI))) &&
        isa<IntegerType>(A->getType())) {
      Value *Result;
      Constant *Overflow;
      if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
                                Overflow)) {
        replaceInstUsesWith(*AddI, Result);
        return replaceInstUsesWith(I, Overflow);
      }
    }

    // (zext a) * (zext b) --> llvm.umul.with.overflow.
    if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
        return R;
    }
    if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
      if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
        return R;
    }
  }

  if (Instruction *Res = foldICmpEquality(I))
    return Res;

  // The 'cmpxchg' instruction returns an aggregate containing the old value
  // and an i1 which indicates whether or not we successfully did the swap.
  //
  // Replace comparisons between the old value and the expected value with the
  // indicator that 'cmpxchg' returns.
  //
  // N.B. This transform is only valid when the 'cmpxchg' is not permitted to
  // spuriously fail. In those cases, the old value may equal the expected
  // value but it is possible for the swap to not occur.
  if (I.getPredicate() == ICmpInst::ICMP_EQ)
    if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
      if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
        if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
            !ACXI->isWeak())
          return ExtractValueInst::Create(ACXI, 1);

  {
    Value *X; ConstantInt *Cst;
    // icmp X+Cst, X
    if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
      return foldICmpAddOpConst(X, Cst, I.getPredicate());

    // icmp X, X+Cst
    if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
      return foldICmpAddOpConst(X, Cst, I.getSwappedPredicate());
  }
  return Changed ? &I : nullptr;
}

/// Fold fcmp ([us]itofp x, cst) if possible.
Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
                                                Constant *RHSC) {
  if (!isa<ConstantFP>(RHSC)) return nullptr;
  const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
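
  // Illustrative goal of this fold (example values): with an i8 source,
  //   %f = sitofp i8 %x to float
  //   %r = fcmp ogt float %f, 3.0e+2
  // folds to false, because no i8 value converts to a float above 300.0.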
  // Get the width of the mantissa. We don't want to hack on conversions that
  // might lose information from the integer, e.g. "i64 -> float".
  int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
  if (MantissaWidth == -1) return nullptr; // Unknown.

  IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

  bool LHSUnsigned = isa<UIToFPInst>(LHSI);

  if (I.isEquality()) {
    FCmpInst::Predicate P = I.getPredicate();
    bool IsExact = false;
    APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
    RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);

    // If the floating point constant isn't an integer value, we know the
    // result of any equality comparison against it.
    if (!IsExact) {
      // TODO: Can never be -0.0 and other non-representable values
      APFloat RHSRoundInt(RHS);
      RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
      if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
        if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
          return replaceInstUsesWith(I, Builder.getFalse());

        assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
        return replaceInstUsesWith(I, Builder.getTrue());
      }
    }

    // TODO: If the constant is exactly representable, is it always OK to do
    // equality compares as integer?
  }

  // Check to see that the input is converted from an integer type that is
  // small enough that all bits are preserved. TODO: check here for "known"
  // sign bits. This would allow us to handle (fptosi (x >>s 62) to float)
  // e.g., if x is i64.
  unsigned InputSize = IntTy->getScalarSizeInBits();

  // The following test does NOT adjust InputSize downwards for signed inputs,
  // because the most negative value still requires all the mantissa bits
  // to distinguish it from one less than that value.
  if ((int)InputSize > MantissaWidth) {
    // Conversion would lose accuracy. Check if loss can impact comparison.
    int Exp = ilogb(RHS);
    if (Exp == APFloat::IEK_Inf) {
      int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
      if (MaxExponent < (int)InputSize - !LHSUnsigned)
        // Conversion could create infinity.
        return nullptr;
    } else {
      // Note that if RHS is zero or NaN, then Exp is negative
      // and the first condition is trivially false.
      if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
        // Conversion could affect comparison.
        return nullptr;
    }
  }
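
  // e.g. (illustrative): with an i64 source and a float destination (24-bit
  // mantissa), x == 2^40 + 1 rounds to 2^40 under sitofp, so a 'fcmp ogt'
  // against 2^40 would disagree with the corresponding integer compare; the
  // exponent test above catches this case and bails out.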

  // Otherwise, we can potentially simplify the comparison. We know that it
  // will always come through as an integer value and we know the constant is
  // not a NaN (it would have been previously simplified).
  assert(!RHS.isNaN() && "NaN comparison not already folded!");

  ICmpInst::Predicate Pred;
  switch (I.getPredicate()) {
  default:
    llvm_unreachable("Unexpected predicate!");
  case FCmpInst::FCMP_UEQ:
  case FCmpInst::FCMP_OEQ:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case FCmpInst::FCMP_UGT:
  case FCmpInst::FCMP_OGT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
    break;
  case FCmpInst::FCMP_UGE:
  case FCmpInst::FCMP_OGE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
    break;
  case FCmpInst::FCMP_ULT:
  case FCmpInst::FCMP_OLT:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
    break;
  case FCmpInst::FCMP_ULE:
  case FCmpInst::FCMP_OLE:
    Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
    break;
  case FCmpInst::FCMP_UNE:
  case FCmpInst::FCMP_ONE:
    Pred = ICmpInst::ICMP_NE;
    break;
  case FCmpInst::FCMP_ORD:
    return replaceInstUsesWith(I, Builder.getTrue());
  case FCmpInst::FCMP_UNO:
    return replaceInstUsesWith(I, Builder.getFalse());
  }

  // Now we know that the APFloat is a normal number, zero or inf.

  // See if the FP constant is too large for the integer. For example,
  // comparing an i8 to 300.0.
  unsigned IntWidth = IntTy->getScalarSizeInBits();

  if (!LHSUnsigned) {
    // If the RHS value is > SignedMax, fold the comparison. This handles +INF
    // and large values.
    APFloat SMax(RHS.getSemantics());
    SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
          Pred == ICmpInst::ICMP_SLE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // If the RHS value is > UnsignedMax, fold the comparison. This handles
    // +INF and large values.
    APFloat UMax(RHS.getSemantics());
    UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
          Pred == ICmpInst::ICMP_ULE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }

  if (!LHSUnsigned) {
    // See if the RHS value is < SignedMin.
    APFloat SMin(RHS.getSemantics());
    SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
                          APFloat::rmNearestTiesToEven);
    if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
          Pred == ICmpInst::ICMP_SGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  } else {
    // See if the RHS value is < UnsignedMin (i.e. negative).
    APFloat UMin(RHS.getSemantics());
    UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
                          APFloat::rmNearestTiesToEven);
    if (UMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
      if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
          Pred == ICmpInst::ICMP_UGE)
        return replaceInstUsesWith(I, Builder.getTrue());
      return replaceInstUsesWith(I, Builder.getFalse());
    }
  }

  // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
  // [0, UMAX], but it may still be fractional. See if it is fractional by
  // casting the FP value to the integer value and back, checking for equality.
  // Don't do this for zero, because -0.0 is not fractional.
  Constant *RHSInt = LHSUnsigned
                         ? ConstantExpr::getFPToUI(RHSC, IntTy)
                         : ConstantExpr::getFPToSI(RHSC, IntTy);
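
  // Round-trip sketch (example values): for RHS == 4.4 with a signed source,
  // RHSInt is 4 and sitofp(4) == 4.0 != 4.4, so the constant is fractional
  // and the predicate may need adjusting below.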
  if (!RHS.isZero()) {
    bool Equal =
        LHSUnsigned
            ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
            : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
    if (!Equal) {
      // If we had a comparison against a fractional value, we have to adjust
      // the compare predicate and sometimes the value. RHSC is rounded towards
      // zero at this point.
      switch (Pred) {
      default:
        llvm_unreachable("Unexpected integer comparison!");
      case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
        return replaceInstUsesWith(I, Builder.getTrue());
      case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
        return replaceInstUsesWith(I, Builder.getFalse());
      case ICmpInst::ICMP_ULE:
        // (float)int <= 4.4  --> int <= 4
        // (float)int <= -4.4 --> false
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        break;
      case ICmpInst::ICMP_SLE:
        // (float)int <= 4.4  --> int <= 4
        // (float)int <= -4.4 --> int < -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SLT;
        break;
      case ICmpInst::ICMP_ULT:
        // (float)int < -4.4 --> false
        // (float)int < 4.4  --> int <= 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getFalse());
        Pred = ICmpInst::ICMP_ULE;
        break;
      case ICmpInst::ICMP_SLT:
        // (float)int < -4.4 --> int < -4
        // (float)int < 4.4  --> int <= 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SLE;
        break;
      case ICmpInst::ICMP_UGT:
        // (float)int > 4.4  --> int > 4
        // (float)int > -4.4 --> true
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        break;
      case ICmpInst::ICMP_SGT:
        // (float)int > 4.4  --> int > 4
        // (float)int > -4.4 --> int >= -4
        if (RHS.isNegative())
          Pred = ICmpInst::ICMP_SGE;
        break;
      case ICmpInst::ICMP_UGE:
        // (float)int >= -4.4 --> true
        // (float)int >= 4.4  --> int > 4
        if (RHS.isNegative())
          return replaceInstUsesWith(I, Builder.getTrue());
        Pred = ICmpInst::ICMP_UGT;
        break;
      case ICmpInst::ICMP_SGE:
        // (float)int >= -4.4 --> int >= -4
        // (float)int >= 4.4  --> int > 4
        if (!RHS.isNegative())
          Pred = ICmpInst::ICMP_SGT;
        break;
      }
    }
  }

  // Lower this FP comparison into an appropriate integer version of the
  // comparison.
  return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}

Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
  bool Changed = false;

  // Order the operands of the compare so that they are listed from most
  // complex to least complex. Constants rank below unary operators, which
  // rank below binary operators.
  if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
    I.swapOperands();
    Changed = true;
  }

  const CmpInst::Predicate Pred = I.getPredicate();
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  // Simplify 'fcmp pred X, X'.
  if (Op0 == Op1) {
    switch (Pred) {
    default:
      break;
    case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
    case FCmpInst::FCMP_ULT: // True if unordered or less than
    case FCmpInst::FCMP_UGT: // True if unordered or greater than
    case FCmpInst::FCMP_UNE: // True if unordered or not equal
      // Canonicalize these to be 'fcmp uno %X, 0.0'.
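      // e.g. 'fcmp ult double %X, %X' can only be true when %X is a NaN, so
      // it is equivalent to 'fcmp uno double %X, 0.0' (illustrative).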
      I.setPredicate(FCmpInst::FCMP_UNO);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;

    case FCmpInst::FCMP_ORD: // True if ordered (no nans)
    case FCmpInst::FCMP_OEQ: // True if ordered and equal
    case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
    case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
      // Canonicalize these to be 'fcmp ord %X, 0.0'.
      I.setPredicate(FCmpInst::FCMP_ORD);
      I.setOperand(1, Constant::getNullValue(Op0->getType()));
      return &I;
    }
  }

  // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
  // then canonicalize the operand to 0.0.
  if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    if (!match(Op0, m_Zero()) && isKnownNeverNaN(Op0)) {
      I.setOperand(0, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
    if (!match(Op1, m_Zero()) && isKnownNeverNaN(Op1)) {
      I.setOperand(1, ConstantFP::getNullValue(Op0->getType()));
      return &I;
    }
  }

  // Test if the FCmpInst instruction is used exclusively by a select as
  // part of a minimum or maximum operation. If so, refrain from doing
  // any other folding. This helps out other analyses which understand
  // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
  // and CodeGen. And in this case, at least one of the comparison
  // operands has at least one user besides the compare (the select),
  // which would often largely negate the benefit of folding anyway.
  if (I.hasOneUse())
    if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
      if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
          (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
        return nullptr;

  // Handle fcmp with constant RHS.
  if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
    if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
      switch (LHSI->getOpcode()) {
      case Instruction::FPExt: {
        // fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
        FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
        ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
        if (!RHSF)
          break;

        const fltSemantics *Sem;
        // FIXME: This shouldn't be here.
        if (LHSExt->getSrcTy()->isHalfTy())
          Sem = &APFloat::IEEEhalf();
        else if (LHSExt->getSrcTy()->isFloatTy())
          Sem = &APFloat::IEEEsingle();
        else if (LHSExt->getSrcTy()->isDoubleTy())
          Sem = &APFloat::IEEEdouble();
        else if (LHSExt->getSrcTy()->isFP128Ty())
          Sem = &APFloat::IEEEquad();
        else if (LHSExt->getSrcTy()->isX86_FP80Ty())
          Sem = &APFloat::x87DoubleExtended();
        else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
          Sem = &APFloat::PPCDoubleDouble();
        else
          break;

        bool Lossy;
        APFloat F = RHSF->getValueAPF();
        F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

        // Avoid lossy conversions and denormals. Zero is a special case
        // that's OK to convert.
        APFloat Fabs = F;
        Fabs.clearSign();
        if (!Lossy &&
            ((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
              APFloat::cmpLessThan) ||
             Fabs.isZero()))
          return new FCmpInst(Pred, LHSExt->getOperand(0),
                              ConstantFP::get(RHSC->getContext(), F));
        break;
      }
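      // e.g. (illustrative): fcmp olt (fpext float %x to double), 1.0
      // --> fcmp olt float %x, 1.0, since 1.0 converts to float exactly
      // and is not a denormal.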
      case Instruction::PHI:
        // Only fold fcmp into the PHI if the phi and fcmp are in the same
        // block. If in the same block, we're encouraging jump threading. If
        // not, we are just pessimizing the code by making an i1 phi.
        if (LHSI->getParent() == I.getParent())
          if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
            return NV;
        break;
      case Instruction::SIToFP:
      case Instruction::UIToFP:
        if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
          return NV;
        break;
      case Instruction::FSub: {
        // fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
        Value *Op;
        if (match(LHSI, m_FNeg(m_Value(Op))))
          return new FCmpInst(I.getSwappedPredicate(), Op,
                              ConstantExpr::getFNeg(RHSC));
        break;
      }
      case Instruction::Load:
        if (GetElementPtrInst *GEP =
                dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
          if (GlobalVariable *GV =
                  dyn_cast<GlobalVariable>(GEP->getOperand(0)))
            if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
                !cast<LoadInst>(LHSI)->isVolatile())
              if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
                return Res;
        }
        break;
      case Instruction::Call: {
        if (!RHSC->isNullValue())
          break;

        CallInst *CI = cast<CallInst>(LHSI);
        Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
        if (IID != Intrinsic::fabs)
          break;

        // Various optimizations for fabs compared with zero.
        switch (Pred) {
        default:
          break;
        // fabs(x) < 0 --> false
        case FCmpInst::FCMP_OLT:
          llvm_unreachable("handled by SimplifyFCmpInst");
        // fabs(x) > 0 --> x != 0
        case FCmpInst::FCMP_OGT:
          return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
        // fabs(x) <= 0 --> x == 0
        case FCmpInst::FCMP_OLE:
          return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
        // fabs(x) >= 0 --> !isnan(x)
        case FCmpInst::FCMP_OGE:
          return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
        // fabs(x) == 0 --> x == 0
        // fabs(x) != 0 --> x != 0
        case FCmpInst::FCMP_OEQ:
        case FCmpInst::FCMP_UEQ:
        case FCmpInst::FCMP_ONE:
        case FCmpInst::FCMP_UNE:
          return new FCmpInst(Pred, CI->getArgOperand(0), RHSC);
        }
      }
      }
  }

  // fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
  Value *X, *Y;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return new FCmpInst(I.getSwappedPredicate(), X, Y);

  // fcmp (fpext x), (fpext y) -> fcmp x, y
  if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
    if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
      if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
        return new FCmpInst(Pred, LHSExt->getOperand(0),
                            RHSExt->getOperand(0));

  return Changed ? &I : nullptr;
}