1 //===- InstCombineCompares.cpp --------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the visitICmp and visitFCmp functions. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "InstCombineInternal.h" 14 #include "llvm/ADT/APSInt.h" 15 #include "llvm/ADT/SetVector.h" 16 #include "llvm/ADT/Statistic.h" 17 #include "llvm/Analysis/ConstantFolding.h" 18 #include "llvm/Analysis/InstructionSimplify.h" 19 #include "llvm/Analysis/TargetLibraryInfo.h" 20 #include "llvm/IR/ConstantRange.h" 21 #include "llvm/IR/DataLayout.h" 22 #include "llvm/IR/GetElementPtrTypeIterator.h" 23 #include "llvm/IR/IntrinsicInst.h" 24 #include "llvm/IR/PatternMatch.h" 25 #include "llvm/Support/Debug.h" 26 #include "llvm/Support/KnownBits.h" 27 #include "llvm/Transforms/InstCombine/InstCombiner.h" 28 29 using namespace llvm; 30 using namespace PatternMatch; 31 32 #define DEBUG_TYPE "instcombine" 33 34 // How many times is a select replaced by one of its operands? 35 STATISTIC(NumSel, "Number of select opts"); 36 37 38 /// Compute Result = In1+In2, returning true if the result overflowed for this 39 /// type. 40 static bool addWithOverflow(APInt &Result, const APInt &In1, 41 const APInt &In2, bool IsSigned = false) { 42 bool Overflow; 43 if (IsSigned) 44 Result = In1.sadd_ov(In2, Overflow); 45 else 46 Result = In1.uadd_ov(In2, Overflow); 47 48 return Overflow; 49 } 50 51 /// Compute Result = In1-In2, returning true if the result overflowed for this 52 /// type. 53 static bool subWithOverflow(APInt &Result, const APInt &In1, 54 const APInt &In2, bool IsSigned = false) { 55 bool Overflow; 56 if (IsSigned) 57 Result = In1.ssub_ov(In2, Overflow); 58 else 59 Result = In1.usub_ov(In2, Overflow); 60 61 return Overflow; 62 } 63 64 /// Given an icmp instruction, return true if any use of this comparison is a 65 /// branch on sign bit comparison. 66 static bool hasBranchUse(ICmpInst &I) { 67 for (auto *U : I.users()) 68 if (isa<BranchInst>(U)) 69 return true; 70 return false; 71 } 72 73 /// Returns true if the exploded icmp can be expressed as a signed comparison 74 /// to zero and updates the predicate accordingly. 75 /// The signedness of the comparison is preserved. 76 /// TODO: Refactor with decomposeBitTestICmp()? 77 static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) { 78 if (!ICmpInst::isSigned(Pred)) 79 return false; 80 81 if (C.isNullValue()) 82 return ICmpInst::isRelational(Pred); 83 84 if (C.isOneValue()) { 85 if (Pred == ICmpInst::ICMP_SLT) { 86 Pred = ICmpInst::ICMP_SLE; 87 return true; 88 } 89 } else if (C.isAllOnesValue()) { 90 if (Pred == ICmpInst::ICMP_SGT) { 91 Pred = ICmpInst::ICMP_SGE; 92 return true; 93 } 94 } 95 96 return false; 97 } 98 99 /// This is called when we see this pattern: 100 /// cmp pred (load (gep GV, ...)), cmpcst 101 /// where GV is a global variable with a constant initializer. Try to simplify 102 /// this into some simple computation that does not need the load. For example 103 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3". 104 /// 105 /// If AndCst is non-null, then the loaded value is masked with that constant 106 /// before doing the comparison. This handles cases like "A[i]&4 == 0". 107 Instruction * 108 InstCombinerImpl::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, 109 GlobalVariable *GV, CmpInst &ICI, 110 ConstantInt *AndCst) { 111 Constant *Init = GV->getInitializer(); 112 if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init)) 113 return nullptr; 114 115 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements(); 116 // Don't blow up on huge arrays. 117 if (ArrayElementCount > MaxArraySizeForCombine) 118 return nullptr; 119 120 // There are many forms of this optimization we can handle, for now, just do 121 // the simple index into a single-dimensional array. 122 // 123 // Require: GEP GV, 0, i {{, constant indices}} 124 if (GEP->getNumOperands() < 3 || 125 !isa<ConstantInt>(GEP->getOperand(1)) || 126 !cast<ConstantInt>(GEP->getOperand(1))->isZero() || 127 isa<Constant>(GEP->getOperand(2))) 128 return nullptr; 129 130 // Check that indices after the variable are constants and in-range for the 131 // type they index. Collect the indices. This is typically for arrays of 132 // structs. 133 SmallVector<unsigned, 4> LaterIndices; 134 135 Type *EltTy = Init->getType()->getArrayElementType(); 136 for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) { 137 ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i)); 138 if (!Idx) return nullptr; // Variable index. 139 140 uint64_t IdxVal = Idx->getZExtValue(); 141 if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index. 142 143 if (StructType *STy = dyn_cast<StructType>(EltTy)) 144 EltTy = STy->getElementType(IdxVal); 145 else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) { 146 if (IdxVal >= ATy->getNumElements()) return nullptr; 147 EltTy = ATy->getElementType(); 148 } else { 149 return nullptr; // Unknown type. 150 } 151 152 LaterIndices.push_back(IdxVal); 153 } 154 155 enum { Overdefined = -3, Undefined = -2 }; 156 157 // Variables for our state machines. 158 159 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form 160 // "i == 47 | i == 87", where 47 is the first index the condition is true for, 161 // and 87 is the second (and last) index. FirstTrueElement is -2 when 162 // undefined, otherwise set to the first true element. SecondTrueElement is 163 // -2 when undefined, -3 when overdefined and >= 0 when that index is true. 164 int FirstTrueElement = Undefined, SecondTrueElement = Undefined; 165 166 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the 167 // form "i != 47 & i != 87". Same state transitions as for true elements. 168 int FirstFalseElement = Undefined, SecondFalseElement = Undefined; 169 170 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these 171 /// define a state machine that triggers for ranges of values that the index 172 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'. 173 /// This is -2 when undefined, -3 when overdefined, and otherwise the last 174 /// index in the range (inclusive). We use -2 for undefined here because we 175 /// use relative comparisons and don't want 0-1 to match -1. 176 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined; 177 178 // MagicBitvector - This is a magic bitvector where we set a bit if the 179 // comparison is true for element 'i'. If there are 64 elements or less in 180 // the array, this will fully represent all the comparison results. 181 uint64_t MagicBitvector = 0; 182 183 // Scan the array and see if one of our patterns matches. 184 Constant *CompareRHS = cast<Constant>(ICI.getOperand(1)); 185 for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) { 186 Constant *Elt = Init->getAggregateElement(i); 187 if (!Elt) return nullptr; 188 189 // If this is indexing an array of structures, get the structure element. 190 if (!LaterIndices.empty()) 191 Elt = ConstantExpr::getExtractValue(Elt, LaterIndices); 192 193 // If the element is masked, handle it. 194 if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst); 195 196 // Find out if the comparison would be true or false for the i'th element. 197 Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt, 198 CompareRHS, DL, &TLI); 199 // If the result is undef for this element, ignore it. 200 if (isa<UndefValue>(C)) { 201 // Extend range state machines to cover this element in case there is an 202 // undef in the middle of the range. 203 if (TrueRangeEnd == (int)i-1) 204 TrueRangeEnd = i; 205 if (FalseRangeEnd == (int)i-1) 206 FalseRangeEnd = i; 207 continue; 208 } 209 210 // If we can't compute the result for any of the elements, we have to give 211 // up evaluating the entire conditional. 212 if (!isa<ConstantInt>(C)) return nullptr; 213 214 // Otherwise, we know if the comparison is true or false for this element, 215 // update our state machines. 216 bool IsTrueForElt = !cast<ConstantInt>(C)->isZero(); 217 218 // State machine for single/double/range index comparison. 219 if (IsTrueForElt) { 220 // Update the TrueElement state machine. 221 if (FirstTrueElement == Undefined) 222 FirstTrueElement = TrueRangeEnd = i; // First true element. 223 else { 224 // Update double-compare state machine. 225 if (SecondTrueElement == Undefined) 226 SecondTrueElement = i; 227 else 228 SecondTrueElement = Overdefined; 229 230 // Update range state machine. 231 if (TrueRangeEnd == (int)i-1) 232 TrueRangeEnd = i; 233 else 234 TrueRangeEnd = Overdefined; 235 } 236 } else { 237 // Update the FalseElement state machine. 238 if (FirstFalseElement == Undefined) 239 FirstFalseElement = FalseRangeEnd = i; // First false element. 240 else { 241 // Update double-compare state machine. 242 if (SecondFalseElement == Undefined) 243 SecondFalseElement = i; 244 else 245 SecondFalseElement = Overdefined; 246 247 // Update range state machine. 248 if (FalseRangeEnd == (int)i-1) 249 FalseRangeEnd = i; 250 else 251 FalseRangeEnd = Overdefined; 252 } 253 } 254 255 // If this element is in range, update our magic bitvector. 256 if (i < 64 && IsTrueForElt) 257 MagicBitvector |= 1ULL << i; 258 259 // If all of our states become overdefined, bail out early. Since the 260 // predicate is expensive, only check it every 8 elements. This is only 261 // really useful for really huge arrays. 262 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined && 263 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined && 264 FalseRangeEnd == Overdefined) 265 return nullptr; 266 } 267 268 // Now that we've scanned the entire array, emit our new comparison(s). We 269 // order the state machines in complexity of the generated code. 270 Value *Idx = GEP->getOperand(2); 271 272 // If the index is larger than the pointer size of the target, truncate the 273 // index down like the GEP would do implicitly. We don't have to do this for 274 // an inbounds GEP because the index can't be out of range. 275 if (!GEP->isInBounds()) { 276 Type *IntPtrTy = DL.getIntPtrType(GEP->getType()); 277 unsigned PtrSize = IntPtrTy->getIntegerBitWidth(); 278 if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize) 279 Idx = Builder.CreateTrunc(Idx, IntPtrTy); 280 } 281 282 // If the comparison is only true for one or two elements, emit direct 283 // comparisons. 284 if (SecondTrueElement != Overdefined) { 285 // None true -> false. 286 if (FirstTrueElement == Undefined) 287 return replaceInstUsesWith(ICI, Builder.getFalse()); 288 289 Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement); 290 291 // True for one element -> 'i == 47'. 292 if (SecondTrueElement == Undefined) 293 return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx); 294 295 // True for two elements -> 'i == 47 | i == 72'. 296 Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx); 297 Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement); 298 Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx); 299 return BinaryOperator::CreateOr(C1, C2); 300 } 301 302 // If the comparison is only false for one or two elements, emit direct 303 // comparisons. 304 if (SecondFalseElement != Overdefined) { 305 // None false -> true. 306 if (FirstFalseElement == Undefined) 307 return replaceInstUsesWith(ICI, Builder.getTrue()); 308 309 Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement); 310 311 // False for one element -> 'i != 47'. 312 if (SecondFalseElement == Undefined) 313 return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx); 314 315 // False for two elements -> 'i != 47 & i != 72'. 316 Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx); 317 Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement); 318 Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx); 319 return BinaryOperator::CreateAnd(C1, C2); 320 } 321 322 // If the comparison can be replaced with a range comparison for the elements 323 // where it is true, emit the range check. 324 if (TrueRangeEnd != Overdefined) { 325 assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare"); 326 327 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1). 328 if (FirstTrueElement) { 329 Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement); 330 Idx = Builder.CreateAdd(Idx, Offs); 331 } 332 333 Value *End = ConstantInt::get(Idx->getType(), 334 TrueRangeEnd-FirstTrueElement+1); 335 return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End); 336 } 337 338 // False range check. 339 if (FalseRangeEnd != Overdefined) { 340 assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare"); 341 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse). 342 if (FirstFalseElement) { 343 Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement); 344 Idx = Builder.CreateAdd(Idx, Offs); 345 } 346 347 Value *End = ConstantInt::get(Idx->getType(), 348 FalseRangeEnd-FirstFalseElement); 349 return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End); 350 } 351 352 // If a magic bitvector captures the entire comparison state 353 // of this load, replace it with computation that does: 354 // ((magic_cst >> i) & 1) != 0 355 { 356 Type *Ty = nullptr; 357 358 // Look for an appropriate type: 359 // - The type of Idx if the magic fits 360 // - The smallest fitting legal type 361 if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth()) 362 Ty = Idx->getType(); 363 else 364 Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount); 365 366 if (Ty) { 367 Value *V = Builder.CreateIntCast(Idx, Ty, false); 368 V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V); 369 V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V); 370 return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0)); 371 } 372 } 373 374 return nullptr; 375 } 376 377 /// Return a value that can be used to compare the *offset* implied by a GEP to 378 /// zero. For example, if we have &A[i], we want to return 'i' for 379 /// "icmp ne i, 0". Note that, in general, indices can be complex, and scales 380 /// are involved. The above expression would also be legal to codegen as 381 /// "icmp ne (i*4), 0" (assuming A is a pointer to i32). 382 /// This latter form is less amenable to optimization though, and we are allowed 383 /// to generate the first by knowing that pointer arithmetic doesn't overflow. 384 /// 385 /// If we can't emit an optimized form for this expression, this returns null. 386 /// 387 static Value *evaluateGEPOffsetExpression(User *GEP, InstCombinerImpl &IC, 388 const DataLayout &DL) { 389 gep_type_iterator GTI = gep_type_begin(GEP); 390 391 // Check to see if this gep only has a single variable index. If so, and if 392 // any constant indices are a multiple of its scale, then we can compute this 393 // in terms of the scale of the variable index. For example, if the GEP 394 // implies an offset of "12 + i*4", then we can codegen this as "3 + i", 395 // because the expression will cross zero at the same point. 396 unsigned i, e = GEP->getNumOperands(); 397 int64_t Offset = 0; 398 for (i = 1; i != e; ++i, ++GTI) { 399 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) { 400 // Compute the aggregate offset of constant indices. 401 if (CI->isZero()) continue; 402 403 // Handle a struct index, which adds its field offset to the pointer. 404 if (StructType *STy = GTI.getStructTypeOrNull()) { 405 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue()); 406 } else { 407 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()); 408 Offset += Size*CI->getSExtValue(); 409 } 410 } else { 411 // Found our variable index. 412 break; 413 } 414 } 415 416 // If there are no variable indices, we must have a constant offset, just 417 // evaluate it the general way. 418 if (i == e) return nullptr; 419 420 Value *VariableIdx = GEP->getOperand(i); 421 // Determine the scale factor of the variable element. For example, this is 422 // 4 if the variable index is into an array of i32. 423 uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType()); 424 425 // Verify that there are no other variable indices. If so, emit the hard way. 426 for (++i, ++GTI; i != e; ++i, ++GTI) { 427 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i)); 428 if (!CI) return nullptr; 429 430 // Compute the aggregate offset of constant indices. 431 if (CI->isZero()) continue; 432 433 // Handle a struct index, which adds its field offset to the pointer. 434 if (StructType *STy = GTI.getStructTypeOrNull()) { 435 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue()); 436 } else { 437 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()); 438 Offset += Size*CI->getSExtValue(); 439 } 440 } 441 442 // Okay, we know we have a single variable index, which must be a 443 // pointer/array/vector index. If there is no offset, life is simple, return 444 // the index. 445 Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType()); 446 unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth(); 447 if (Offset == 0) { 448 // Cast to intptrty in case a truncation occurs. If an extension is needed, 449 // we don't need to bother extending: the extension won't affect where the 450 // computation crosses zero. 451 if (VariableIdx->getType()->getPrimitiveSizeInBits().getFixedSize() > 452 IntPtrWidth) { 453 VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy); 454 } 455 return VariableIdx; 456 } 457 458 // Otherwise, there is an index. The computation we will do will be modulo 459 // the pointer size. 460 Offset = SignExtend64(Offset, IntPtrWidth); 461 VariableScale = SignExtend64(VariableScale, IntPtrWidth); 462 463 // To do this transformation, any constant index must be a multiple of the 464 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i", 465 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a 466 // multiple of the variable scale. 467 int64_t NewOffs = Offset / (int64_t)VariableScale; 468 if (Offset != NewOffs*(int64_t)VariableScale) 469 return nullptr; 470 471 // Okay, we can do this evaluation. Start by converting the index to intptr. 472 if (VariableIdx->getType() != IntPtrTy) 473 VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy, 474 true /*Signed*/); 475 Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs); 476 return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset"); 477 } 478 479 /// Returns true if we can rewrite Start as a GEP with pointer Base 480 /// and some integer offset. The nodes that need to be re-written 481 /// for this transformation will be added to Explored. 482 static bool canRewriteGEPAsOffset(Value *Start, Value *Base, 483 const DataLayout &DL, 484 SetVector<Value *> &Explored) { 485 SmallVector<Value *, 16> WorkList(1, Start); 486 Explored.insert(Base); 487 488 // The following traversal gives us an order which can be used 489 // when doing the final transformation. Since in the final 490 // transformation we create the PHI replacement instructions first, 491 // we don't have to get them in any particular order. 492 // 493 // However, for other instructions we will have to traverse the 494 // operands of an instruction first, which means that we have to 495 // do a post-order traversal. 496 while (!WorkList.empty()) { 497 SetVector<PHINode *> PHIs; 498 499 while (!WorkList.empty()) { 500 if (Explored.size() >= 100) 501 return false; 502 503 Value *V = WorkList.back(); 504 505 if (Explored.contains(V)) { 506 WorkList.pop_back(); 507 continue; 508 } 509 510 if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) && 511 !isa<GetElementPtrInst>(V) && !isa<PHINode>(V)) 512 // We've found some value that we can't explore which is different from 513 // the base. Therefore we can't do this transformation. 514 return false; 515 516 if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) { 517 auto *CI = cast<CastInst>(V); 518 if (!CI->isNoopCast(DL)) 519 return false; 520 521 if (Explored.count(CI->getOperand(0)) == 0) 522 WorkList.push_back(CI->getOperand(0)); 523 } 524 525 if (auto *GEP = dyn_cast<GEPOperator>(V)) { 526 // We're limiting the GEP to having one index. This will preserve 527 // the original pointer type. We could handle more cases in the 528 // future. 529 if (GEP->getNumIndices() != 1 || !GEP->isInBounds() || 530 GEP->getType() != Start->getType()) 531 return false; 532 533 if (Explored.count(GEP->getOperand(0)) == 0) 534 WorkList.push_back(GEP->getOperand(0)); 535 } 536 537 if (WorkList.back() == V) { 538 WorkList.pop_back(); 539 // We've finished visiting this node, mark it as such. 540 Explored.insert(V); 541 } 542 543 if (auto *PN = dyn_cast<PHINode>(V)) { 544 // We cannot transform PHIs on unsplittable basic blocks. 545 if (isa<CatchSwitchInst>(PN->getParent()->getTerminator())) 546 return false; 547 Explored.insert(PN); 548 PHIs.insert(PN); 549 } 550 } 551 552 // Explore the PHI nodes further. 553 for (auto *PN : PHIs) 554 for (Value *Op : PN->incoming_values()) 555 if (Explored.count(Op) == 0) 556 WorkList.push_back(Op); 557 } 558 559 // Make sure that we can do this. Since we can't insert GEPs in a basic 560 // block before a PHI node, we can't easily do this transformation if 561 // we have PHI node users of transformed instructions. 562 for (Value *Val : Explored) { 563 for (Value *Use : Val->uses()) { 564 565 auto *PHI = dyn_cast<PHINode>(Use); 566 auto *Inst = dyn_cast<Instruction>(Val); 567 568 if (Inst == Base || Inst == PHI || !Inst || !PHI || 569 Explored.count(PHI) == 0) 570 continue; 571 572 if (PHI->getParent() == Inst->getParent()) 573 return false; 574 } 575 } 576 return true; 577 } 578 579 // Sets the appropriate insert point on Builder where we can add 580 // a replacement Instruction for V (if that is possible). 581 static void setInsertionPoint(IRBuilder<> &Builder, Value *V, 582 bool Before = true) { 583 if (auto *PHI = dyn_cast<PHINode>(V)) { 584 Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt()); 585 return; 586 } 587 if (auto *I = dyn_cast<Instruction>(V)) { 588 if (!Before) 589 I = &*std::next(I->getIterator()); 590 Builder.SetInsertPoint(I); 591 return; 592 } 593 if (auto *A = dyn_cast<Argument>(V)) { 594 // Set the insertion point in the entry block. 595 BasicBlock &Entry = A->getParent()->getEntryBlock(); 596 Builder.SetInsertPoint(&*Entry.getFirstInsertionPt()); 597 return; 598 } 599 // Otherwise, this is a constant and we don't need to set a new 600 // insertion point. 601 assert(isa<Constant>(V) && "Setting insertion point for unknown value!"); 602 } 603 604 /// Returns a re-written value of Start as an indexed GEP using Base as a 605 /// pointer. 606 static Value *rewriteGEPAsOffset(Value *Start, Value *Base, 607 const DataLayout &DL, 608 SetVector<Value *> &Explored) { 609 // Perform all the substitutions. This is a bit tricky because we can 610 // have cycles in our use-def chains. 611 // 1. Create the PHI nodes without any incoming values. 612 // 2. Create all the other values. 613 // 3. Add the edges for the PHI nodes. 614 // 4. Emit GEPs to get the original pointers. 615 // 5. Remove the original instructions. 616 Type *IndexType = IntegerType::get( 617 Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType())); 618 619 DenseMap<Value *, Value *> NewInsts; 620 NewInsts[Base] = ConstantInt::getNullValue(IndexType); 621 622 // Create the new PHI nodes, without adding any incoming values. 623 for (Value *Val : Explored) { 624 if (Val == Base) 625 continue; 626 // Create empty phi nodes. This avoids cyclic dependencies when creating 627 // the remaining instructions. 628 if (auto *PHI = dyn_cast<PHINode>(Val)) 629 NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(), 630 PHI->getName() + ".idx", PHI); 631 } 632 IRBuilder<> Builder(Base->getContext()); 633 634 // Create all the other instructions. 635 for (Value *Val : Explored) { 636 637 if (NewInsts.find(Val) != NewInsts.end()) 638 continue; 639 640 if (auto *CI = dyn_cast<CastInst>(Val)) { 641 // Don't get rid of the intermediate variable here; the store can grow 642 // the map which will invalidate the reference to the input value. 643 Value *V = NewInsts[CI->getOperand(0)]; 644 NewInsts[CI] = V; 645 continue; 646 } 647 if (auto *GEP = dyn_cast<GEPOperator>(Val)) { 648 Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)] 649 : GEP->getOperand(1); 650 setInsertionPoint(Builder, GEP); 651 // Indices might need to be sign extended. GEPs will magically do 652 // this, but we need to do it ourselves here. 653 if (Index->getType()->getScalarSizeInBits() != 654 NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) { 655 Index = Builder.CreateSExtOrTrunc( 656 Index, NewInsts[GEP->getOperand(0)]->getType(), 657 GEP->getOperand(0)->getName() + ".sext"); 658 } 659 660 auto *Op = NewInsts[GEP->getOperand(0)]; 661 if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero()) 662 NewInsts[GEP] = Index; 663 else 664 NewInsts[GEP] = Builder.CreateNSWAdd( 665 Op, Index, GEP->getOperand(0)->getName() + ".add"); 666 continue; 667 } 668 if (isa<PHINode>(Val)) 669 continue; 670 671 llvm_unreachable("Unexpected instruction type"); 672 } 673 674 // Add the incoming values to the PHI nodes. 675 for (Value *Val : Explored) { 676 if (Val == Base) 677 continue; 678 // All the instructions have been created, we can now add edges to the 679 // phi nodes. 680 if (auto *PHI = dyn_cast<PHINode>(Val)) { 681 PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]); 682 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) { 683 Value *NewIncoming = PHI->getIncomingValue(I); 684 685 if (NewInsts.find(NewIncoming) != NewInsts.end()) 686 NewIncoming = NewInsts[NewIncoming]; 687 688 NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I)); 689 } 690 } 691 } 692 693 for (Value *Val : Explored) { 694 if (Val == Base) 695 continue; 696 697 // Depending on the type, for external users we have to emit 698 // a GEP or a GEP + ptrtoint. 699 setInsertionPoint(Builder, Val, false); 700 701 // If required, create an inttoptr instruction for Base. 702 Value *NewBase = Base; 703 if (!Base->getType()->isPointerTy()) 704 NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(), 705 Start->getName() + "to.ptr"); 706 707 Value *GEP = Builder.CreateInBoundsGEP( 708 Start->getType()->getPointerElementType(), NewBase, 709 makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr"); 710 711 if (!Val->getType()->isPointerTy()) { 712 Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(), 713 Val->getName() + ".conv"); 714 GEP = Cast; 715 } 716 Val->replaceAllUsesWith(GEP); 717 } 718 719 return NewInsts[Start]; 720 } 721 722 /// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express 723 /// the input Value as a constant indexed GEP. Returns a pair containing 724 /// the GEPs Pointer and Index. 725 static std::pair<Value *, Value *> 726 getAsConstantIndexedAddress(Value *V, const DataLayout &DL) { 727 Type *IndexType = IntegerType::get(V->getContext(), 728 DL.getIndexTypeSizeInBits(V->getType())); 729 730 Constant *Index = ConstantInt::getNullValue(IndexType); 731 while (true) { 732 if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 733 // We accept only inbouds GEPs here to exclude the possibility of 734 // overflow. 735 if (!GEP->isInBounds()) 736 break; 737 if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 && 738 GEP->getType() == V->getType()) { 739 V = GEP->getOperand(0); 740 Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1)); 741 Index = ConstantExpr::getAdd( 742 Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType)); 743 continue; 744 } 745 break; 746 } 747 if (auto *CI = dyn_cast<IntToPtrInst>(V)) { 748 if (!CI->isNoopCast(DL)) 749 break; 750 V = CI->getOperand(0); 751 continue; 752 } 753 if (auto *CI = dyn_cast<PtrToIntInst>(V)) { 754 if (!CI->isNoopCast(DL)) 755 break; 756 V = CI->getOperand(0); 757 continue; 758 } 759 break; 760 } 761 return {V, Index}; 762 } 763 764 /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant. 765 /// We can look through PHIs, GEPs and casts in order to determine a common base 766 /// between GEPLHS and RHS. 767 static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS, 768 ICmpInst::Predicate Cond, 769 const DataLayout &DL) { 770 // FIXME: Support vector of pointers. 771 if (GEPLHS->getType()->isVectorTy()) 772 return nullptr; 773 774 if (!GEPLHS->hasAllConstantIndices()) 775 return nullptr; 776 777 // Make sure the pointers have the same type. 778 if (GEPLHS->getType() != RHS->getType()) 779 return nullptr; 780 781 Value *PtrBase, *Index; 782 std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL); 783 784 // The set of nodes that will take part in this transformation. 785 SetVector<Value *> Nodes; 786 787 if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes)) 788 return nullptr; 789 790 // We know we can re-write this as 791 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) 792 // Since we've only looked through inbouds GEPs we know that we 793 // can't have overflow on either side. We can therefore re-write 794 // this as: 795 // OFFSET1 cmp OFFSET2 796 Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes); 797 798 // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written 799 // GEP having PtrBase as the pointer base, and has returned in NewRHS the 800 // offset. Since Index is the offset of LHS to the base pointer, we will now 801 // compare the offsets instead of comparing the pointers. 802 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS); 803 } 804 805 /// Fold comparisons between a GEP instruction and something else. At this point 806 /// we know that the GEP is on the LHS of the comparison. 807 Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS, 808 ICmpInst::Predicate Cond, 809 Instruction &I) { 810 // Don't transform signed compares of GEPs into index compares. Even if the 811 // GEP is inbounds, the final add of the base pointer can have signed overflow 812 // and would change the result of the icmp. 813 // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be 814 // the maximum signed value for the pointer type. 815 if (ICmpInst::isSigned(Cond)) 816 return nullptr; 817 818 // Look through bitcasts and addrspacecasts. We do not however want to remove 819 // 0 GEPs. 820 if (!isa<GetElementPtrInst>(RHS)) 821 RHS = RHS->stripPointerCasts(); 822 823 Value *PtrBase = GEPLHS->getOperand(0); 824 // FIXME: Support vector pointer GEPs. 825 if (PtrBase == RHS && GEPLHS->isInBounds() && 826 !GEPLHS->getType()->isVectorTy()) { 827 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0). 828 // This transformation (ignoring the base and scales) is valid because we 829 // know pointers can't overflow since the gep is inbounds. See if we can 830 // output an optimized form. 831 Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL); 832 833 // If not, synthesize the offset the hard way. 834 if (!Offset) 835 Offset = EmitGEPOffset(GEPLHS); 836 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset, 837 Constant::getNullValue(Offset->getType())); 838 } 839 840 if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) && 841 isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() && 842 !NullPointerIsDefined(I.getFunction(), 843 RHS->getType()->getPointerAddressSpace())) { 844 // For most address spaces, an allocation can't be placed at null, but null 845 // itself is treated as a 0 size allocation in the in bounds rules. Thus, 846 // the only valid inbounds address derived from null, is null itself. 847 // Thus, we have four cases to consider: 848 // 1) Base == nullptr, Offset == 0 -> inbounds, null 849 // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds 850 // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations) 851 // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison) 852 // 853 // (Note if we're indexing a type of size 0, that simply collapses into one 854 // of the buckets above.) 855 // 856 // In general, we're allowed to make values less poison (i.e. remove 857 // sources of full UB), so in this case, we just select between the two 858 // non-poison cases (1 and 4 above). 859 // 860 // For vectors, we apply the same reasoning on a per-lane basis. 861 auto *Base = GEPLHS->getPointerOperand(); 862 if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) { 863 auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount(); 864 Base = Builder.CreateVectorSplat(EC, Base); 865 } 866 return new ICmpInst(Cond, Base, 867 ConstantExpr::getPointerBitCastOrAddrSpaceCast( 868 cast<Constant>(RHS), Base->getType())); 869 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) { 870 // If the base pointers are different, but the indices are the same, just 871 // compare the base pointer. 872 if (PtrBase != GEPRHS->getOperand(0)) { 873 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands(); 874 IndicesTheSame &= GEPLHS->getOperand(0)->getType() == 875 GEPRHS->getOperand(0)->getType(); 876 if (IndicesTheSame) 877 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i) 878 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) { 879 IndicesTheSame = false; 880 break; 881 } 882 883 // If all indices are the same, just compare the base pointers. 884 Type *BaseType = GEPLHS->getOperand(0)->getType(); 885 if (IndicesTheSame && CmpInst::makeCmpResultType(BaseType) == I.getType()) 886 return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0)); 887 888 // If we're comparing GEPs with two base pointers that only differ in type 889 // and both GEPs have only constant indices or just one use, then fold 890 // the compare with the adjusted indices. 891 // FIXME: Support vector of pointers. 892 if (GEPLHS->isInBounds() && GEPRHS->isInBounds() && 893 (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) && 894 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) && 895 PtrBase->stripPointerCasts() == 896 GEPRHS->getOperand(0)->stripPointerCasts() && 897 !GEPLHS->getType()->isVectorTy()) { 898 Value *LOffset = EmitGEPOffset(GEPLHS); 899 Value *ROffset = EmitGEPOffset(GEPRHS); 900 901 // If we looked through an addrspacecast between different sized address 902 // spaces, the LHS and RHS pointers are different sized 903 // integers. Truncate to the smaller one. 904 Type *LHSIndexTy = LOffset->getType(); 905 Type *RHSIndexTy = ROffset->getType(); 906 if (LHSIndexTy != RHSIndexTy) { 907 if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() < 908 RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) { 909 ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy); 910 } else 911 LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy); 912 } 913 914 Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond), 915 LOffset, ROffset); 916 return replaceInstUsesWith(I, Cmp); 917 } 918 919 // Otherwise, the base pointers are different and the indices are 920 // different. Try convert this to an indexed compare by looking through 921 // PHIs/casts. 922 return transformToIndexedCompare(GEPLHS, RHS, Cond, DL); 923 } 924 925 // If one of the GEPs has all zero indices, recurse. 926 // FIXME: Handle vector of pointers. 927 if (!GEPLHS->getType()->isVectorTy() && GEPLHS->hasAllZeroIndices()) 928 return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0), 929 ICmpInst::getSwappedPredicate(Cond), I); 930 931 // If the other GEP has all zero indices, recurse. 932 // FIXME: Handle vector of pointers. 933 if (!GEPRHS->getType()->isVectorTy() && GEPRHS->hasAllZeroIndices()) 934 return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I); 935 936 bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds(); 937 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) { 938 // If the GEPs only differ by one index, compare it. 939 unsigned NumDifferences = 0; // Keep track of # differences. 940 unsigned DiffOperand = 0; // The operand that differs. 941 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i) 942 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) { 943 Type *LHSType = GEPLHS->getOperand(i)->getType(); 944 Type *RHSType = GEPRHS->getOperand(i)->getType(); 945 // FIXME: Better support for vector of pointers. 946 if (LHSType->getPrimitiveSizeInBits() != 947 RHSType->getPrimitiveSizeInBits() || 948 (GEPLHS->getType()->isVectorTy() && 949 (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) { 950 // Irreconcilable differences. 951 NumDifferences = 2; 952 break; 953 } 954 955 if (NumDifferences++) break; 956 DiffOperand = i; 957 } 958 959 if (NumDifferences == 0) // SAME GEP? 960 return replaceInstUsesWith(I, // No comparison is needed here. 961 ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond))); 962 963 else if (NumDifferences == 1 && GEPsInBounds) { 964 Value *LHSV = GEPLHS->getOperand(DiffOperand); 965 Value *RHSV = GEPRHS->getOperand(DiffOperand); 966 // Make sure we do a signed comparison here. 967 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV); 968 } 969 } 970 971 // Only lower this if the icmp is the only user of the GEP or if we expect 972 // the result to fold to a constant! 973 if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) && 974 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) { 975 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2) ---> (OFFSET1 cmp OFFSET2) 976 Value *L = EmitGEPOffset(GEPLHS); 977 Value *R = EmitGEPOffset(GEPRHS); 978 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R); 979 } 980 } 981 982 // Try convert this to an indexed compare by looking through PHIs/casts as a 983 // last resort. 984 return transformToIndexedCompare(GEPLHS, RHS, Cond, DL); 985 } 986 987 Instruction *InstCombinerImpl::foldAllocaCmp(ICmpInst &ICI, 988 const AllocaInst *Alloca, 989 const Value *Other) { 990 assert(ICI.isEquality() && "Cannot fold non-equality comparison."); 991 992 // It would be tempting to fold away comparisons between allocas and any 993 // pointer not based on that alloca (e.g. an argument). However, even 994 // though such pointers cannot alias, they can still compare equal. 995 // 996 // But LLVM doesn't specify where allocas get their memory, so if the alloca 997 // doesn't escape we can argue that it's impossible to guess its value, and we 998 // can therefore act as if any such guesses are wrong. 999 // 1000 // The code below checks that the alloca doesn't escape, and that it's only 1001 // used in a comparison once (the current instruction). The 1002 // single-comparison-use condition ensures that we're trivially folding all 1003 // comparisons against the alloca consistently, and avoids the risk of 1004 // erroneously folding a comparison of the pointer with itself. 1005 1006 unsigned MaxIter = 32; // Break cycles and bound to constant-time. 1007 1008 SmallVector<const Use *, 32> Worklist; 1009 for (const Use &U : Alloca->uses()) { 1010 if (Worklist.size() >= MaxIter) 1011 return nullptr; 1012 Worklist.push_back(&U); 1013 } 1014 1015 unsigned NumCmps = 0; 1016 while (!Worklist.empty()) { 1017 assert(Worklist.size() <= MaxIter); 1018 const Use *U = Worklist.pop_back_val(); 1019 const Value *V = U->getUser(); 1020 --MaxIter; 1021 1022 if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) || 1023 isa<SelectInst>(V)) { 1024 // Track the uses. 1025 } else if (isa<LoadInst>(V)) { 1026 // Loading from the pointer doesn't escape it. 1027 continue; 1028 } else if (const auto *SI = dyn_cast<StoreInst>(V)) { 1029 // Storing *to* the pointer is fine, but storing the pointer escapes it. 1030 if (SI->getValueOperand() == U->get()) 1031 return nullptr; 1032 continue; 1033 } else if (isa<ICmpInst>(V)) { 1034 if (NumCmps++) 1035 return nullptr; // Found more than one cmp. 1036 continue; 1037 } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) { 1038 switch (Intrin->getIntrinsicID()) { 1039 // These intrinsics don't escape or compare the pointer. Memset is safe 1040 // because we don't allow ptrtoint. Memcpy and memmove are safe because 1041 // we don't allow stores, so src cannot point to V. 1042 case Intrinsic::lifetime_start: case Intrinsic::lifetime_end: 1043 case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset: 1044 continue; 1045 default: 1046 return nullptr; 1047 } 1048 } else { 1049 return nullptr; 1050 } 1051 for (const Use &U : V->uses()) { 1052 if (Worklist.size() >= MaxIter) 1053 return nullptr; 1054 Worklist.push_back(&U); 1055 } 1056 } 1057 1058 Type *CmpTy = CmpInst::makeCmpResultType(Other->getType()); 1059 return replaceInstUsesWith( 1060 ICI, 1061 ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate()))); 1062 } 1063 1064 /// Fold "icmp pred (X+C), X". 1065 Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C, 1066 ICmpInst::Predicate Pred) { 1067 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0, 1068 // so the values can never be equal. Similarly for all other "or equals" 1069 // operators. 1070 assert(!!C && "C should not be zero!"); 1071 1072 // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255 1073 // (X+2) <u X --> X >u (MAXUINT-2) --> X > 253 1074 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0 1075 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 1076 Constant *R = ConstantInt::get(X->getType(), 1077 APInt::getMaxValue(C.getBitWidth()) - C); 1078 return new ICmpInst(ICmpInst::ICMP_UGT, X, R); 1079 } 1080 1081 // (X+1) >u X --> X <u (0-1) --> X != 255 1082 // (X+2) >u X --> X <u (0-2) --> X <u 254 1083 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0 1084 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 1085 return new ICmpInst(ICmpInst::ICMP_ULT, X, 1086 ConstantInt::get(X->getType(), -C)); 1087 1088 APInt SMax = APInt::getSignedMaxValue(C.getBitWidth()); 1089 1090 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127 1091 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125 1092 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0 1093 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1 1094 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126 1095 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127 1096 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 1097 return new ICmpInst(ICmpInst::ICMP_SGT, X, 1098 ConstantInt::get(X->getType(), SMax - C)); 1099 1100 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127 1101 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126 1102 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1 1103 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2 1104 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126 1105 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128 1106 1107 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE); 1108 return new ICmpInst(ICmpInst::ICMP_SLT, X, 1109 ConstantInt::get(X->getType(), SMax - (C - 1))); 1110 } 1111 1112 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" -> 1113 /// (icmp eq/ne A, Log2(AP2/AP1)) -> 1114 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)). 1115 Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A, 1116 const APInt &AP1, 1117 const APInt &AP2) { 1118 assert(I.isEquality() && "Cannot fold icmp gt/lt"); 1119 1120 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) { 1121 if (I.getPredicate() == I.ICMP_NE) 1122 Pred = CmpInst::getInversePredicate(Pred); 1123 return new ICmpInst(Pred, LHS, RHS); 1124 }; 1125 1126 // Don't bother doing any work for cases which InstSimplify handles. 1127 if (AP2.isNullValue()) 1128 return nullptr; 1129 1130 bool IsAShr = isa<AShrOperator>(I.getOperand(0)); 1131 if (IsAShr) { 1132 if (AP2.isAllOnesValue()) 1133 return nullptr; 1134 if (AP2.isNegative() != AP1.isNegative()) 1135 return nullptr; 1136 if (AP2.sgt(AP1)) 1137 return nullptr; 1138 } 1139 1140 if (!AP1) 1141 // 'A' must be large enough to shift out the highest set bit. 1142 return getICmp(I.ICMP_UGT, A, 1143 ConstantInt::get(A->getType(), AP2.logBase2())); 1144 1145 if (AP1 == AP2) 1146 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType())); 1147 1148 int Shift; 1149 if (IsAShr && AP1.isNegative()) 1150 Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes(); 1151 else 1152 Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros(); 1153 1154 if (Shift > 0) { 1155 if (IsAShr && AP1 == AP2.ashr(Shift)) { 1156 // There are multiple solutions if we are comparing against -1 and the LHS 1157 // of the ashr is not a power of two. 1158 if (AP1.isAllOnesValue() && !AP2.isPowerOf2()) 1159 return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift)); 1160 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift)); 1161 } else if (AP1 == AP2.lshr(Shift)) { 1162 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift)); 1163 } 1164 } 1165 1166 // Shifting const2 will never be equal to const1. 1167 // FIXME: This should always be handled by InstSimplify? 1168 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE); 1169 return replaceInstUsesWith(I, TorF); 1170 } 1171 1172 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" -> 1173 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)). 1174 Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A, 1175 const APInt &AP1, 1176 const APInt &AP2) { 1177 assert(I.isEquality() && "Cannot fold icmp gt/lt"); 1178 1179 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) { 1180 if (I.getPredicate() == I.ICMP_NE) 1181 Pred = CmpInst::getInversePredicate(Pred); 1182 return new ICmpInst(Pred, LHS, RHS); 1183 }; 1184 1185 // Don't bother doing any work for cases which InstSimplify handles. 1186 if (AP2.isNullValue()) 1187 return nullptr; 1188 1189 unsigned AP2TrailingZeros = AP2.countTrailingZeros(); 1190 1191 if (!AP1 && AP2TrailingZeros != 0) 1192 return getICmp( 1193 I.ICMP_UGE, A, 1194 ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros)); 1195 1196 if (AP1 == AP2) 1197 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType())); 1198 1199 // Get the distance between the lowest bits that are set. 1200 int Shift = AP1.countTrailingZeros() - AP2TrailingZeros; 1201 1202 if (Shift > 0 && AP2.shl(Shift) == AP1) 1203 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift)); 1204 1205 // Shifting const2 will never be equal to const1. 1206 // FIXME: This should always be handled by InstSimplify? 1207 auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE); 1208 return replaceInstUsesWith(I, TorF); 1209 } 1210 1211 /// The caller has matched a pattern of the form: 1212 /// I = icmp ugt (add (add A, B), CI2), CI1 1213 /// If this is of the form: 1214 /// sum = a + b 1215 /// if (sum+128 >u 255) 1216 /// Then replace it with llvm.sadd.with.overflow.i8. 1217 /// 1218 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B, 1219 ConstantInt *CI2, ConstantInt *CI1, 1220 InstCombinerImpl &IC) { 1221 // The transformation we're trying to do here is to transform this into an 1222 // llvm.sadd.with.overflow. To do this, we have to replace the original add 1223 // with a narrower add, and discard the add-with-constant that is part of the 1224 // range check (if we can't eliminate it, this isn't profitable). 1225 1226 // In order to eliminate the add-with-constant, the compare can be its only 1227 // use. 1228 Instruction *AddWithCst = cast<Instruction>(I.getOperand(0)); 1229 if (!AddWithCst->hasOneUse()) 1230 return nullptr; 1231 1232 // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow. 1233 if (!CI2->getValue().isPowerOf2()) 1234 return nullptr; 1235 unsigned NewWidth = CI2->getValue().countTrailingZeros(); 1236 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) 1237 return nullptr; 1238 1239 // The width of the new add formed is 1 more than the bias. 1240 ++NewWidth; 1241 1242 // Check to see that CI1 is an all-ones value with NewWidth bits. 1243 if (CI1->getBitWidth() == NewWidth || 1244 CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth)) 1245 return nullptr; 1246 1247 // This is only really a signed overflow check if the inputs have been 1248 // sign-extended; check for that condition. For example, if CI2 is 2^31 and 1249 // the operands of the add are 64 bits wide, we need at least 33 sign bits. 1250 unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1; 1251 if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits || 1252 IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits) 1253 return nullptr; 1254 1255 // In order to replace the original add with a narrower 1256 // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant 1257 // and truncates that discard the high bits of the add. Verify that this is 1258 // the case. 1259 Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0)); 1260 for (User *U : OrigAdd->users()) { 1261 if (U == AddWithCst) 1262 continue; 1263 1264 // Only accept truncates for now. We would really like a nice recursive 1265 // predicate like SimplifyDemandedBits, but which goes downwards the use-def 1266 // chain to see which bits of a value are actually demanded. If the 1267 // original add had another add which was then immediately truncated, we 1268 // could still do the transformation. 1269 TruncInst *TI = dyn_cast<TruncInst>(U); 1270 if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth) 1271 return nullptr; 1272 } 1273 1274 // If the pattern matches, truncate the inputs to the narrower type and 1275 // use the sadd_with_overflow intrinsic to efficiently compute both the 1276 // result and the overflow bit. 1277 Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth); 1278 Function *F = Intrinsic::getDeclaration( 1279 I.getModule(), Intrinsic::sadd_with_overflow, NewType); 1280 1281 InstCombiner::BuilderTy &Builder = IC.Builder; 1282 1283 // Put the new code above the original add, in case there are any uses of the 1284 // add between the add and the compare. 1285 Builder.SetInsertPoint(OrigAdd); 1286 1287 Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc"); 1288 Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc"); 1289 CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd"); 1290 Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result"); 1291 Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType()); 1292 1293 // The inner add was the result of the narrow add, zero extended to the 1294 // wider type. Replace it with the result computed by the intrinsic. 1295 IC.replaceInstUsesWith(*OrigAdd, ZExt); 1296 IC.eraseInstFromFunction(*OrigAdd); 1297 1298 // The original icmp gets replaced with the overflow value. 1299 return ExtractValueInst::Create(Call, 1, "sadd.overflow"); 1300 } 1301 1302 /// If we have: 1303 /// icmp eq/ne (urem/srem %x, %y), 0 1304 /// iff %y is a power-of-two, we can replace this with a bit test: 1305 /// icmp eq/ne (and %x, (add %y, -1)), 0 1306 Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) { 1307 // This fold is only valid for equality predicates. 1308 if (!I.isEquality()) 1309 return nullptr; 1310 ICmpInst::Predicate Pred; 1311 Value *X, *Y, *Zero; 1312 if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))), 1313 m_CombineAnd(m_Zero(), m_Value(Zero))))) 1314 return nullptr; 1315 if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I)) 1316 return nullptr; 1317 // This may increase instruction count, we don't enforce that Y is a constant. 1318 Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType())); 1319 Value *Masked = Builder.CreateAnd(X, Mask); 1320 return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero); 1321 } 1322 1323 /// Fold equality-comparison between zero and any (maybe truncated) right-shift 1324 /// by one-less-than-bitwidth into a sign test on the original value. 1325 Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) { 1326 Instruction *Val; 1327 ICmpInst::Predicate Pred; 1328 if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero()))) 1329 return nullptr; 1330 1331 Value *X; 1332 Type *XTy; 1333 1334 Constant *C; 1335 if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) { 1336 XTy = X->getType(); 1337 unsigned XBitWidth = XTy->getScalarSizeInBits(); 1338 if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ, 1339 APInt(XBitWidth, XBitWidth - 1)))) 1340 return nullptr; 1341 } else if (isa<BinaryOperator>(Val) && 1342 (X = reassociateShiftAmtsOfTwoSameDirectionShifts( 1343 cast<BinaryOperator>(Val), SQ.getWithInstruction(Val), 1344 /*AnalyzeForSignBitExtraction=*/true))) { 1345 XTy = X->getType(); 1346 } else 1347 return nullptr; 1348 1349 return ICmpInst::Create(Instruction::ICmp, 1350 Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE 1351 : ICmpInst::ICMP_SLT, 1352 X, ConstantInt::getNullValue(XTy)); 1353 } 1354 1355 // Handle icmp pred X, 0 1356 Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) { 1357 CmpInst::Predicate Pred = Cmp.getPredicate(); 1358 if (!match(Cmp.getOperand(1), m_Zero())) 1359 return nullptr; 1360 1361 // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0) 1362 if (Pred == ICmpInst::ICMP_SGT) { 1363 Value *A, *B; 1364 SelectPatternResult SPR = matchSelectPattern(Cmp.getOperand(0), A, B); 1365 if (SPR.Flavor == SPF_SMIN) { 1366 if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT)) 1367 return new ICmpInst(Pred, B, Cmp.getOperand(1)); 1368 if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT)) 1369 return new ICmpInst(Pred, A, Cmp.getOperand(1)); 1370 } 1371 } 1372 1373 if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp)) 1374 return New; 1375 1376 // Given: 1377 // icmp eq/ne (urem %x, %y), 0 1378 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 1379 // icmp eq/ne %x, 0 1380 Value *X, *Y; 1381 if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) && 1382 ICmpInst::isEquality(Pred)) { 1383 KnownBits XKnown = computeKnownBits(X, 0, &Cmp); 1384 KnownBits YKnown = computeKnownBits(Y, 0, &Cmp); 1385 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 1386 return new ICmpInst(Pred, X, Cmp.getOperand(1)); 1387 } 1388 1389 return nullptr; 1390 } 1391 1392 /// Fold icmp Pred X, C. 1393 /// TODO: This code structure does not make sense. The saturating add fold 1394 /// should be moved to some other helper and extended as noted below (it is also 1395 /// possible that code has been made unnecessary - do we canonicalize IR to 1396 /// overflow/saturating intrinsics or not?). 1397 Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) { 1398 // Match the following pattern, which is a common idiom when writing 1399 // overflow-safe integer arithmetic functions. The source performs an addition 1400 // in wider type and explicitly checks for overflow using comparisons against 1401 // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic. 1402 // 1403 // TODO: This could probably be generalized to handle other overflow-safe 1404 // operations if we worked out the formulas to compute the appropriate magic 1405 // constants. 1406 // 1407 // sum = a + b 1408 // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8 1409 CmpInst::Predicate Pred = Cmp.getPredicate(); 1410 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1); 1411 Value *A, *B; 1412 ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI 1413 if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) && 1414 match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2)))) 1415 if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this)) 1416 return Res; 1417 1418 // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...). 1419 Constant *C = dyn_cast<Constant>(Op1); 1420 if (!C) 1421 return nullptr; 1422 1423 if (auto *Phi = dyn_cast<PHINode>(Op0)) 1424 if (all_of(Phi->operands(), [](Value *V) { return isa<Constant>(V); })) { 1425 Type *Ty = Cmp.getType(); 1426 Builder.SetInsertPoint(Phi); 1427 PHINode *NewPhi = 1428 Builder.CreatePHI(Ty, Phi->getNumOperands()); 1429 for (BasicBlock *Predecessor : predecessors(Phi->getParent())) { 1430 auto *Input = 1431 cast<Constant>(Phi->getIncomingValueForBlock(Predecessor)); 1432 auto *BoolInput = ConstantExpr::getCompare(Pred, Input, C); 1433 NewPhi->addIncoming(BoolInput, Predecessor); 1434 } 1435 NewPhi->takeName(&Cmp); 1436 return replaceInstUsesWith(Cmp, NewPhi); 1437 } 1438 1439 return nullptr; 1440 } 1441 1442 /// Canonicalize icmp instructions based on dominating conditions. 1443 Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) { 1444 // This is a cheap/incomplete check for dominance - just match a single 1445 // predecessor with a conditional branch. 1446 BasicBlock *CmpBB = Cmp.getParent(); 1447 BasicBlock *DomBB = CmpBB->getSinglePredecessor(); 1448 if (!DomBB) 1449 return nullptr; 1450 1451 Value *DomCond; 1452 BasicBlock *TrueBB, *FalseBB; 1453 if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB))) 1454 return nullptr; 1455 1456 assert((TrueBB == CmpBB || FalseBB == CmpBB) && 1457 "Predecessor block does not point to successor?"); 1458 1459 // The branch should get simplified. Don't bother simplifying this condition. 1460 if (TrueBB == FalseBB) 1461 return nullptr; 1462 1463 // Try to simplify this compare to T/F based on the dominating condition. 1464 Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB); 1465 if (Imp) 1466 return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp)); 1467 1468 CmpInst::Predicate Pred = Cmp.getPredicate(); 1469 Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1); 1470 ICmpInst::Predicate DomPred; 1471 const APInt *C, *DomC; 1472 if (match(DomCond, m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))) && 1473 match(Y, m_APInt(C))) { 1474 // We have 2 compares of a variable with constants. Calculate the constant 1475 // ranges of those compares to see if we can transform the 2nd compare: 1476 // DomBB: 1477 // DomCond = icmp DomPred X, DomC 1478 // br DomCond, CmpBB, FalseBB 1479 // CmpBB: 1480 // Cmp = icmp Pred X, C 1481 ConstantRange CR = ConstantRange::makeAllowedICmpRegion(Pred, *C); 1482 ConstantRange DominatingCR = 1483 (CmpBB == TrueBB) ? ConstantRange::makeExactICmpRegion(DomPred, *DomC) 1484 : ConstantRange::makeExactICmpRegion( 1485 CmpInst::getInversePredicate(DomPred), *DomC); 1486 ConstantRange Intersection = DominatingCR.intersectWith(CR); 1487 ConstantRange Difference = DominatingCR.difference(CR); 1488 if (Intersection.isEmptySet()) 1489 return replaceInstUsesWith(Cmp, Builder.getFalse()); 1490 if (Difference.isEmptySet()) 1491 return replaceInstUsesWith(Cmp, Builder.getTrue()); 1492 1493 // Canonicalizing a sign bit comparison that gets used in a branch, 1494 // pessimizes codegen by generating branch on zero instruction instead 1495 // of a test and branch. So we avoid canonicalizing in such situations 1496 // because test and branch instruction has better branch displacement 1497 // than compare and branch instruction. 1498 bool UnusedBit; 1499 bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit); 1500 if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp))) 1501 return nullptr; 1502 1503 // Avoid an infinite loop with min/max canonicalization. 1504 // TODO: This will be unnecessary if we canonicalize to min/max intrinsics. 1505 if (Cmp.hasOneUse() && 1506 match(Cmp.user_back(), m_MaxOrMin(m_Value(), m_Value()))) 1507 return nullptr; 1508 1509 if (const APInt *EqC = Intersection.getSingleElement()) 1510 return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC)); 1511 if (const APInt *NeC = Difference.getSingleElement()) 1512 return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC)); 1513 } 1514 1515 return nullptr; 1516 } 1517 1518 /// Fold icmp (trunc X, Y), C. 1519 Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp, 1520 TruncInst *Trunc, 1521 const APInt &C) { 1522 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1523 Value *X = Trunc->getOperand(0); 1524 if (C.isOneValue() && C.getBitWidth() > 1) { 1525 // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1 1526 Value *V = nullptr; 1527 if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V)))) 1528 return new ICmpInst(ICmpInst::ICMP_SLT, V, 1529 ConstantInt::get(V->getType(), 1)); 1530 } 1531 1532 unsigned DstBits = Trunc->getType()->getScalarSizeInBits(), 1533 SrcBits = X->getType()->getScalarSizeInBits(); 1534 if (Cmp.isEquality() && Trunc->hasOneUse()) { 1535 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all 1536 // of the high bits truncated out of x are known. 1537 KnownBits Known = computeKnownBits(X, 0, &Cmp); 1538 1539 // If all the high bits are known, we can do this xform. 1540 if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) { 1541 // Pull in the high bits from known-ones set. 1542 APInt NewRHS = C.zext(SrcBits); 1543 NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits); 1544 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS)); 1545 } 1546 } 1547 1548 // Look through truncated right-shift of the sign-bit for a sign-bit check: 1549 // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] < 0 --> ShOp < 0 1550 // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] > -1 --> ShOp > -1 1551 Value *ShOp; 1552 const APInt *ShAmtC; 1553 bool TrueIfSigned; 1554 if (isSignBitCheck(Pred, C, TrueIfSigned) && 1555 match(X, m_Shr(m_Value(ShOp), m_APInt(ShAmtC))) && 1556 DstBits == SrcBits - ShAmtC->getZExtValue()) { 1557 return TrueIfSigned 1558 ? new ICmpInst(ICmpInst::ICMP_SLT, ShOp, 1559 ConstantInt::getNullValue(X->getType())) 1560 : new ICmpInst(ICmpInst::ICMP_SGT, ShOp, 1561 ConstantInt::getAllOnesValue(X->getType())); 1562 } 1563 1564 return nullptr; 1565 } 1566 1567 /// Fold icmp (xor X, Y), C. 1568 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp, 1569 BinaryOperator *Xor, 1570 const APInt &C) { 1571 Value *X = Xor->getOperand(0); 1572 Value *Y = Xor->getOperand(1); 1573 const APInt *XorC; 1574 if (!match(Y, m_APInt(XorC))) 1575 return nullptr; 1576 1577 // If this is a comparison that tests the signbit (X < 0) or (x > -1), 1578 // fold the xor. 1579 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1580 bool TrueIfSigned = false; 1581 if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) { 1582 1583 // If the sign bit of the XorCst is not set, there is no change to 1584 // the operation, just stop using the Xor. 1585 if (!XorC->isNegative()) 1586 return replaceOperand(Cmp, 0, X); 1587 1588 // Emit the opposite comparison. 1589 if (TrueIfSigned) 1590 return new ICmpInst(ICmpInst::ICMP_SGT, X, 1591 ConstantInt::getAllOnesValue(X->getType())); 1592 else 1593 return new ICmpInst(ICmpInst::ICMP_SLT, X, 1594 ConstantInt::getNullValue(X->getType())); 1595 } 1596 1597 if (Xor->hasOneUse()) { 1598 // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask)) 1599 if (!Cmp.isEquality() && XorC->isSignMask()) { 1600 Pred = Cmp.getFlippedSignednessPredicate(); 1601 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC)); 1602 } 1603 1604 // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask)) 1605 if (!Cmp.isEquality() && XorC->isMaxSignedValue()) { 1606 Pred = Cmp.getFlippedSignednessPredicate(); 1607 Pred = Cmp.getSwappedPredicate(Pred); 1608 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC)); 1609 } 1610 } 1611 1612 // Mask constant magic can eliminate an 'xor' with unsigned compares. 1613 if (Pred == ICmpInst::ICMP_UGT) { 1614 // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2) 1615 if (*XorC == ~C && (C + 1).isPowerOf2()) 1616 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 1617 // (xor X, C) >u C --> X >u C (when C+1 is a power of 2) 1618 if (*XorC == C && (C + 1).isPowerOf2()) 1619 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y); 1620 } 1621 if (Pred == ICmpInst::ICMP_ULT) { 1622 // (xor X, -C) <u C --> X >u ~C (when C is a power of 2) 1623 if (*XorC == -C && C.isPowerOf2()) 1624 return new ICmpInst(ICmpInst::ICMP_UGT, X, 1625 ConstantInt::get(X->getType(), ~C)); 1626 // (xor X, C) <u C --> X >u ~C (when -C is a power of 2) 1627 if (*XorC == C && (-C).isPowerOf2()) 1628 return new ICmpInst(ICmpInst::ICMP_UGT, X, 1629 ConstantInt::get(X->getType(), ~C)); 1630 } 1631 return nullptr; 1632 } 1633 1634 /// Fold icmp (and (sh X, Y), C2), C1. 1635 Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp, 1636 BinaryOperator *And, 1637 const APInt &C1, 1638 const APInt &C2) { 1639 BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0)); 1640 if (!Shift || !Shift->isShift()) 1641 return nullptr; 1642 1643 // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could 1644 // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in 1645 // code produced by the clang front-end, for bitfield access. 1646 // This seemingly simple opportunity to fold away a shift turns out to be 1647 // rather complicated. See PR17827 for details. 1648 unsigned ShiftOpcode = Shift->getOpcode(); 1649 bool IsShl = ShiftOpcode == Instruction::Shl; 1650 const APInt *C3; 1651 if (match(Shift->getOperand(1), m_APInt(C3))) { 1652 APInt NewAndCst, NewCmpCst; 1653 bool AnyCmpCstBitsShiftedOut; 1654 if (ShiftOpcode == Instruction::Shl) { 1655 // For a left shift, we can fold if the comparison is not signed. We can 1656 // also fold a signed comparison if the mask value and comparison value 1657 // are not negative. These constraints may not be obvious, but we can 1658 // prove that they are correct using an SMT solver. 1659 if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative())) 1660 return nullptr; 1661 1662 NewCmpCst = C1.lshr(*C3); 1663 NewAndCst = C2.lshr(*C3); 1664 AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1; 1665 } else if (ShiftOpcode == Instruction::LShr) { 1666 // For a logical right shift, we can fold if the comparison is not signed. 1667 // We can also fold a signed comparison if the shifted mask value and the 1668 // shifted comparison value are not negative. These constraints may not be 1669 // obvious, but we can prove that they are correct using an SMT solver. 1670 NewCmpCst = C1.shl(*C3); 1671 NewAndCst = C2.shl(*C3); 1672 AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1; 1673 if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative())) 1674 return nullptr; 1675 } else { 1676 // For an arithmetic shift, check that both constants don't use (in a 1677 // signed sense) the top bits being shifted out. 1678 assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode"); 1679 NewCmpCst = C1.shl(*C3); 1680 NewAndCst = C2.shl(*C3); 1681 AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1; 1682 if (NewAndCst.ashr(*C3) != C2) 1683 return nullptr; 1684 } 1685 1686 if (AnyCmpCstBitsShiftedOut) { 1687 // If we shifted bits out, the fold is not going to work out. As a 1688 // special case, check to see if this means that the result is always 1689 // true or false now. 1690 if (Cmp.getPredicate() == ICmpInst::ICMP_EQ) 1691 return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType())); 1692 if (Cmp.getPredicate() == ICmpInst::ICMP_NE) 1693 return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType())); 1694 } else { 1695 Value *NewAnd = Builder.CreateAnd( 1696 Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst)); 1697 return new ICmpInst(Cmp.getPredicate(), 1698 NewAnd, ConstantInt::get(And->getType(), NewCmpCst)); 1699 } 1700 } 1701 1702 // Turn ((X >> Y) & C2) == 0 into (X & (C2 << Y)) == 0. The latter is 1703 // preferable because it allows the C2 << Y expression to be hoisted out of a 1704 // loop if Y is invariant and X is not. 1705 if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() && 1706 !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) { 1707 // Compute C2 << Y. 1708 Value *NewShift = 1709 IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1)) 1710 : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1)); 1711 1712 // Compute X & (C2 << Y). 1713 Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift); 1714 return replaceOperand(Cmp, 0, NewAnd); 1715 } 1716 1717 return nullptr; 1718 } 1719 1720 /// Fold icmp (and X, C2), C1. 1721 Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp, 1722 BinaryOperator *And, 1723 const APInt &C1) { 1724 bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE; 1725 1726 // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1 1727 // TODO: We canonicalize to the longer form for scalars because we have 1728 // better analysis/folds for icmp, and codegen may be better with icmp. 1729 if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isNullValue() && 1730 match(And->getOperand(1), m_One())) 1731 return new TruncInst(And->getOperand(0), Cmp.getType()); 1732 1733 const APInt *C2; 1734 Value *X; 1735 if (!match(And, m_And(m_Value(X), m_APInt(C2)))) 1736 return nullptr; 1737 1738 // Don't perform the following transforms if the AND has multiple uses 1739 if (!And->hasOneUse()) 1740 return nullptr; 1741 1742 if (Cmp.isEquality() && C1.isNullValue()) { 1743 // Restrict this fold to single-use 'and' (PR10267). 1744 // Replace (and X, (1 << size(X)-1) != 0) with X s< 0 1745 if (C2->isSignMask()) { 1746 Constant *Zero = Constant::getNullValue(X->getType()); 1747 auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE; 1748 return new ICmpInst(NewPred, X, Zero); 1749 } 1750 1751 // Restrict this fold only for single-use 'and' (PR10267). 1752 // ((%x & C) == 0) --> %x u< (-C) iff (-C) is power of two. 1753 if ((~(*C2) + 1).isPowerOf2()) { 1754 Constant *NegBOC = 1755 ConstantExpr::getNeg(cast<Constant>(And->getOperand(1))); 1756 auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 1757 return new ICmpInst(NewPred, X, NegBOC); 1758 } 1759 } 1760 1761 // If the LHS is an 'and' of a truncate and we can widen the and/compare to 1762 // the input width without changing the value produced, eliminate the cast: 1763 // 1764 // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1' 1765 // 1766 // We can do this transformation if the constants do not have their sign bits 1767 // set or if it is an equality comparison. Extending a relational comparison 1768 // when we're checking the sign bit would not work. 1769 Value *W; 1770 if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) && 1771 (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) { 1772 // TODO: Is this a good transform for vectors? Wider types may reduce 1773 // throughput. Should this transform be limited (even for scalars) by using 1774 // shouldChangeType()? 1775 if (!Cmp.getType()->isVectorTy()) { 1776 Type *WideType = W->getType(); 1777 unsigned WideScalarBits = WideType->getScalarSizeInBits(); 1778 Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits)); 1779 Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits)); 1780 Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName()); 1781 return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1); 1782 } 1783 } 1784 1785 if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2)) 1786 return I; 1787 1788 // (icmp pred (and (or (lshr A, B), A), 1), 0) --> 1789 // (icmp pred (and A, (or (shl 1, B), 1), 0)) 1790 // 1791 // iff pred isn't signed 1792 if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() && 1793 match(And->getOperand(1), m_One())) { 1794 Constant *One = cast<Constant>(And->getOperand(1)); 1795 Value *Or = And->getOperand(0); 1796 Value *A, *B, *LShr; 1797 if (match(Or, m_Or(m_Value(LShr), m_Value(A))) && 1798 match(LShr, m_LShr(m_Specific(A), m_Value(B)))) { 1799 unsigned UsesRemoved = 0; 1800 if (And->hasOneUse()) 1801 ++UsesRemoved; 1802 if (Or->hasOneUse()) 1803 ++UsesRemoved; 1804 if (LShr->hasOneUse()) 1805 ++UsesRemoved; 1806 1807 // Compute A & ((1 << B) | 1) 1808 Value *NewOr = nullptr; 1809 if (auto *C = dyn_cast<Constant>(B)) { 1810 if (UsesRemoved >= 1) 1811 NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One); 1812 } else { 1813 if (UsesRemoved >= 3) 1814 NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(), 1815 /*HasNUW=*/true), 1816 One, Or->getName()); 1817 } 1818 if (NewOr) { 1819 Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName()); 1820 return replaceOperand(Cmp, 0, NewAnd); 1821 } 1822 } 1823 } 1824 1825 return nullptr; 1826 } 1827 1828 /// Fold icmp (and X, Y), C. 1829 Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp, 1830 BinaryOperator *And, 1831 const APInt &C) { 1832 if (Instruction *I = foldICmpAndConstConst(Cmp, And, C)) 1833 return I; 1834 1835 // TODO: These all require that Y is constant too, so refactor with the above. 1836 1837 // Try to optimize things like "A[i] & 42 == 0" to index computations. 1838 Value *X = And->getOperand(0); 1839 Value *Y = And->getOperand(1); 1840 if (auto *LI = dyn_cast<LoadInst>(X)) 1841 if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0))) 1842 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 1843 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 1844 !LI->isVolatile() && isa<ConstantInt>(Y)) { 1845 ConstantInt *C2 = cast<ConstantInt>(Y); 1846 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2)) 1847 return Res; 1848 } 1849 1850 if (!Cmp.isEquality()) 1851 return nullptr; 1852 1853 // X & -C == -C -> X > u ~C 1854 // X & -C != -C -> X <= u ~C 1855 // iff C is a power of 2 1856 if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) { 1857 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT 1858 : CmpInst::ICMP_ULE; 1859 return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1)))); 1860 } 1861 1862 // (X & C2) == 0 -> (trunc X) >= 0 1863 // (X & C2) != 0 -> (trunc X) < 0 1864 // iff C2 is a power of 2 and it masks the sign bit of a legal integer type. 1865 const APInt *C2; 1866 if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) { 1867 int32_t ExactLogBase2 = C2->exactLogBase2(); 1868 if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) { 1869 Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); 1870 if (auto *AndVTy = dyn_cast<VectorType>(And->getType())) 1871 NTy = VectorType::get(NTy, AndVTy->getElementCount()); 1872 Value *Trunc = Builder.CreateTrunc(X, NTy); 1873 auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE 1874 : CmpInst::ICMP_SLT; 1875 return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy)); 1876 } 1877 } 1878 1879 return nullptr; 1880 } 1881 1882 /// Fold icmp (or X, Y), C. 1883 Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp, 1884 BinaryOperator *Or, 1885 const APInt &C) { 1886 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1887 if (C.isOneValue()) { 1888 // icmp slt signum(V) 1 --> icmp slt V, 1 1889 Value *V = nullptr; 1890 if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V)))) 1891 return new ICmpInst(ICmpInst::ICMP_SLT, V, 1892 ConstantInt::get(V->getType(), 1)); 1893 } 1894 1895 Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1); 1896 const APInt *MaskC; 1897 if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) { 1898 if (*MaskC == C && (C + 1).isPowerOf2()) { 1899 // X | C == C --> X <=u C 1900 // X | C != C --> X >u C 1901 // iff C+1 is a power of 2 (C is a bitmask of the low bits) 1902 Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT; 1903 return new ICmpInst(Pred, OrOp0, OrOp1); 1904 } 1905 1906 // More general: canonicalize 'equality with set bits mask' to 1907 // 'equality with clear bits mask'. 1908 // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC 1909 // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC 1910 if (Or->hasOneUse()) { 1911 Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC)); 1912 Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC)); 1913 return new ICmpInst(Pred, And, NewC); 1914 } 1915 } 1916 1917 if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse()) 1918 return nullptr; 1919 1920 Value *P, *Q; 1921 if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) { 1922 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0 1923 // -> and (icmp eq P, null), (icmp eq Q, null). 1924 Value *CmpP = 1925 Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType())); 1926 Value *CmpQ = 1927 Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType())); 1928 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1929 return BinaryOperator::Create(BOpc, CmpP, CmpQ); 1930 } 1931 1932 // Are we using xors to bitwise check for a pair of (in)equalities? Convert to 1933 // a shorter form that has more potential to be folded even further. 1934 Value *X1, *X2, *X3, *X4; 1935 if (match(OrOp0, m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) && 1936 match(OrOp1, m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) { 1937 // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4) 1938 // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4) 1939 Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2); 1940 Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4); 1941 auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or; 1942 return BinaryOperator::Create(BOpc, Cmp12, Cmp34); 1943 } 1944 1945 return nullptr; 1946 } 1947 1948 /// Fold icmp (mul X, Y), C. 1949 Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp, 1950 BinaryOperator *Mul, 1951 const APInt &C) { 1952 const APInt *MulC; 1953 if (!match(Mul->getOperand(1), m_APInt(MulC))) 1954 return nullptr; 1955 1956 // If this is a test of the sign bit and the multiply is sign-preserving with 1957 // a constant operand, use the multiply LHS operand instead. 1958 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1959 if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) { 1960 if (MulC->isNegative()) 1961 Pred = ICmpInst::getSwappedPredicate(Pred); 1962 return new ICmpInst(Pred, Mul->getOperand(0), 1963 Constant::getNullValue(Mul->getType())); 1964 } 1965 1966 // If the multiply does not wrap, try to divide the compare constant by the 1967 // multiplication factor. 1968 if (Cmp.isEquality() && !MulC->isNullValue()) { 1969 // (mul nsw X, MulC) == C --> X == C /s MulC 1970 if (Mul->hasNoSignedWrap() && C.srem(*MulC).isNullValue()) { 1971 Constant *NewC = ConstantInt::get(Mul->getType(), C.sdiv(*MulC)); 1972 return new ICmpInst(Pred, Mul->getOperand(0), NewC); 1973 } 1974 // (mul nuw X, MulC) == C --> X == C /u MulC 1975 if (Mul->hasNoUnsignedWrap() && C.urem(*MulC).isNullValue()) { 1976 Constant *NewC = ConstantInt::get(Mul->getType(), C.udiv(*MulC)); 1977 return new ICmpInst(Pred, Mul->getOperand(0), NewC); 1978 } 1979 } 1980 1981 return nullptr; 1982 } 1983 1984 /// Fold icmp (shl 1, Y), C. 1985 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl, 1986 const APInt &C) { 1987 Value *Y; 1988 if (!match(Shl, m_Shl(m_One(), m_Value(Y)))) 1989 return nullptr; 1990 1991 Type *ShiftType = Shl->getType(); 1992 unsigned TypeBits = C.getBitWidth(); 1993 bool CIsPowerOf2 = C.isPowerOf2(); 1994 ICmpInst::Predicate Pred = Cmp.getPredicate(); 1995 if (Cmp.isUnsigned()) { 1996 // (1 << Y) pred C -> Y pred Log2(C) 1997 if (!CIsPowerOf2) { 1998 // (1 << Y) < 30 -> Y <= 4 1999 // (1 << Y) <= 30 -> Y <= 4 2000 // (1 << Y) >= 30 -> Y > 4 2001 // (1 << Y) > 30 -> Y > 4 2002 if (Pred == ICmpInst::ICMP_ULT) 2003 Pred = ICmpInst::ICMP_ULE; 2004 else if (Pred == ICmpInst::ICMP_UGE) 2005 Pred = ICmpInst::ICMP_UGT; 2006 } 2007 2008 // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31 2009 // (1 << Y) < 2147483648 -> Y < 31 -> Y != 31 2010 unsigned CLog2 = C.logBase2(); 2011 if (CLog2 == TypeBits - 1) { 2012 if (Pred == ICmpInst::ICMP_UGE) 2013 Pred = ICmpInst::ICMP_EQ; 2014 else if (Pred == ICmpInst::ICMP_ULT) 2015 Pred = ICmpInst::ICMP_NE; 2016 } 2017 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2)); 2018 } else if (Cmp.isSigned()) { 2019 Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1); 2020 if (C.isAllOnesValue()) { 2021 // (1 << Y) <= -1 -> Y == 31 2022 if (Pred == ICmpInst::ICMP_SLE) 2023 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 2024 2025 // (1 << Y) > -1 -> Y != 31 2026 if (Pred == ICmpInst::ICMP_SGT) 2027 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 2028 } else if (!C) { 2029 // (1 << Y) < 0 -> Y == 31 2030 // (1 << Y) <= 0 -> Y == 31 2031 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 2032 return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne); 2033 2034 // (1 << Y) >= 0 -> Y != 31 2035 // (1 << Y) > 0 -> Y != 31 2036 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 2037 return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne); 2038 } 2039 } else if (Cmp.isEquality() && CIsPowerOf2) { 2040 return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2())); 2041 } 2042 2043 return nullptr; 2044 } 2045 2046 /// Fold icmp (shl X, Y), C. 2047 Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp, 2048 BinaryOperator *Shl, 2049 const APInt &C) { 2050 const APInt *ShiftVal; 2051 if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal))) 2052 return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal); 2053 2054 const APInt *ShiftAmt; 2055 if (!match(Shl->getOperand(1), m_APInt(ShiftAmt))) 2056 return foldICmpShlOne(Cmp, Shl, C); 2057 2058 // Check that the shift amount is in range. If not, don't perform undefined 2059 // shifts. When the shift is visited, it will be simplified. 2060 unsigned TypeBits = C.getBitWidth(); 2061 if (ShiftAmt->uge(TypeBits)) 2062 return nullptr; 2063 2064 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2065 Value *X = Shl->getOperand(0); 2066 Type *ShType = Shl->getType(); 2067 2068 // NSW guarantees that we are only shifting out sign bits from the high bits, 2069 // so we can ASHR the compare constant without needing a mask and eliminate 2070 // the shift. 2071 if (Shl->hasNoSignedWrap()) { 2072 if (Pred == ICmpInst::ICMP_SGT) { 2073 // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt) 2074 APInt ShiftedC = C.ashr(*ShiftAmt); 2075 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 2076 } 2077 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) && 2078 C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) { 2079 APInt ShiftedC = C.ashr(*ShiftAmt); 2080 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 2081 } 2082 if (Pred == ICmpInst::ICMP_SLT) { 2083 // SLE is the same as above, but SLE is canonicalized to SLT, so convert: 2084 // (X << S) <=s C is equiv to X <=s (C >> S) for all C 2085 // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX 2086 // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN 2087 assert(!C.isMinSignedValue() && "Unexpected icmp slt"); 2088 APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1; 2089 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 2090 } 2091 // If this is a signed comparison to 0 and the shift is sign preserving, 2092 // use the shift LHS operand instead; isSignTest may change 'Pred', so only 2093 // do that if we're sure to not continue on in this function. 2094 if (isSignTest(Pred, C)) 2095 return new ICmpInst(Pred, X, Constant::getNullValue(ShType)); 2096 } 2097 2098 // NUW guarantees that we are only shifting out zero bits from the high bits, 2099 // so we can LSHR the compare constant without needing a mask and eliminate 2100 // the shift. 2101 if (Shl->hasNoUnsignedWrap()) { 2102 if (Pred == ICmpInst::ICMP_UGT) { 2103 // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt) 2104 APInt ShiftedC = C.lshr(*ShiftAmt); 2105 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 2106 } 2107 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) && 2108 C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) { 2109 APInt ShiftedC = C.lshr(*ShiftAmt); 2110 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 2111 } 2112 if (Pred == ICmpInst::ICMP_ULT) { 2113 // ULE is the same as above, but ULE is canonicalized to ULT, so convert: 2114 // (X << S) <=u C is equiv to X <=u (C >> S) for all C 2115 // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u 2116 // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0 2117 assert(C.ugt(0) && "ult 0 should have been eliminated"); 2118 APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1; 2119 return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC)); 2120 } 2121 } 2122 2123 if (Cmp.isEquality() && Shl->hasOneUse()) { 2124 // Strength-reduce the shift into an 'and'. 2125 Constant *Mask = ConstantInt::get( 2126 ShType, 2127 APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue())); 2128 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask"); 2129 Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt)); 2130 return new ICmpInst(Pred, And, LShrC); 2131 } 2132 2133 // Otherwise, if this is a comparison of the sign bit, simplify to and/test. 2134 bool TrueIfSigned = false; 2135 if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) { 2136 // (X << 31) <s 0 --> (X & 1) != 0 2137 Constant *Mask = ConstantInt::get( 2138 ShType, 2139 APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1)); 2140 Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask"); 2141 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ, 2142 And, Constant::getNullValue(ShType)); 2143 } 2144 2145 // Simplify 'shl' inequality test into 'and' equality test. 2146 if (Cmp.isUnsigned() && Shl->hasOneUse()) { 2147 // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0 2148 if ((C + 1).isPowerOf2() && 2149 (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) { 2150 Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue())); 2151 return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ 2152 : ICmpInst::ICMP_NE, 2153 And, Constant::getNullValue(ShType)); 2154 } 2155 // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0 2156 if (C.isPowerOf2() && 2157 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) { 2158 Value *And = 2159 Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue())); 2160 return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ 2161 : ICmpInst::ICMP_NE, 2162 And, Constant::getNullValue(ShType)); 2163 } 2164 } 2165 2166 // Transform (icmp pred iM (shl iM %v, N), C) 2167 // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N)) 2168 // Transform the shl to a trunc if (trunc (C>>N)) has no loss and M-N. 2169 // This enables us to get rid of the shift in favor of a trunc that may be 2170 // free on the target. It has the additional benefit of comparing to a 2171 // smaller constant that may be more target-friendly. 2172 unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1); 2173 if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt && 2174 DL.isLegalInteger(TypeBits - Amt)) { 2175 Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt); 2176 if (auto *ShVTy = dyn_cast<VectorType>(ShType)) 2177 TruncTy = VectorType::get(TruncTy, ShVTy->getElementCount()); 2178 Constant *NewC = 2179 ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt)); 2180 return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC); 2181 } 2182 2183 return nullptr; 2184 } 2185 2186 /// Fold icmp ({al}shr X, Y), C. 2187 Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp, 2188 BinaryOperator *Shr, 2189 const APInt &C) { 2190 // An exact shr only shifts out zero bits, so: 2191 // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0 2192 Value *X = Shr->getOperand(0); 2193 CmpInst::Predicate Pred = Cmp.getPredicate(); 2194 if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() && 2195 C.isNullValue()) 2196 return new ICmpInst(Pred, X, Cmp.getOperand(1)); 2197 2198 const APInt *ShiftVal; 2199 if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal))) 2200 return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal); 2201 2202 const APInt *ShiftAmt; 2203 if (!match(Shr->getOperand(1), m_APInt(ShiftAmt))) 2204 return nullptr; 2205 2206 // Check that the shift amount is in range. If not, don't perform undefined 2207 // shifts. When the shift is visited it will be simplified. 2208 unsigned TypeBits = C.getBitWidth(); 2209 unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits); 2210 if (ShAmtVal >= TypeBits || ShAmtVal == 0) 2211 return nullptr; 2212 2213 bool IsAShr = Shr->getOpcode() == Instruction::AShr; 2214 bool IsExact = Shr->isExact(); 2215 Type *ShrTy = Shr->getType(); 2216 // TODO: If we could guarantee that InstSimplify would handle all of the 2217 // constant-value-based preconditions in the folds below, then we could assert 2218 // those conditions rather than checking them. This is difficult because of 2219 // undef/poison (PR34838). 2220 if (IsAShr) { 2221 if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) { 2222 // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC) 2223 // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC) 2224 APInt ShiftedC = C.shl(ShAmtVal); 2225 if (ShiftedC.ashr(ShAmtVal) == C) 2226 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2227 } 2228 if (Pred == CmpInst::ICMP_SGT) { 2229 // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1 2230 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1; 2231 if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() && 2232 (ShiftedC + 1).ashr(ShAmtVal) == (C + 1)) 2233 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2234 } 2235 2236 // If the compare constant has significant bits above the lowest sign-bit, 2237 // then convert an unsigned cmp to a test of the sign-bit: 2238 // (ashr X, ShiftC) u> C --> X s< 0 2239 // (ashr X, ShiftC) u< C --> X s> -1 2240 if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) { 2241 if (Pred == CmpInst::ICMP_UGT) { 2242 return new ICmpInst(CmpInst::ICMP_SLT, X, 2243 ConstantInt::getNullValue(ShrTy)); 2244 } 2245 if (Pred == CmpInst::ICMP_ULT) { 2246 return new ICmpInst(CmpInst::ICMP_SGT, X, 2247 ConstantInt::getAllOnesValue(ShrTy)); 2248 } 2249 } 2250 } else { 2251 if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) { 2252 // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC) 2253 // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC) 2254 APInt ShiftedC = C.shl(ShAmtVal); 2255 if (ShiftedC.lshr(ShAmtVal) == C) 2256 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2257 } 2258 if (Pred == CmpInst::ICMP_UGT) { 2259 // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1 2260 APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1; 2261 if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1)) 2262 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC)); 2263 } 2264 } 2265 2266 if (!Cmp.isEquality()) 2267 return nullptr; 2268 2269 // Handle equality comparisons of shift-by-constant. 2270 2271 // If the comparison constant changes with the shift, the comparison cannot 2272 // succeed (bits of the comparison constant cannot match the shifted value). 2273 // This should be known by InstSimplify and already be folded to true/false. 2274 assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) || 2275 (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) && 2276 "Expected icmp+shr simplify did not occur."); 2277 2278 // If the bits shifted out are known zero, compare the unshifted value: 2279 // (X & 4) >> 1 == 2 --> (X & 4) == 4. 2280 if (Shr->isExact()) 2281 return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal)); 2282 2283 if (Shr->hasOneUse()) { 2284 // Canonicalize the shift into an 'and': 2285 // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt) 2286 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal)); 2287 Constant *Mask = ConstantInt::get(ShrTy, Val); 2288 Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask"); 2289 return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal)); 2290 } 2291 2292 return nullptr; 2293 } 2294 2295 Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp, 2296 BinaryOperator *SRem, 2297 const APInt &C) { 2298 // Match an 'is positive' or 'is negative' comparison of remainder by a 2299 // constant power-of-2 value: 2300 // (X % pow2C) sgt/slt 0 2301 const ICmpInst::Predicate Pred = Cmp.getPredicate(); 2302 if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT) 2303 return nullptr; 2304 2305 // TODO: The one-use check is standard because we do not typically want to 2306 // create longer instruction sequences, but this might be a special-case 2307 // because srem is not good for analysis or codegen. 2308 if (!SRem->hasOneUse()) 2309 return nullptr; 2310 2311 const APInt *DivisorC; 2312 if (!C.isNullValue() || !match(SRem->getOperand(1), m_Power2(DivisorC))) 2313 return nullptr; 2314 2315 // Mask off the sign bit and the modulo bits (low-bits). 2316 Type *Ty = SRem->getType(); 2317 APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits()); 2318 Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1)); 2319 Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC); 2320 2321 // For 'is positive?' check that the sign-bit is clear and at least 1 masked 2322 // bit is set. Example: 2323 // (i8 X % 32) s> 0 --> (X & 159) s> 0 2324 if (Pred == ICmpInst::ICMP_SGT) 2325 return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty)); 2326 2327 // For 'is negative?' check that the sign-bit is set and at least 1 masked 2328 // bit is set. Example: 2329 // (i16 X % 4) s< 0 --> (X & 32771) u> 32768 2330 return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask)); 2331 } 2332 2333 /// Fold icmp (udiv X, Y), C. 2334 Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp, 2335 BinaryOperator *UDiv, 2336 const APInt &C) { 2337 const APInt *C2; 2338 if (!match(UDiv->getOperand(0), m_APInt(C2))) 2339 return nullptr; 2340 2341 assert(*C2 != 0 && "udiv 0, X should have been simplified already."); 2342 2343 // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1)) 2344 Value *Y = UDiv->getOperand(1); 2345 if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) { 2346 assert(!C.isMaxValue() && 2347 "icmp ugt X, UINT_MAX should have been simplified already."); 2348 return new ICmpInst(ICmpInst::ICMP_ULE, Y, 2349 ConstantInt::get(Y->getType(), C2->udiv(C + 1))); 2350 } 2351 2352 // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C) 2353 if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) { 2354 assert(C != 0 && "icmp ult X, 0 should have been simplified already."); 2355 return new ICmpInst(ICmpInst::ICMP_UGT, Y, 2356 ConstantInt::get(Y->getType(), C2->udiv(C))); 2357 } 2358 2359 return nullptr; 2360 } 2361 2362 /// Fold icmp ({su}div X, Y), C. 2363 Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp, 2364 BinaryOperator *Div, 2365 const APInt &C) { 2366 // Fold: icmp pred ([us]div X, C2), C -> range test 2367 // Fold this div into the comparison, producing a range check. 2368 // Determine, based on the divide type, what the range is being 2369 // checked. If there is an overflow on the low or high side, remember 2370 // it, otherwise compute the range [low, hi) bounding the new value. 2371 // See: InsertRangeTest above for the kinds of replacements possible. 2372 const APInt *C2; 2373 if (!match(Div->getOperand(1), m_APInt(C2))) 2374 return nullptr; 2375 2376 // FIXME: If the operand types don't match the type of the divide 2377 // then don't attempt this transform. The code below doesn't have the 2378 // logic to deal with a signed divide and an unsigned compare (and 2379 // vice versa). This is because (x /s C2) <s C produces different 2380 // results than (x /s C2) <u C or (x /u C2) <s C or even 2381 // (x /u C2) <u C. Simply casting the operands and result won't 2382 // work. :( The if statement below tests that condition and bails 2383 // if it finds it. 2384 bool DivIsSigned = Div->getOpcode() == Instruction::SDiv; 2385 if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned()) 2386 return nullptr; 2387 2388 // The ProdOV computation fails on divide by 0 and divide by -1. Cases with 2389 // INT_MIN will also fail if the divisor is 1. Although folds of all these 2390 // division-by-constant cases should be present, we can not assert that they 2391 // have happened before we reach this icmp instruction. 2392 if (C2->isNullValue() || C2->isOneValue() || 2393 (DivIsSigned && C2->isAllOnesValue())) 2394 return nullptr; 2395 2396 // Compute Prod = C * C2. We are essentially solving an equation of 2397 // form X / C2 = C. We solve for X by multiplying C2 and C. 2398 // By solving for X, we can turn this into a range check instead of computing 2399 // a divide. 2400 APInt Prod = C * *C2; 2401 2402 // Determine if the product overflows by seeing if the product is not equal to 2403 // the divide. Make sure we do the same kind of divide as in the LHS 2404 // instruction that we're folding. 2405 bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C; 2406 2407 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2408 2409 // If the division is known to be exact, then there is no remainder from the 2410 // divide, so the covered range size is unit, otherwise it is the divisor. 2411 APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2; 2412 2413 // Figure out the interval that is being checked. For example, a comparison 2414 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5). 2415 // Compute this interval based on the constants involved and the signedness of 2416 // the compare/divide. This computes a half-open interval, keeping track of 2417 // whether either value in the interval overflows. After analysis each 2418 // overflow variable is set to 0 if it's corresponding bound variable is valid 2419 // -1 if overflowed off the bottom end, or +1 if overflowed off the top end. 2420 int LoOverflow = 0, HiOverflow = 0; 2421 APInt LoBound, HiBound; 2422 2423 if (!DivIsSigned) { // udiv 2424 // e.g. X/5 op 3 --> [15, 20) 2425 LoBound = Prod; 2426 HiOverflow = LoOverflow = ProdOV; 2427 if (!HiOverflow) { 2428 // If this is not an exact divide, then many values in the range collapse 2429 // to the same result value. 2430 HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false); 2431 } 2432 } else if (C2->isStrictlyPositive()) { // Divisor is > 0. 2433 if (C.isNullValue()) { // (X / pos) op 0 2434 // Can't overflow. e.g. X/2 op 0 --> [-1, 2) 2435 LoBound = -(RangeSize - 1); 2436 HiBound = RangeSize; 2437 } else if (C.isStrictlyPositive()) { // (X / pos) op pos 2438 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20) 2439 HiOverflow = LoOverflow = ProdOV; 2440 if (!HiOverflow) 2441 HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true); 2442 } else { // (X / pos) op neg 2443 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14) 2444 HiBound = Prod + 1; 2445 LoOverflow = HiOverflow = ProdOV ? -1 : 0; 2446 if (!LoOverflow) { 2447 APInt DivNeg = -RangeSize; 2448 LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0; 2449 } 2450 } 2451 } else if (C2->isNegative()) { // Divisor is < 0. 2452 if (Div->isExact()) 2453 RangeSize.negate(); 2454 if (C.isNullValue()) { // (X / neg) op 0 2455 // e.g. X/-5 op 0 --> [-4, 5) 2456 LoBound = RangeSize + 1; 2457 HiBound = -RangeSize; 2458 if (HiBound == *C2) { // -INTMIN = INTMIN 2459 HiOverflow = 1; // [INTMIN+1, overflow) 2460 HiBound = APInt(); // e.g. X/INTMIN = 0 --> X > INTMIN 2461 } 2462 } else if (C.isStrictlyPositive()) { // (X / neg) op pos 2463 // e.g. X/-5 op 3 --> [-19, -14) 2464 HiBound = Prod + 1; 2465 HiOverflow = LoOverflow = ProdOV ? -1 : 0; 2466 if (!LoOverflow) 2467 LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0; 2468 } else { // (X / neg) op neg 2469 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20) 2470 LoOverflow = HiOverflow = ProdOV; 2471 if (!HiOverflow) 2472 HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true); 2473 } 2474 2475 // Dividing by a negative swaps the condition. LT <-> GT 2476 Pred = ICmpInst::getSwappedPredicate(Pred); 2477 } 2478 2479 Value *X = Div->getOperand(0); 2480 switch (Pred) { 2481 default: llvm_unreachable("Unhandled icmp opcode!"); 2482 case ICmpInst::ICMP_EQ: 2483 if (LoOverflow && HiOverflow) 2484 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2485 if (HiOverflow) 2486 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : 2487 ICmpInst::ICMP_UGE, X, 2488 ConstantInt::get(Div->getType(), LoBound)); 2489 if (LoOverflow) 2490 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : 2491 ICmpInst::ICMP_ULT, X, 2492 ConstantInt::get(Div->getType(), HiBound)); 2493 return replaceInstUsesWith( 2494 Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true)); 2495 case ICmpInst::ICMP_NE: 2496 if (LoOverflow && HiOverflow) 2497 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2498 if (HiOverflow) 2499 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : 2500 ICmpInst::ICMP_ULT, X, 2501 ConstantInt::get(Div->getType(), LoBound)); 2502 if (LoOverflow) 2503 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : 2504 ICmpInst::ICMP_UGE, X, 2505 ConstantInt::get(Div->getType(), HiBound)); 2506 return replaceInstUsesWith(Cmp, 2507 insertRangeTest(X, LoBound, HiBound, 2508 DivIsSigned, false)); 2509 case ICmpInst::ICMP_ULT: 2510 case ICmpInst::ICMP_SLT: 2511 if (LoOverflow == +1) // Low bound is greater than input range. 2512 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2513 if (LoOverflow == -1) // Low bound is less than input range. 2514 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2515 return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound)); 2516 case ICmpInst::ICMP_UGT: 2517 case ICmpInst::ICMP_SGT: 2518 if (HiOverflow == +1) // High bound greater than input range. 2519 return replaceInstUsesWith(Cmp, Builder.getFalse()); 2520 if (HiOverflow == -1) // High bound less than input range. 2521 return replaceInstUsesWith(Cmp, Builder.getTrue()); 2522 if (Pred == ICmpInst::ICMP_UGT) 2523 return new ICmpInst(ICmpInst::ICMP_UGE, X, 2524 ConstantInt::get(Div->getType(), HiBound)); 2525 return new ICmpInst(ICmpInst::ICMP_SGE, X, 2526 ConstantInt::get(Div->getType(), HiBound)); 2527 } 2528 2529 return nullptr; 2530 } 2531 2532 /// Fold icmp (sub X, Y), C. 2533 Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp, 2534 BinaryOperator *Sub, 2535 const APInt &C) { 2536 Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1); 2537 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2538 const APInt *C2; 2539 APInt SubResult; 2540 2541 // icmp eq/ne (sub C, Y), C -> icmp eq/ne Y, 0 2542 if (match(X, m_APInt(C2)) && *C2 == C && Cmp.isEquality()) 2543 return new ICmpInst(Cmp.getPredicate(), Y, 2544 ConstantInt::get(Y->getType(), 0)); 2545 2546 // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C) 2547 if (match(X, m_APInt(C2)) && 2548 ((Cmp.isUnsigned() && Sub->hasNoUnsignedWrap()) || 2549 (Cmp.isSigned() && Sub->hasNoSignedWrap())) && 2550 !subWithOverflow(SubResult, *C2, C, Cmp.isSigned())) 2551 return new ICmpInst(Cmp.getSwappedPredicate(), Y, 2552 ConstantInt::get(Y->getType(), SubResult)); 2553 2554 // The following transforms are only worth it if the only user of the subtract 2555 // is the icmp. 2556 if (!Sub->hasOneUse()) 2557 return nullptr; 2558 2559 if (Sub->hasNoSignedWrap()) { 2560 // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y) 2561 if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue()) 2562 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 2563 2564 // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y) 2565 if (Pred == ICmpInst::ICMP_SGT && C.isNullValue()) 2566 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 2567 2568 // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y) 2569 if (Pred == ICmpInst::ICMP_SLT && C.isNullValue()) 2570 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 2571 2572 // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y) 2573 if (Pred == ICmpInst::ICMP_SLT && C.isOneValue()) 2574 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 2575 } 2576 2577 if (!match(X, m_APInt(C2))) 2578 return nullptr; 2579 2580 // C2 - Y <u C -> (Y | (C - 1)) == C2 2581 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2 2582 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && 2583 (*C2 & (C - 1)) == (C - 1)) 2584 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X); 2585 2586 // C2 - Y >u C -> (Y | C) != C2 2587 // iff C2 & C == C and C + 1 is a power of 2 2588 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C) 2589 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X); 2590 2591 return nullptr; 2592 } 2593 2594 /// Fold icmp (add X, Y), C. 2595 Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp, 2596 BinaryOperator *Add, 2597 const APInt &C) { 2598 Value *Y = Add->getOperand(1); 2599 const APInt *C2; 2600 if (Cmp.isEquality() || !match(Y, m_APInt(C2))) 2601 return nullptr; 2602 2603 // Fold icmp pred (add X, C2), C. 2604 Value *X = Add->getOperand(0); 2605 Type *Ty = Add->getType(); 2606 CmpInst::Predicate Pred = Cmp.getPredicate(); 2607 2608 // If the add does not wrap, we can always adjust the compare by subtracting 2609 // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE 2610 // are canonicalized to SGT/SLT/UGT/ULT. 2611 if ((Add->hasNoSignedWrap() && 2612 (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) || 2613 (Add->hasNoUnsignedWrap() && 2614 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) { 2615 bool Overflow; 2616 APInt NewC = 2617 Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow); 2618 // If there is overflow, the result must be true or false. 2619 // TODO: Can we assert there is no overflow because InstSimplify always 2620 // handles those cases? 2621 if (!Overflow) 2622 // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2) 2623 return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC)); 2624 } 2625 2626 auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2); 2627 const APInt &Upper = CR.getUpper(); 2628 const APInt &Lower = CR.getLower(); 2629 if (Cmp.isSigned()) { 2630 if (Lower.isSignMask()) 2631 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper)); 2632 if (Upper.isSignMask()) 2633 return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower)); 2634 } else { 2635 if (Lower.isMinValue()) 2636 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper)); 2637 if (Upper.isMinValue()) 2638 return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower)); 2639 } 2640 2641 if (!Add->hasOneUse()) 2642 return nullptr; 2643 2644 // X+C <u C2 -> (X & -C2) == C 2645 // iff C & (C2-1) == 0 2646 // C2 is a power of 2 2647 if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0) 2648 return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C), 2649 ConstantExpr::getNeg(cast<Constant>(Y))); 2650 2651 // X+C >u C2 -> (X & ~C2) != C 2652 // iff C & C2 == 0 2653 // C2+1 is a power of 2 2654 if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0) 2655 return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C), 2656 ConstantExpr::getNeg(cast<Constant>(Y))); 2657 2658 return nullptr; 2659 } 2660 2661 bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS, 2662 Value *&RHS, ConstantInt *&Less, 2663 ConstantInt *&Equal, 2664 ConstantInt *&Greater) { 2665 // TODO: Generalize this to work with other comparison idioms or ensure 2666 // they get canonicalized into this form. 2667 2668 // select i1 (a == b), 2669 // i32 Equal, 2670 // i32 (select i1 (a < b), i32 Less, i32 Greater) 2671 // where Equal, Less and Greater are placeholders for any three constants. 2672 ICmpInst::Predicate PredA; 2673 if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) || 2674 !ICmpInst::isEquality(PredA)) 2675 return false; 2676 Value *EqualVal = SI->getTrueValue(); 2677 Value *UnequalVal = SI->getFalseValue(); 2678 // We still can get non-canonical predicate here, so canonicalize. 2679 if (PredA == ICmpInst::ICMP_NE) 2680 std::swap(EqualVal, UnequalVal); 2681 if (!match(EqualVal, m_ConstantInt(Equal))) 2682 return false; 2683 ICmpInst::Predicate PredB; 2684 Value *LHS2, *RHS2; 2685 if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)), 2686 m_ConstantInt(Less), m_ConstantInt(Greater)))) 2687 return false; 2688 // We can get predicate mismatch here, so canonicalize if possible: 2689 // First, ensure that 'LHS' match. 2690 if (LHS2 != LHS) { 2691 // x sgt y <--> y slt x 2692 std::swap(LHS2, RHS2); 2693 PredB = ICmpInst::getSwappedPredicate(PredB); 2694 } 2695 if (LHS2 != LHS) 2696 return false; 2697 // We also need to canonicalize 'RHS'. 2698 if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) { 2699 // x sgt C-1 <--> x sge C <--> not(x slt C) 2700 auto FlippedStrictness = 2701 InstCombiner::getFlippedStrictnessPredicateAndConstant( 2702 PredB, cast<Constant>(RHS2)); 2703 if (!FlippedStrictness) 2704 return false; 2705 assert(FlippedStrictness->first == ICmpInst::ICMP_SGE && "Sanity check"); 2706 RHS2 = FlippedStrictness->second; 2707 // And kind-of perform the result swap. 2708 std::swap(Less, Greater); 2709 PredB = ICmpInst::ICMP_SLT; 2710 } 2711 return PredB == ICmpInst::ICMP_SLT && RHS == RHS2; 2712 } 2713 2714 Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp, 2715 SelectInst *Select, 2716 ConstantInt *C) { 2717 2718 assert(C && "Cmp RHS should be a constant int!"); 2719 // If we're testing a constant value against the result of a three way 2720 // comparison, the result can be expressed directly in terms of the 2721 // original values being compared. Note: We could possibly be more 2722 // aggressive here and remove the hasOneUse test. The original select is 2723 // really likely to simplify or sink when we remove a test of the result. 2724 Value *OrigLHS, *OrigRHS; 2725 ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan; 2726 if (Cmp.hasOneUse() && 2727 matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal, 2728 C3GreaterThan)) { 2729 assert(C1LessThan && C2Equal && C3GreaterThan); 2730 2731 bool TrueWhenLessThan = 2732 ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C) 2733 ->isAllOnesValue(); 2734 bool TrueWhenEqual = 2735 ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C) 2736 ->isAllOnesValue(); 2737 bool TrueWhenGreaterThan = 2738 ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C) 2739 ->isAllOnesValue(); 2740 2741 // This generates the new instruction that will replace the original Cmp 2742 // Instruction. Instead of enumerating the various combinations when 2743 // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus 2744 // false, we rely on chaining of ORs and future passes of InstCombine to 2745 // simplify the OR further (i.e. a s< b || a == b becomes a s<= b). 2746 2747 // When none of the three constants satisfy the predicate for the RHS (C), 2748 // the entire original Cmp can be simplified to a false. 2749 Value *Cond = Builder.getFalse(); 2750 if (TrueWhenLessThan) 2751 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, 2752 OrigLHS, OrigRHS)); 2753 if (TrueWhenEqual) 2754 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, 2755 OrigLHS, OrigRHS)); 2756 if (TrueWhenGreaterThan) 2757 Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, 2758 OrigLHS, OrigRHS)); 2759 2760 return replaceInstUsesWith(Cmp, Cond); 2761 } 2762 return nullptr; 2763 } 2764 2765 static Instruction *foldICmpBitCast(ICmpInst &Cmp, 2766 InstCombiner::BuilderTy &Builder) { 2767 auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0)); 2768 if (!Bitcast) 2769 return nullptr; 2770 2771 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2772 Value *Op1 = Cmp.getOperand(1); 2773 Value *BCSrcOp = Bitcast->getOperand(0); 2774 2775 // Make sure the bitcast doesn't change the number of vector elements. 2776 if (Bitcast->getSrcTy()->getScalarSizeInBits() == 2777 Bitcast->getDestTy()->getScalarSizeInBits()) { 2778 // Zero-equality and sign-bit checks are preserved through sitofp + bitcast. 2779 Value *X; 2780 if (match(BCSrcOp, m_SIToFP(m_Value(X)))) { 2781 // icmp eq (bitcast (sitofp X)), 0 --> icmp eq X, 0 2782 // icmp ne (bitcast (sitofp X)), 0 --> icmp ne X, 0 2783 // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0 2784 // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0 2785 if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT || 2786 Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) && 2787 match(Op1, m_Zero())) 2788 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType())); 2789 2790 // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1 2791 if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One())) 2792 return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1)); 2793 2794 // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1 2795 if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes())) 2796 return new ICmpInst(Pred, X, 2797 ConstantInt::getAllOnesValue(X->getType())); 2798 } 2799 2800 // Zero-equality checks are preserved through unsigned floating-point casts: 2801 // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0 2802 // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0 2803 if (match(BCSrcOp, m_UIToFP(m_Value(X)))) 2804 if (Cmp.isEquality() && match(Op1, m_Zero())) 2805 return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType())); 2806 2807 // If this is a sign-bit test of a bitcast of a casted FP value, eliminate 2808 // the FP extend/truncate because that cast does not change the sign-bit. 2809 // This is true for all standard IEEE-754 types and the X86 80-bit type. 2810 // The sign-bit is always the most significant bit in those types. 2811 const APInt *C; 2812 bool TrueIfSigned; 2813 if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse() && 2814 InstCombiner::isSignBitCheck(Pred, *C, TrueIfSigned)) { 2815 if (match(BCSrcOp, m_FPExt(m_Value(X))) || 2816 match(BCSrcOp, m_FPTrunc(m_Value(X)))) { 2817 // (bitcast (fpext/fptrunc X)) to iX) < 0 --> (bitcast X to iY) < 0 2818 // (bitcast (fpext/fptrunc X)) to iX) > -1 --> (bitcast X to iY) > -1 2819 Type *XType = X->getType(); 2820 2821 // We can't currently handle Power style floating point operations here. 2822 if (!(XType->isPPC_FP128Ty() || BCSrcOp->getType()->isPPC_FP128Ty())) { 2823 2824 Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits()); 2825 if (auto *XVTy = dyn_cast<VectorType>(XType)) 2826 NewType = VectorType::get(NewType, XVTy->getElementCount()); 2827 Value *NewBitcast = Builder.CreateBitCast(X, NewType); 2828 if (TrueIfSigned) 2829 return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast, 2830 ConstantInt::getNullValue(NewType)); 2831 else 2832 return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast, 2833 ConstantInt::getAllOnesValue(NewType)); 2834 } 2835 } 2836 } 2837 } 2838 2839 // Test to see if the operands of the icmp are casted versions of other 2840 // values. If the ptr->ptr cast can be stripped off both arguments, do so. 2841 if (Bitcast->getType()->isPointerTy() && 2842 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) { 2843 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast 2844 // so eliminate it as well. 2845 if (auto *BC2 = dyn_cast<BitCastInst>(Op1)) 2846 Op1 = BC2->getOperand(0); 2847 2848 Op1 = Builder.CreateBitCast(Op1, BCSrcOp->getType()); 2849 return new ICmpInst(Pred, BCSrcOp, Op1); 2850 } 2851 2852 // Folding: icmp <pred> iN X, C 2853 // where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC)) to iN 2854 // and C is a splat of a K-bit pattern 2855 // and SC is a constant vector = <C', C', C', ..., C'> 2856 // Into: 2857 // %E = extractelement <M x iK> %vec, i32 C' 2858 // icmp <pred> iK %E, trunc(C) 2859 const APInt *C; 2860 if (!match(Cmp.getOperand(1), m_APInt(C)) || 2861 !Bitcast->getType()->isIntegerTy() || 2862 !Bitcast->getSrcTy()->isIntOrIntVectorTy()) 2863 return nullptr; 2864 2865 Value *Vec; 2866 ArrayRef<int> Mask; 2867 if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) { 2868 // Check whether every element of Mask is the same constant 2869 if (is_splat(Mask)) { 2870 auto *VecTy = cast<VectorType>(BCSrcOp->getType()); 2871 auto *EltTy = cast<IntegerType>(VecTy->getElementType()); 2872 if (C->isSplat(EltTy->getBitWidth())) { 2873 // Fold the icmp based on the value of C 2874 // If C is M copies of an iK sized bit pattern, 2875 // then: 2876 // => %E = extractelement <N x iK> %vec, i32 Elem 2877 // icmp <pred> iK %SplatVal, <pattern> 2878 Value *Elem = Builder.getInt32(Mask[0]); 2879 Value *Extract = Builder.CreateExtractElement(Vec, Elem); 2880 Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth())); 2881 return new ICmpInst(Pred, Extract, NewC); 2882 } 2883 } 2884 } 2885 return nullptr; 2886 } 2887 2888 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C 2889 /// where X is some kind of instruction. 2890 Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) { 2891 const APInt *C; 2892 if (!match(Cmp.getOperand(1), m_APInt(C))) 2893 return nullptr; 2894 2895 if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) { 2896 switch (BO->getOpcode()) { 2897 case Instruction::Xor: 2898 if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C)) 2899 return I; 2900 break; 2901 case Instruction::And: 2902 if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C)) 2903 return I; 2904 break; 2905 case Instruction::Or: 2906 if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C)) 2907 return I; 2908 break; 2909 case Instruction::Mul: 2910 if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C)) 2911 return I; 2912 break; 2913 case Instruction::Shl: 2914 if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C)) 2915 return I; 2916 break; 2917 case Instruction::LShr: 2918 case Instruction::AShr: 2919 if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C)) 2920 return I; 2921 break; 2922 case Instruction::SRem: 2923 if (Instruction *I = foldICmpSRemConstant(Cmp, BO, *C)) 2924 return I; 2925 break; 2926 case Instruction::UDiv: 2927 if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C)) 2928 return I; 2929 LLVM_FALLTHROUGH; 2930 case Instruction::SDiv: 2931 if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C)) 2932 return I; 2933 break; 2934 case Instruction::Sub: 2935 if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C)) 2936 return I; 2937 break; 2938 case Instruction::Add: 2939 if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C)) 2940 return I; 2941 break; 2942 default: 2943 break; 2944 } 2945 // TODO: These folds could be refactored to be part of the above calls. 2946 if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C)) 2947 return I; 2948 } 2949 2950 // Match against CmpInst LHS being instructions other than binary operators. 2951 2952 if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) { 2953 // For now, we only support constant integers while folding the 2954 // ICMP(SELECT)) pattern. We can extend this to support vector of integers 2955 // similar to the cases handled by binary ops above. 2956 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1))) 2957 if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS)) 2958 return I; 2959 } 2960 2961 if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) { 2962 if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C)) 2963 return I; 2964 } 2965 2966 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) 2967 if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C)) 2968 return I; 2969 2970 return nullptr; 2971 } 2972 2973 /// Fold an icmp equality instruction with binary operator LHS and constant RHS: 2974 /// icmp eq/ne BO, C. 2975 Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant( 2976 ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) { 2977 // TODO: Some of these folds could work with arbitrary constants, but this 2978 // function is limited to scalar and vector splat constants. 2979 if (!Cmp.isEquality()) 2980 return nullptr; 2981 2982 ICmpInst::Predicate Pred = Cmp.getPredicate(); 2983 bool isICMP_NE = Pred == ICmpInst::ICMP_NE; 2984 Constant *RHS = cast<Constant>(Cmp.getOperand(1)); 2985 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1); 2986 2987 switch (BO->getOpcode()) { 2988 case Instruction::SRem: 2989 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one. 2990 if (C.isNullValue() && BO->hasOneUse()) { 2991 const APInt *BOC; 2992 if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) { 2993 Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName()); 2994 return new ICmpInst(Pred, NewRem, 2995 Constant::getNullValue(BO->getType())); 2996 } 2997 } 2998 break; 2999 case Instruction::Add: { 3000 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants. 3001 if (Constant *BOC = dyn_cast<Constant>(BOp1)) { 3002 if (BO->hasOneUse()) 3003 return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, BOC)); 3004 } else if (C.isNullValue()) { 3005 // Replace ((add A, B) != 0) with (A != -B) if A or B is 3006 // efficiently invertible, or if the add has just this one use. 3007 if (Value *NegVal = dyn_castNegVal(BOp1)) 3008 return new ICmpInst(Pred, BOp0, NegVal); 3009 if (Value *NegVal = dyn_castNegVal(BOp0)) 3010 return new ICmpInst(Pred, NegVal, BOp1); 3011 if (BO->hasOneUse()) { 3012 Value *Neg = Builder.CreateNeg(BOp1); 3013 Neg->takeName(BO); 3014 return new ICmpInst(Pred, BOp0, Neg); 3015 } 3016 } 3017 break; 3018 } 3019 case Instruction::Xor: 3020 if (BO->hasOneUse()) { 3021 if (Constant *BOC = dyn_cast<Constant>(BOp1)) { 3022 // For the xor case, we can xor two constants together, eliminating 3023 // the explicit xor. 3024 return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC)); 3025 } else if (C.isNullValue()) { 3026 // Replace ((xor A, B) != 0) with (A != B) 3027 return new ICmpInst(Pred, BOp0, BOp1); 3028 } 3029 } 3030 break; 3031 case Instruction::Sub: 3032 if (BO->hasOneUse()) { 3033 // Only check for constant LHS here, as constant RHS will be canonicalized 3034 // to add and use the fold above. 3035 if (Constant *BOC = dyn_cast<Constant>(BOp0)) { 3036 // Replace ((sub BOC, B) != C) with (B != BOC-C). 3037 return new ICmpInst(Pred, BOp1, ConstantExpr::getSub(BOC, RHS)); 3038 } else if (C.isNullValue()) { 3039 // Replace ((sub A, B) != 0) with (A != B). 3040 return new ICmpInst(Pred, BOp0, BOp1); 3041 } 3042 } 3043 break; 3044 case Instruction::Or: { 3045 const APInt *BOC; 3046 if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) { 3047 // Comparing if all bits outside of a constant mask are set? 3048 // Replace (X | C) == -1 with (X & ~C) == ~C. 3049 // This removes the -1 constant. 3050 Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1)); 3051 Value *And = Builder.CreateAnd(BOp0, NotBOC); 3052 return new ICmpInst(Pred, And, NotBOC); 3053 } 3054 break; 3055 } 3056 case Instruction::And: { 3057 const APInt *BOC; 3058 if (match(BOp1, m_APInt(BOC))) { 3059 // If we have ((X & C) == C), turn it into ((X & C) != 0). 3060 if (C == *BOC && C.isPowerOf2()) 3061 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE, 3062 BO, Constant::getNullValue(RHS->getType())); 3063 } 3064 break; 3065 } 3066 case Instruction::UDiv: 3067 if (C.isNullValue()) { 3068 // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A) 3069 auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT; 3070 return new ICmpInst(NewPred, BOp1, BOp0); 3071 } 3072 break; 3073 default: 3074 break; 3075 } 3076 return nullptr; 3077 } 3078 3079 /// Fold an equality icmp with LLVM intrinsic and constant operand. 3080 Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant( 3081 ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) { 3082 Type *Ty = II->getType(); 3083 unsigned BitWidth = C.getBitWidth(); 3084 switch (II->getIntrinsicID()) { 3085 case Intrinsic::abs: 3086 // abs(A) == 0 -> A == 0 3087 // abs(A) == INT_MIN -> A == INT_MIN 3088 if (C.isNullValue() || C.isMinSignedValue()) 3089 return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0), 3090 ConstantInt::get(Ty, C)); 3091 break; 3092 3093 case Intrinsic::bswap: 3094 // bswap(A) == C -> A == bswap(C) 3095 return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0), 3096 ConstantInt::get(Ty, C.byteSwap())); 3097 3098 case Intrinsic::ctlz: 3099 case Intrinsic::cttz: { 3100 // ctz(A) == bitwidth(A) -> A == 0 and likewise for != 3101 if (C == BitWidth) 3102 return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0), 3103 ConstantInt::getNullValue(Ty)); 3104 3105 // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set 3106 // and Mask1 has bits 0..C+1 set. Similar for ctl, but for high bits. 3107 // Limit to one use to ensure we don't increase instruction count. 3108 unsigned Num = C.getLimitedValue(BitWidth); 3109 if (Num != BitWidth && II->hasOneUse()) { 3110 bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz; 3111 APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1) 3112 : APInt::getHighBitsSet(BitWidth, Num + 1); 3113 APInt Mask2 = IsTrailing 3114 ? APInt::getOneBitSet(BitWidth, Num) 3115 : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1); 3116 return new ICmpInst(Cmp.getPredicate(), 3117 Builder.CreateAnd(II->getArgOperand(0), Mask1), 3118 ConstantInt::get(Ty, Mask2)); 3119 } 3120 break; 3121 } 3122 3123 case Intrinsic::ctpop: { 3124 // popcount(A) == 0 -> A == 0 and likewise for != 3125 // popcount(A) == bitwidth(A) -> A == -1 and likewise for != 3126 bool IsZero = C.isNullValue(); 3127 if (IsZero || C == BitWidth) 3128 return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0), 3129 IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty)); 3130 3131 break; 3132 } 3133 3134 case Intrinsic::uadd_sat: { 3135 // uadd.sat(a, b) == 0 -> (a | b) == 0 3136 if (C.isNullValue()) { 3137 Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1)); 3138 return new ICmpInst(Cmp.getPredicate(), Or, Constant::getNullValue(Ty)); 3139 } 3140 break; 3141 } 3142 3143 case Intrinsic::usub_sat: { 3144 // usub.sat(a, b) == 0 -> a <= b 3145 if (C.isNullValue()) { 3146 ICmpInst::Predicate NewPred = Cmp.getPredicate() == ICmpInst::ICMP_EQ 3147 ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT; 3148 return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1)); 3149 } 3150 break; 3151 } 3152 default: 3153 break; 3154 } 3155 3156 return nullptr; 3157 } 3158 3159 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C. 3160 Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp, 3161 IntrinsicInst *II, 3162 const APInt &C) { 3163 if (Cmp.isEquality()) 3164 return foldICmpEqIntrinsicWithConstant(Cmp, II, C); 3165 3166 Type *Ty = II->getType(); 3167 unsigned BitWidth = C.getBitWidth(); 3168 ICmpInst::Predicate Pred = Cmp.getPredicate(); 3169 switch (II->getIntrinsicID()) { 3170 case Intrinsic::ctpop: { 3171 // (ctpop X > BitWidth - 1) --> X == -1 3172 Value *X = II->getArgOperand(0); 3173 if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT) 3174 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X, 3175 ConstantInt::getAllOnesValue(Ty)); 3176 // (ctpop X < BitWidth) --> X != -1 3177 if (C == BitWidth && Pred == ICmpInst::ICMP_ULT) 3178 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X, 3179 ConstantInt::getAllOnesValue(Ty)); 3180 break; 3181 } 3182 case Intrinsic::ctlz: { 3183 // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000 3184 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) { 3185 unsigned Num = C.getLimitedValue(); 3186 APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1); 3187 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT, 3188 II->getArgOperand(0), ConstantInt::get(Ty, Limit)); 3189 } 3190 3191 // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111 3192 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) { 3193 unsigned Num = C.getLimitedValue(); 3194 APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num); 3195 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT, 3196 II->getArgOperand(0), ConstantInt::get(Ty, Limit)); 3197 } 3198 break; 3199 } 3200 case Intrinsic::cttz: { 3201 // Limit to one use to ensure we don't increase instruction count. 3202 if (!II->hasOneUse()) 3203 return nullptr; 3204 3205 // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0 3206 if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) { 3207 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1); 3208 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, 3209 Builder.CreateAnd(II->getArgOperand(0), Mask), 3210 ConstantInt::getNullValue(Ty)); 3211 } 3212 3213 // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0 3214 if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) { 3215 APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue()); 3216 return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, 3217 Builder.CreateAnd(II->getArgOperand(0), Mask), 3218 ConstantInt::getNullValue(Ty)); 3219 } 3220 break; 3221 } 3222 default: 3223 break; 3224 } 3225 3226 return nullptr; 3227 } 3228 3229 /// Handle icmp with constant (but not simple integer constant) RHS. 3230 Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) { 3231 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3232 Constant *RHSC = dyn_cast<Constant>(Op1); 3233 Instruction *LHSI = dyn_cast<Instruction>(Op0); 3234 if (!RHSC || !LHSI) 3235 return nullptr; 3236 3237 switch (LHSI->getOpcode()) { 3238 case Instruction::GetElementPtr: 3239 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null 3240 if (RHSC->isNullValue() && 3241 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices()) 3242 return new ICmpInst( 3243 I.getPredicate(), LHSI->getOperand(0), 3244 Constant::getNullValue(LHSI->getOperand(0)->getType())); 3245 break; 3246 case Instruction::PHI: 3247 // Only fold icmp into the PHI if the phi and icmp are in the same 3248 // block. If in the same block, we're encouraging jump threading. If 3249 // not, we are just pessimizing the code by making an i1 phi. 3250 if (LHSI->getParent() == I.getParent()) 3251 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI))) 3252 return NV; 3253 break; 3254 case Instruction::Select: { 3255 // If either operand of the select is a constant, we can fold the 3256 // comparison into the select arms, which will cause one to be 3257 // constant folded and the select turned into a bitwise or. 3258 Value *Op1 = nullptr, *Op2 = nullptr; 3259 ConstantInt *CI = nullptr; 3260 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) { 3261 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 3262 CI = dyn_cast<ConstantInt>(Op1); 3263 } 3264 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) { 3265 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC); 3266 CI = dyn_cast<ConstantInt>(Op2); 3267 } 3268 3269 // We only want to perform this transformation if it will not lead to 3270 // additional code. This is true if either both sides of the select 3271 // fold to a constant (in which case the icmp is replaced with a select 3272 // which will usually simplify) or this is the only user of the 3273 // select (in which case we are trading a select+icmp for a simpler 3274 // select+icmp) or all uses of the select can be replaced based on 3275 // dominance information ("Global cases"). 3276 bool Transform = false; 3277 if (Op1 && Op2) 3278 Transform = true; 3279 else if (Op1 || Op2) { 3280 // Local case 3281 if (LHSI->hasOneUse()) 3282 Transform = true; 3283 // Global cases 3284 else if (CI && !CI->isZero()) 3285 // When Op1 is constant try replacing select with second operand. 3286 // Otherwise Op2 is constant and try replacing select with first 3287 // operand. 3288 Transform = 3289 replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1); 3290 } 3291 if (Transform) { 3292 if (!Op1) 3293 Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC, 3294 I.getName()); 3295 if (!Op2) 3296 Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC, 3297 I.getName()); 3298 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); 3299 } 3300 break; 3301 } 3302 case Instruction::IntToPtr: 3303 // icmp pred inttoptr(X), null -> icmp pred X, 0 3304 if (RHSC->isNullValue() && 3305 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType()) 3306 return new ICmpInst( 3307 I.getPredicate(), LHSI->getOperand(0), 3308 Constant::getNullValue(LHSI->getOperand(0)->getType())); 3309 break; 3310 3311 case Instruction::Load: 3312 // Try to optimize things like "A[i] > 4" to index computations. 3313 if (GetElementPtrInst *GEP = 3314 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) { 3315 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 3316 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 3317 !cast<LoadInst>(LHSI)->isVolatile()) 3318 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I)) 3319 return Res; 3320 } 3321 break; 3322 } 3323 3324 return nullptr; 3325 } 3326 3327 /// Some comparisons can be simplified. 3328 /// In this case, we are looking for comparisons that look like 3329 /// a check for a lossy truncation. 3330 /// Folds: 3331 /// icmp SrcPred (x & Mask), x to icmp DstPred x, Mask 3332 /// Where Mask is some pattern that produces all-ones in low bits: 3333 /// (-1 >> y) 3334 /// ((-1 << y) >> y) <- non-canonical, has extra uses 3335 /// ~(-1 << y) 3336 /// ((1 << y) + (-1)) <- non-canonical, has extra uses 3337 /// The Mask can be a constant, too. 3338 /// For some predicates, the operands are commutative. 3339 /// For others, x can only be on a specific side. 3340 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I, 3341 InstCombiner::BuilderTy &Builder) { 3342 ICmpInst::Predicate SrcPred; 3343 Value *X, *M, *Y; 3344 auto m_VariableMask = m_CombineOr( 3345 m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())), 3346 m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())), 3347 m_CombineOr(m_LShr(m_AllOnes(), m_Value()), 3348 m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y)))); 3349 auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask()); 3350 if (!match(&I, m_c_ICmp(SrcPred, 3351 m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)), 3352 m_Deferred(X)))) 3353 return nullptr; 3354 3355 ICmpInst::Predicate DstPred; 3356 switch (SrcPred) { 3357 case ICmpInst::Predicate::ICMP_EQ: 3358 // x & (-1 >> y) == x -> x u<= (-1 >> y) 3359 DstPred = ICmpInst::Predicate::ICMP_ULE; 3360 break; 3361 case ICmpInst::Predicate::ICMP_NE: 3362 // x & (-1 >> y) != x -> x u> (-1 >> y) 3363 DstPred = ICmpInst::Predicate::ICMP_UGT; 3364 break; 3365 case ICmpInst::Predicate::ICMP_ULT: 3366 // x & (-1 >> y) u< x -> x u> (-1 >> y) 3367 // x u> x & (-1 >> y) -> x u> (-1 >> y) 3368 DstPred = ICmpInst::Predicate::ICMP_UGT; 3369 break; 3370 case ICmpInst::Predicate::ICMP_UGE: 3371 // x & (-1 >> y) u>= x -> x u<= (-1 >> y) 3372 // x u<= x & (-1 >> y) -> x u<= (-1 >> y) 3373 DstPred = ICmpInst::Predicate::ICMP_ULE; 3374 break; 3375 case ICmpInst::Predicate::ICMP_SLT: 3376 // x & (-1 >> y) s< x -> x s> (-1 >> y) 3377 // x s> x & (-1 >> y) -> x s> (-1 >> y) 3378 if (!match(M, m_Constant())) // Can not do this fold with non-constant. 3379 return nullptr; 3380 if (!match(M, m_NonNegative())) // Must not have any -1 vector elements. 3381 return nullptr; 3382 DstPred = ICmpInst::Predicate::ICMP_SGT; 3383 break; 3384 case ICmpInst::Predicate::ICMP_SGE: 3385 // x & (-1 >> y) s>= x -> x s<= (-1 >> y) 3386 // x s<= x & (-1 >> y) -> x s<= (-1 >> y) 3387 if (!match(M, m_Constant())) // Can not do this fold with non-constant. 3388 return nullptr; 3389 if (!match(M, m_NonNegative())) // Must not have any -1 vector elements. 3390 return nullptr; 3391 DstPred = ICmpInst::Predicate::ICMP_SLE; 3392 break; 3393 case ICmpInst::Predicate::ICMP_SGT: 3394 case ICmpInst::Predicate::ICMP_SLE: 3395 return nullptr; 3396 case ICmpInst::Predicate::ICMP_UGT: 3397 case ICmpInst::Predicate::ICMP_ULE: 3398 llvm_unreachable("Instsimplify took care of commut. variant"); 3399 break; 3400 default: 3401 llvm_unreachable("All possible folds are handled."); 3402 } 3403 3404 // The mask value may be a vector constant that has undefined elements. But it 3405 // may not be safe to propagate those undefs into the new compare, so replace 3406 // those elements by copying an existing, defined, and safe scalar constant. 3407 Type *OpTy = M->getType(); 3408 auto *VecC = dyn_cast<Constant>(M); 3409 auto *OpVTy = dyn_cast<FixedVectorType>(OpTy); 3410 if (OpVTy && VecC && VecC->containsUndefOrPoisonElement()) { 3411 Constant *SafeReplacementConstant = nullptr; 3412 for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) { 3413 if (!isa<UndefValue>(VecC->getAggregateElement(i))) { 3414 SafeReplacementConstant = VecC->getAggregateElement(i); 3415 break; 3416 } 3417 } 3418 assert(SafeReplacementConstant && "Failed to find undef replacement"); 3419 M = Constant::replaceUndefsWith(VecC, SafeReplacementConstant); 3420 } 3421 3422 return Builder.CreateICmp(DstPred, X, M); 3423 } 3424 3425 /// Some comparisons can be simplified. 3426 /// In this case, we are looking for comparisons that look like 3427 /// a check for a lossy signed truncation. 3428 /// Folds: (MaskedBits is a constant.) 3429 /// ((%x << MaskedBits) a>> MaskedBits) SrcPred %x 3430 /// Into: 3431 /// (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits) 3432 /// Where KeptBits = bitwidth(%x) - MaskedBits 3433 static Value * 3434 foldICmpWithTruncSignExtendedVal(ICmpInst &I, 3435 InstCombiner::BuilderTy &Builder) { 3436 ICmpInst::Predicate SrcPred; 3437 Value *X; 3438 const APInt *C0, *C1; // FIXME: non-splats, potentially with undef. 3439 // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use. 3440 if (!match(&I, m_c_ICmp(SrcPred, 3441 m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)), 3442 m_APInt(C1))), 3443 m_Deferred(X)))) 3444 return nullptr; 3445 3446 // Potential handling of non-splats: for each element: 3447 // * if both are undef, replace with constant 0. 3448 // Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0. 3449 // * if both are not undef, and are different, bailout. 3450 // * else, only one is undef, then pick the non-undef one. 3451 3452 // The shift amount must be equal. 3453 if (*C0 != *C1) 3454 return nullptr; 3455 const APInt &MaskedBits = *C0; 3456 assert(MaskedBits != 0 && "shift by zero should be folded away already."); 3457 3458 ICmpInst::Predicate DstPred; 3459 switch (SrcPred) { 3460 case ICmpInst::Predicate::ICMP_EQ: 3461 // ((%x << MaskedBits) a>> MaskedBits) == %x 3462 // => 3463 // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits) 3464 DstPred = ICmpInst::Predicate::ICMP_ULT; 3465 break; 3466 case ICmpInst::Predicate::ICMP_NE: 3467 // ((%x << MaskedBits) a>> MaskedBits) != %x 3468 // => 3469 // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits) 3470 DstPred = ICmpInst::Predicate::ICMP_UGE; 3471 break; 3472 // FIXME: are more folds possible? 3473 default: 3474 return nullptr; 3475 } 3476 3477 auto *XType = X->getType(); 3478 const unsigned XBitWidth = XType->getScalarSizeInBits(); 3479 const APInt BitWidth = APInt(XBitWidth, XBitWidth); 3480 assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched"); 3481 3482 // KeptBits = bitwidth(%x) - MaskedBits 3483 const APInt KeptBits = BitWidth - MaskedBits; 3484 assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable"); 3485 // ICmpCst = (1 << KeptBits) 3486 const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits); 3487 assert(ICmpCst.isPowerOf2()); 3488 // AddCst = (1 << (KeptBits-1)) 3489 const APInt AddCst = ICmpCst.lshr(1); 3490 assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2()); 3491 3492 // T0 = add %x, AddCst 3493 Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst)); 3494 // T1 = T0 DstPred ICmpCst 3495 Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst)); 3496 3497 return T1; 3498 } 3499 3500 // Given pattern: 3501 // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0 3502 // we should move shifts to the same hand of 'and', i.e. rewrite as 3503 // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x) 3504 // We are only interested in opposite logical shifts here. 3505 // One of the shifts can be truncated. 3506 // If we can, we want to end up creating 'lshr' shift. 3507 static Value * 3508 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ, 3509 InstCombiner::BuilderTy &Builder) { 3510 if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) || 3511 !I.getOperand(0)->hasOneUse()) 3512 return nullptr; 3513 3514 auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value()); 3515 3516 // Look for an 'and' of two logical shifts, one of which may be truncated. 3517 // We use m_TruncOrSelf() on the RHS to correctly handle commutative case. 3518 Instruction *XShift, *MaybeTruncation, *YShift; 3519 if (!match( 3520 I.getOperand(0), 3521 m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)), 3522 m_CombineAnd(m_TruncOrSelf(m_CombineAnd( 3523 m_AnyLogicalShift, m_Instruction(YShift))), 3524 m_Instruction(MaybeTruncation))))) 3525 return nullptr; 3526 3527 // We potentially looked past 'trunc', but only when matching YShift, 3528 // therefore YShift must have the widest type. 3529 Instruction *WidestShift = YShift; 3530 // Therefore XShift must have the shallowest type. 3531 // Or they both have identical types if there was no truncation. 3532 Instruction *NarrowestShift = XShift; 3533 3534 Type *WidestTy = WidestShift->getType(); 3535 Type *NarrowestTy = NarrowestShift->getType(); 3536 assert(NarrowestTy == I.getOperand(0)->getType() && 3537 "We did not look past any shifts while matching XShift though."); 3538 bool HadTrunc = WidestTy != I.getOperand(0)->getType(); 3539 3540 // If YShift is a 'lshr', swap the shifts around. 3541 if (match(YShift, m_LShr(m_Value(), m_Value()))) 3542 std::swap(XShift, YShift); 3543 3544 // The shifts must be in opposite directions. 3545 auto XShiftOpcode = XShift->getOpcode(); 3546 if (XShiftOpcode == YShift->getOpcode()) 3547 return nullptr; // Do not care about same-direction shifts here. 3548 3549 Value *X, *XShAmt, *Y, *YShAmt; 3550 match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt)))); 3551 match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt)))); 3552 3553 // If one of the values being shifted is a constant, then we will end with 3554 // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not, 3555 // however, we will need to ensure that we won't increase instruction count. 3556 if (!isa<Constant>(X) && !isa<Constant>(Y)) { 3557 // At least one of the hands of the 'and' should be one-use shift. 3558 if (!match(I.getOperand(0), 3559 m_c_And(m_OneUse(m_AnyLogicalShift), m_Value()))) 3560 return nullptr; 3561 if (HadTrunc) { 3562 // Due to the 'trunc', we will need to widen X. For that either the old 3563 // 'trunc' or the shift amt in the non-truncated shift should be one-use. 3564 if (!MaybeTruncation->hasOneUse() && 3565 !NarrowestShift->getOperand(1)->hasOneUse()) 3566 return nullptr; 3567 } 3568 } 3569 3570 // We have two shift amounts from two different shifts. The types of those 3571 // shift amounts may not match. If that's the case let's bailout now. 3572 if (XShAmt->getType() != YShAmt->getType()) 3573 return nullptr; 3574 3575 // As input, we have the following pattern: 3576 // icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0 3577 // We want to rewrite that as: 3578 // icmp eq/ne (and (x shift (Q+K)), y), 0 iff (Q+K) u< bitwidth(x) 3579 // While we know that originally (Q+K) would not overflow 3580 // (because 2 * (N-1) u<= iN -1), we have looked past extensions of 3581 // shift amounts. so it may now overflow in smaller bitwidth. 3582 // To ensure that does not happen, we need to ensure that the total maximal 3583 // shift amount is still representable in that smaller bit width. 3584 unsigned MaximalPossibleTotalShiftAmount = 3585 (WidestTy->getScalarSizeInBits() - 1) + 3586 (NarrowestTy->getScalarSizeInBits() - 1); 3587 APInt MaximalRepresentableShiftAmount = 3588 APInt::getAllOnesValue(XShAmt->getType()->getScalarSizeInBits()); 3589 if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount)) 3590 return nullptr; 3591 3592 // Can we fold (XShAmt+YShAmt) ? 3593 auto *NewShAmt = dyn_cast_or_null<Constant>( 3594 SimplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false, 3595 /*isNUW=*/false, SQ.getWithInstruction(&I))); 3596 if (!NewShAmt) 3597 return nullptr; 3598 NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy); 3599 unsigned WidestBitWidth = WidestTy->getScalarSizeInBits(); 3600 3601 // Is the new shift amount smaller than the bit width? 3602 // FIXME: could also rely on ConstantRange. 3603 if (!match(NewShAmt, 3604 m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT, 3605 APInt(WidestBitWidth, WidestBitWidth)))) 3606 return nullptr; 3607 3608 // An extra legality check is needed if we had trunc-of-lshr. 3609 if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) { 3610 auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ, 3611 WidestShift]() { 3612 // It isn't obvious whether it's worth it to analyze non-constants here. 3613 // Also, let's basically give up on non-splat cases, pessimizing vectors. 3614 // If *any* of these preconditions matches we can perform the fold. 3615 Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy() 3616 ? NewShAmt->getSplatValue() 3617 : NewShAmt; 3618 // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold. 3619 if (NewShAmtSplat && 3620 (NewShAmtSplat->isNullValue() || 3621 NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1)) 3622 return true; 3623 // We consider *min* leading zeros so a single outlier 3624 // blocks the transform as opposed to allowing it. 3625 if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) { 3626 KnownBits Known = computeKnownBits(C, SQ.DL); 3627 unsigned MinLeadZero = Known.countMinLeadingZeros(); 3628 // If the value being shifted has at most lowest bit set we can fold. 3629 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero; 3630 if (MaxActiveBits <= 1) 3631 return true; 3632 // Precondition: NewShAmt u<= countLeadingZeros(C) 3633 if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero)) 3634 return true; 3635 } 3636 if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) { 3637 KnownBits Known = computeKnownBits(C, SQ.DL); 3638 unsigned MinLeadZero = Known.countMinLeadingZeros(); 3639 // If the value being shifted has at most lowest bit set we can fold. 3640 unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero; 3641 if (MaxActiveBits <= 1) 3642 return true; 3643 // Precondition: ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C) 3644 if (NewShAmtSplat) { 3645 APInt AdjNewShAmt = 3646 (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger(); 3647 if (AdjNewShAmt.ule(MinLeadZero)) 3648 return true; 3649 } 3650 } 3651 return false; // Can't tell if it's ok. 3652 }; 3653 if (!CanFold()) 3654 return nullptr; 3655 } 3656 3657 // All good, we can do this fold. 3658 X = Builder.CreateZExt(X, WidestTy); 3659 Y = Builder.CreateZExt(Y, WidestTy); 3660 // The shift is the same that was for X. 3661 Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr 3662 ? Builder.CreateLShr(X, NewShAmt) 3663 : Builder.CreateShl(X, NewShAmt); 3664 Value *T1 = Builder.CreateAnd(T0, Y); 3665 return Builder.CreateICmp(I.getPredicate(), T1, 3666 Constant::getNullValue(WidestTy)); 3667 } 3668 3669 /// Fold 3670 /// (-1 u/ x) u< y 3671 /// ((x * y) u/ x) != y 3672 /// to 3673 /// @llvm.umul.with.overflow(x, y) plus extraction of overflow bit 3674 /// Note that the comparison is commutative, while inverted (u>=, ==) predicate 3675 /// will mean that we are looking for the opposite answer. 3676 Value *InstCombinerImpl::foldUnsignedMultiplicationOverflowCheck(ICmpInst &I) { 3677 ICmpInst::Predicate Pred; 3678 Value *X, *Y; 3679 Instruction *Mul; 3680 bool NeedNegation; 3681 // Look for: (-1 u/ x) u</u>= y 3682 if (!I.isEquality() && 3683 match(&I, m_c_ICmp(Pred, m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))), 3684 m_Value(Y)))) { 3685 Mul = nullptr; 3686 3687 // Are we checking that overflow does not happen, or does happen? 3688 switch (Pred) { 3689 case ICmpInst::Predicate::ICMP_ULT: 3690 NeedNegation = false; 3691 break; // OK 3692 case ICmpInst::Predicate::ICMP_UGE: 3693 NeedNegation = true; 3694 break; // OK 3695 default: 3696 return nullptr; // Wrong predicate. 3697 } 3698 } else // Look for: ((x * y) u/ x) !=/== y 3699 if (I.isEquality() && 3700 match(&I, m_c_ICmp(Pred, m_Value(Y), 3701 m_OneUse(m_UDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y), 3702 m_Value(X)), 3703 m_Instruction(Mul)), 3704 m_Deferred(X)))))) { 3705 NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ; 3706 } else 3707 return nullptr; 3708 3709 BuilderTy::InsertPointGuard Guard(Builder); 3710 // If the pattern included (x * y), we'll want to insert new instructions 3711 // right before that original multiplication so that we can replace it. 3712 bool MulHadOtherUses = Mul && !Mul->hasOneUse(); 3713 if (MulHadOtherUses) 3714 Builder.SetInsertPoint(Mul); 3715 3716 Function *F = Intrinsic::getDeclaration( 3717 I.getModule(), Intrinsic::umul_with_overflow, X->getType()); 3718 CallInst *Call = Builder.CreateCall(F, {X, Y}, "umul"); 3719 3720 // If the multiplication was used elsewhere, to ensure that we don't leave 3721 // "duplicate" instructions, replace uses of that original multiplication 3722 // with the multiplication result from the with.overflow intrinsic. 3723 if (MulHadOtherUses) 3724 replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "umul.val")); 3725 3726 Value *Res = Builder.CreateExtractValue(Call, 1, "umul.ov"); 3727 if (NeedNegation) // This technically increases instruction count. 3728 Res = Builder.CreateNot(Res, "umul.not.ov"); 3729 3730 // If we replaced the mul, erase it. Do this after all uses of Builder, 3731 // as the mul is used as insertion point. 3732 if (MulHadOtherUses) 3733 eraseInstFromFunction(*Mul); 3734 3735 return Res; 3736 } 3737 3738 static Instruction *foldICmpXNegX(ICmpInst &I) { 3739 CmpInst::Predicate Pred; 3740 Value *X; 3741 if (!match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X)))) 3742 return nullptr; 3743 3744 if (ICmpInst::isSigned(Pred)) 3745 Pred = ICmpInst::getSwappedPredicate(Pred); 3746 else if (ICmpInst::isUnsigned(Pred)) 3747 Pred = ICmpInst::getSignedPredicate(Pred); 3748 // else for equality-comparisons just keep the predicate. 3749 3750 return ICmpInst::Create(Instruction::ICmp, Pred, X, 3751 Constant::getNullValue(X->getType()), I.getName()); 3752 } 3753 3754 /// Try to fold icmp (binop), X or icmp X, (binop). 3755 /// TODO: A large part of this logic is duplicated in InstSimplify's 3756 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code 3757 /// duplication. 3758 Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I, 3759 const SimplifyQuery &SQ) { 3760 const SimplifyQuery Q = SQ.getWithInstruction(&I); 3761 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3762 3763 // Special logic for binary operators. 3764 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0); 3765 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1); 3766 if (!BO0 && !BO1) 3767 return nullptr; 3768 3769 if (Instruction *NewICmp = foldICmpXNegX(I)) 3770 return NewICmp; 3771 3772 const CmpInst::Predicate Pred = I.getPredicate(); 3773 Value *X; 3774 3775 // Convert add-with-unsigned-overflow comparisons into a 'not' with compare. 3776 // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X 3777 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) && 3778 (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) 3779 return new ICmpInst(Pred, Builder.CreateNot(Op1), X); 3780 // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0 3781 if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) && 3782 (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE)) 3783 return new ICmpInst(Pred, X, Builder.CreateNot(Op0)); 3784 3785 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false; 3786 if (BO0 && isa<OverflowingBinaryOperator>(BO0)) 3787 NoOp0WrapProblem = 3788 ICmpInst::isEquality(Pred) || 3789 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) || 3790 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap()); 3791 if (BO1 && isa<OverflowingBinaryOperator>(BO1)) 3792 NoOp1WrapProblem = 3793 ICmpInst::isEquality(Pred) || 3794 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) || 3795 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap()); 3796 3797 // Analyze the case when either Op0 or Op1 is an add instruction. 3798 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null). 3799 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 3800 if (BO0 && BO0->getOpcode() == Instruction::Add) { 3801 A = BO0->getOperand(0); 3802 B = BO0->getOperand(1); 3803 } 3804 if (BO1 && BO1->getOpcode() == Instruction::Add) { 3805 C = BO1->getOperand(0); 3806 D = BO1->getOperand(1); 3807 } 3808 3809 // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow. 3810 // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow. 3811 if ((A == Op1 || B == Op1) && NoOp0WrapProblem) 3812 return new ICmpInst(Pred, A == Op1 ? B : A, 3813 Constant::getNullValue(Op1->getType())); 3814 3815 // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow. 3816 // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow. 3817 if ((C == Op0 || D == Op0) && NoOp1WrapProblem) 3818 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()), 3819 C == Op0 ? D : C); 3820 3821 // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow. 3822 if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem && 3823 NoOp1WrapProblem) { 3824 // Determine Y and Z in the form icmp (X+Y), (X+Z). 3825 Value *Y, *Z; 3826 if (A == C) { 3827 // C + B == C + D -> B == D 3828 Y = B; 3829 Z = D; 3830 } else if (A == D) { 3831 // D + B == C + D -> B == C 3832 Y = B; 3833 Z = C; 3834 } else if (B == C) { 3835 // A + C == C + D -> A == D 3836 Y = A; 3837 Z = D; 3838 } else { 3839 assert(B == D); 3840 // A + D == C + D -> A == C 3841 Y = A; 3842 Z = C; 3843 } 3844 return new ICmpInst(Pred, Y, Z); 3845 } 3846 3847 // icmp slt (A + -1), Op1 -> icmp sle A, Op1 3848 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT && 3849 match(B, m_AllOnes())) 3850 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1); 3851 3852 // icmp sge (A + -1), Op1 -> icmp sgt A, Op1 3853 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE && 3854 match(B, m_AllOnes())) 3855 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1); 3856 3857 // icmp sle (A + 1), Op1 -> icmp slt A, Op1 3858 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One())) 3859 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1); 3860 3861 // icmp sgt (A + 1), Op1 -> icmp sge A, Op1 3862 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One())) 3863 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1); 3864 3865 // icmp sgt Op0, (C + -1) -> icmp sge Op0, C 3866 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT && 3867 match(D, m_AllOnes())) 3868 return new ICmpInst(CmpInst::ICMP_SGE, Op0, C); 3869 3870 // icmp sle Op0, (C + -1) -> icmp slt Op0, C 3871 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE && 3872 match(D, m_AllOnes())) 3873 return new ICmpInst(CmpInst::ICMP_SLT, Op0, C); 3874 3875 // icmp sge Op0, (C + 1) -> icmp sgt Op0, C 3876 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One())) 3877 return new ICmpInst(CmpInst::ICMP_SGT, Op0, C); 3878 3879 // icmp slt Op0, (C + 1) -> icmp sle Op0, C 3880 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One())) 3881 return new ICmpInst(CmpInst::ICMP_SLE, Op0, C); 3882 3883 // TODO: The subtraction-related identities shown below also hold, but 3884 // canonicalization from (X -nuw 1) to (X + -1) means that the combinations 3885 // wouldn't happen even if they were implemented. 3886 // 3887 // icmp ult (A - 1), Op1 -> icmp ule A, Op1 3888 // icmp uge (A - 1), Op1 -> icmp ugt A, Op1 3889 // icmp ugt Op0, (C - 1) -> icmp uge Op0, C 3890 // icmp ule Op0, (C - 1) -> icmp ult Op0, C 3891 3892 // icmp ule (A + 1), Op0 -> icmp ult A, Op1 3893 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One())) 3894 return new ICmpInst(CmpInst::ICMP_ULT, A, Op1); 3895 3896 // icmp ugt (A + 1), Op0 -> icmp uge A, Op1 3897 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One())) 3898 return new ICmpInst(CmpInst::ICMP_UGE, A, Op1); 3899 3900 // icmp uge Op0, (C + 1) -> icmp ugt Op0, C 3901 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One())) 3902 return new ICmpInst(CmpInst::ICMP_UGT, Op0, C); 3903 3904 // icmp ult Op0, (C + 1) -> icmp ule Op0, C 3905 if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One())) 3906 return new ICmpInst(CmpInst::ICMP_ULE, Op0, C); 3907 3908 // if C1 has greater magnitude than C2: 3909 // icmp (A + C1), (C + C2) -> icmp (A + C3), C 3910 // s.t. C3 = C1 - C2 3911 // 3912 // if C2 has greater magnitude than C1: 3913 // icmp (A + C1), (C + C2) -> icmp A, (C + C3) 3914 // s.t. C3 = C2 - C1 3915 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem && 3916 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) 3917 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B)) 3918 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) { 3919 const APInt &AP1 = C1->getValue(); 3920 const APInt &AP2 = C2->getValue(); 3921 if (AP1.isNegative() == AP2.isNegative()) { 3922 APInt AP1Abs = C1->getValue().abs(); 3923 APInt AP2Abs = C2->getValue().abs(); 3924 if (AP1Abs.uge(AP2Abs)) { 3925 ConstantInt *C3 = Builder.getInt(AP1 - AP2); 3926 bool HasNUW = BO0->hasNoUnsignedWrap() && C3->getValue().ule(AP1); 3927 bool HasNSW = BO0->hasNoSignedWrap(); 3928 Value *NewAdd = Builder.CreateAdd(A, C3, "", HasNUW, HasNSW); 3929 return new ICmpInst(Pred, NewAdd, C); 3930 } else { 3931 ConstantInt *C3 = Builder.getInt(AP2 - AP1); 3932 bool HasNUW = BO1->hasNoUnsignedWrap() && C3->getValue().ule(AP2); 3933 bool HasNSW = BO1->hasNoSignedWrap(); 3934 Value *NewAdd = Builder.CreateAdd(C, C3, "", HasNUW, HasNSW); 3935 return new ICmpInst(Pred, A, NewAdd); 3936 } 3937 } 3938 } 3939 3940 // Analyze the case when either Op0 or Op1 is a sub instruction. 3941 // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null). 3942 A = nullptr; 3943 B = nullptr; 3944 C = nullptr; 3945 D = nullptr; 3946 if (BO0 && BO0->getOpcode() == Instruction::Sub) { 3947 A = BO0->getOperand(0); 3948 B = BO0->getOperand(1); 3949 } 3950 if (BO1 && BO1->getOpcode() == Instruction::Sub) { 3951 C = BO1->getOperand(0); 3952 D = BO1->getOperand(1); 3953 } 3954 3955 // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow. 3956 if (A == Op1 && NoOp0WrapProblem) 3957 return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B); 3958 // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow. 3959 if (C == Op0 && NoOp1WrapProblem) 3960 return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType())); 3961 3962 // Convert sub-with-unsigned-overflow comparisons into a comparison of args. 3963 // (A - B) u>/u<= A --> B u>/u<= A 3964 if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE)) 3965 return new ICmpInst(Pred, B, A); 3966 // C u</u>= (C - D) --> C u</u>= D 3967 if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) 3968 return new ICmpInst(Pred, C, D); 3969 // (A - B) u>=/u< A --> B u>/u<= A iff B != 0 3970 if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) && 3971 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 3972 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A); 3973 // C u<=/u> (C - D) --> C u</u>= D iff B != 0 3974 if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) && 3975 isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 3976 return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D); 3977 3978 // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow. 3979 if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem) 3980 return new ICmpInst(Pred, A, C); 3981 3982 // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow. 3983 if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem) 3984 return new ICmpInst(Pred, D, B); 3985 3986 // icmp (0-X) < cst --> x > -cst 3987 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) { 3988 Value *X; 3989 if (match(BO0, m_Neg(m_Value(X)))) 3990 if (Constant *RHSC = dyn_cast<Constant>(Op1)) 3991 if (RHSC->isNotMinSignedValue()) 3992 return new ICmpInst(I.getSwappedPredicate(), X, 3993 ConstantExpr::getNeg(RHSC)); 3994 } 3995 3996 { 3997 // Try to remove shared constant multiplier from equality comparison: 3998 // X * C == Y * C (with no overflowing/aliasing) --> X == Y 3999 Value *X, *Y; 4000 const APInt *C; 4001 if (match(Op0, m_Mul(m_Value(X), m_APInt(C))) && *C != 0 && 4002 match(Op1, m_Mul(m_Value(Y), m_SpecificInt(*C))) && I.isEquality()) 4003 if (!C->countTrailingZeros() || 4004 (BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap()) || 4005 (BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap())) 4006 return new ICmpInst(Pred, X, Y); 4007 } 4008 4009 BinaryOperator *SRem = nullptr; 4010 // icmp (srem X, Y), Y 4011 if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1)) 4012 SRem = BO0; 4013 // icmp Y, (srem X, Y) 4014 else if (BO1 && BO1->getOpcode() == Instruction::SRem && 4015 Op0 == BO1->getOperand(1)) 4016 SRem = BO1; 4017 if (SRem) { 4018 // We don't check hasOneUse to avoid increasing register pressure because 4019 // the value we use is the same value this instruction was already using. 4020 switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) { 4021 default: 4022 break; 4023 case ICmpInst::ICMP_EQ: 4024 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 4025 case ICmpInst::ICMP_NE: 4026 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 4027 case ICmpInst::ICMP_SGT: 4028 case ICmpInst::ICMP_SGE: 4029 return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1), 4030 Constant::getAllOnesValue(SRem->getType())); 4031 case ICmpInst::ICMP_SLT: 4032 case ICmpInst::ICMP_SLE: 4033 return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1), 4034 Constant::getNullValue(SRem->getType())); 4035 } 4036 } 4037 4038 if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() && 4039 BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) { 4040 switch (BO0->getOpcode()) { 4041 default: 4042 break; 4043 case Instruction::Add: 4044 case Instruction::Sub: 4045 case Instruction::Xor: { 4046 if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b 4047 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 4048 4049 const APInt *C; 4050 if (match(BO0->getOperand(1), m_APInt(C))) { 4051 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b 4052 if (C->isSignMask()) { 4053 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate(); 4054 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 4055 } 4056 4057 // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b 4058 if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) { 4059 ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate(); 4060 NewPred = I.getSwappedPredicate(NewPred); 4061 return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0)); 4062 } 4063 } 4064 break; 4065 } 4066 case Instruction::Mul: { 4067 if (!I.isEquality()) 4068 break; 4069 4070 const APInt *C; 4071 if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() && 4072 !C->isOneValue()) { 4073 // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask) 4074 // Mask = -1 >> count-trailing-zeros(C). 4075 if (unsigned TZs = C->countTrailingZeros()) { 4076 Constant *Mask = ConstantInt::get( 4077 BO0->getType(), 4078 APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs)); 4079 Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask); 4080 Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask); 4081 return new ICmpInst(Pred, And1, And2); 4082 } 4083 } 4084 break; 4085 } 4086 case Instruction::UDiv: 4087 case Instruction::LShr: 4088 if (I.isSigned() || !BO0->isExact() || !BO1->isExact()) 4089 break; 4090 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 4091 4092 case Instruction::SDiv: 4093 if (!I.isEquality() || !BO0->isExact() || !BO1->isExact()) 4094 break; 4095 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 4096 4097 case Instruction::AShr: 4098 if (!BO0->isExact() || !BO1->isExact()) 4099 break; 4100 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 4101 4102 case Instruction::Shl: { 4103 bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap(); 4104 bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap(); 4105 if (!NUW && !NSW) 4106 break; 4107 if (!NSW && I.isSigned()) 4108 break; 4109 return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0)); 4110 } 4111 } 4112 } 4113 4114 if (BO0) { 4115 // Transform A & (L - 1) `ult` L --> L != 0 4116 auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes()); 4117 auto BitwiseAnd = m_c_And(m_Value(), LSubOne); 4118 4119 if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) { 4120 auto *Zero = Constant::getNullValue(BO0->getType()); 4121 return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero); 4122 } 4123 } 4124 4125 if (Value *V = foldUnsignedMultiplicationOverflowCheck(I)) 4126 return replaceInstUsesWith(I, V); 4127 4128 if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder)) 4129 return replaceInstUsesWith(I, V); 4130 4131 if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder)) 4132 return replaceInstUsesWith(I, V); 4133 4134 if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder)) 4135 return replaceInstUsesWith(I, V); 4136 4137 return nullptr; 4138 } 4139 4140 /// Fold icmp Pred min|max(X, Y), X. 4141 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) { 4142 ICmpInst::Predicate Pred = Cmp.getPredicate(); 4143 Value *Op0 = Cmp.getOperand(0); 4144 Value *X = Cmp.getOperand(1); 4145 4146 // Canonicalize minimum or maximum operand to LHS of the icmp. 4147 if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) || 4148 match(X, m_c_SMax(m_Specific(Op0), m_Value())) || 4149 match(X, m_c_UMin(m_Specific(Op0), m_Value())) || 4150 match(X, m_c_UMax(m_Specific(Op0), m_Value()))) { 4151 std::swap(Op0, X); 4152 Pred = Cmp.getSwappedPredicate(); 4153 } 4154 4155 Value *Y; 4156 if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) { 4157 // smin(X, Y) == X --> X s<= Y 4158 // smin(X, Y) s>= X --> X s<= Y 4159 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE) 4160 return new ICmpInst(ICmpInst::ICMP_SLE, X, Y); 4161 4162 // smin(X, Y) != X --> X s> Y 4163 // smin(X, Y) s< X --> X s> Y 4164 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT) 4165 return new ICmpInst(ICmpInst::ICMP_SGT, X, Y); 4166 4167 // These cases should be handled in InstSimplify: 4168 // smin(X, Y) s<= X --> true 4169 // smin(X, Y) s> X --> false 4170 return nullptr; 4171 } 4172 4173 if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) { 4174 // smax(X, Y) == X --> X s>= Y 4175 // smax(X, Y) s<= X --> X s>= Y 4176 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE) 4177 return new ICmpInst(ICmpInst::ICMP_SGE, X, Y); 4178 4179 // smax(X, Y) != X --> X s< Y 4180 // smax(X, Y) s> X --> X s< Y 4181 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT) 4182 return new ICmpInst(ICmpInst::ICMP_SLT, X, Y); 4183 4184 // These cases should be handled in InstSimplify: 4185 // smax(X, Y) s>= X --> true 4186 // smax(X, Y) s< X --> false 4187 return nullptr; 4188 } 4189 4190 if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) { 4191 // umin(X, Y) == X --> X u<= Y 4192 // umin(X, Y) u>= X --> X u<= Y 4193 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE) 4194 return new ICmpInst(ICmpInst::ICMP_ULE, X, Y); 4195 4196 // umin(X, Y) != X --> X u> Y 4197 // umin(X, Y) u< X --> X u> Y 4198 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT) 4199 return new ICmpInst(ICmpInst::ICMP_UGT, X, Y); 4200 4201 // These cases should be handled in InstSimplify: 4202 // umin(X, Y) u<= X --> true 4203 // umin(X, Y) u> X --> false 4204 return nullptr; 4205 } 4206 4207 if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) { 4208 // umax(X, Y) == X --> X u>= Y 4209 // umax(X, Y) u<= X --> X u>= Y 4210 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE) 4211 return new ICmpInst(ICmpInst::ICMP_UGE, X, Y); 4212 4213 // umax(X, Y) != X --> X u< Y 4214 // umax(X, Y) u> X --> X u< Y 4215 if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT) 4216 return new ICmpInst(ICmpInst::ICMP_ULT, X, Y); 4217 4218 // These cases should be handled in InstSimplify: 4219 // umax(X, Y) u>= X --> true 4220 // umax(X, Y) u< X --> false 4221 return nullptr; 4222 } 4223 4224 return nullptr; 4225 } 4226 4227 Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) { 4228 if (!I.isEquality()) 4229 return nullptr; 4230 4231 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 4232 const CmpInst::Predicate Pred = I.getPredicate(); 4233 Value *A, *B, *C, *D; 4234 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) { 4235 if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0 4236 Value *OtherVal = A == Op1 ? B : A; 4237 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 4238 } 4239 4240 if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) { 4241 // A^c1 == C^c2 --> A == C^(c1^c2) 4242 ConstantInt *C1, *C2; 4243 if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) && 4244 Op1->hasOneUse()) { 4245 Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue()); 4246 Value *Xor = Builder.CreateXor(C, NC); 4247 return new ICmpInst(Pred, A, Xor); 4248 } 4249 4250 // A^B == A^D -> B == D 4251 if (A == C) 4252 return new ICmpInst(Pred, B, D); 4253 if (A == D) 4254 return new ICmpInst(Pred, B, C); 4255 if (B == C) 4256 return new ICmpInst(Pred, A, D); 4257 if (B == D) 4258 return new ICmpInst(Pred, A, C); 4259 } 4260 } 4261 4262 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) { 4263 // A == (A^B) -> B == 0 4264 Value *OtherVal = A == Op0 ? B : A; 4265 return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType())); 4266 } 4267 4268 // (X&Z) == (Y&Z) -> (X^Y) & Z == 0 4269 if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) && 4270 match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) { 4271 Value *X = nullptr, *Y = nullptr, *Z = nullptr; 4272 4273 if (A == C) { 4274 X = B; 4275 Y = D; 4276 Z = A; 4277 } else if (A == D) { 4278 X = B; 4279 Y = C; 4280 Z = A; 4281 } else if (B == C) { 4282 X = A; 4283 Y = D; 4284 Z = B; 4285 } else if (B == D) { 4286 X = A; 4287 Y = C; 4288 Z = B; 4289 } 4290 4291 if (X) { // Build (X^Y) & Z 4292 Op1 = Builder.CreateXor(X, Y); 4293 Op1 = Builder.CreateAnd(Op1, Z); 4294 return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType())); 4295 } 4296 } 4297 4298 // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B) 4299 // and (B & (1<<X)-1) == (zext A) --> A == (trunc B) 4300 ConstantInt *Cst1; 4301 if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) && 4302 match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) || 4303 (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) && 4304 match(Op1, m_ZExt(m_Value(A))))) { 4305 APInt Pow2 = Cst1->getValue() + 1; 4306 if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) && 4307 Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth()) 4308 return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType())); 4309 } 4310 4311 // (A >> C) == (B >> C) --> (A^B) u< (1 << C) 4312 // For lshr and ashr pairs. 4313 if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) && 4314 match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) || 4315 (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) && 4316 match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) { 4317 unsigned TypeBits = Cst1->getBitWidth(); 4318 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 4319 if (ShAmt < TypeBits && ShAmt != 0) { 4320 ICmpInst::Predicate NewPred = 4321 Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; 4322 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 4323 APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt); 4324 return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal)); 4325 } 4326 } 4327 4328 // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0 4329 if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) && 4330 match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) { 4331 unsigned TypeBits = Cst1->getBitWidth(); 4332 unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); 4333 if (ShAmt < TypeBits && ShAmt != 0) { 4334 Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted"); 4335 APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt); 4336 Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal), 4337 I.getName() + ".mask"); 4338 return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType())); 4339 } 4340 } 4341 4342 // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to 4343 // "icmp (and X, mask), cst" 4344 uint64_t ShAmt = 0; 4345 if (Op0->hasOneUse() && 4346 match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) && 4347 match(Op1, m_ConstantInt(Cst1)) && 4348 // Only do this when A has multiple uses. This is most important to do 4349 // when it exposes other optimizations. 4350 !A->hasOneUse()) { 4351 unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits(); 4352 4353 if (ShAmt < ASize) { 4354 APInt MaskV = 4355 APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits()); 4356 MaskV <<= ShAmt; 4357 4358 APInt CmpV = Cst1->getValue().zext(ASize); 4359 CmpV <<= ShAmt; 4360 4361 Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV)); 4362 return new ICmpInst(Pred, Mask, Builder.getInt(CmpV)); 4363 } 4364 } 4365 4366 // If both operands are byte-swapped or bit-reversed, just compare the 4367 // original values. 4368 // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant() 4369 // and handle more intrinsics. 4370 if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) || 4371 (match(Op0, m_BitReverse(m_Value(A))) && 4372 match(Op1, m_BitReverse(m_Value(B))))) 4373 return new ICmpInst(Pred, A, B); 4374 4375 // Canonicalize checking for a power-of-2-or-zero value: 4376 // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants) 4377 // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants) 4378 if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()), 4379 m_Deferred(A)))) || 4380 !match(Op1, m_ZeroInt())) 4381 A = nullptr; 4382 4383 // (A & -A) == A --> ctpop(A) < 2 (four commuted variants) 4384 // (-A & A) != A --> ctpop(A) > 1 (four commuted variants) 4385 if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1))))) 4386 A = Op1; 4387 else if (match(Op1, 4388 m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0))))) 4389 A = Op0; 4390 4391 if (A) { 4392 Type *Ty = A->getType(); 4393 CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A); 4394 return Pred == ICmpInst::ICMP_EQ 4395 ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop, ConstantInt::get(Ty, 2)) 4396 : new ICmpInst(ICmpInst::ICMP_UGT, CtPop, ConstantInt::get(Ty, 1)); 4397 } 4398 4399 return nullptr; 4400 } 4401 4402 static Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp, 4403 InstCombiner::BuilderTy &Builder) { 4404 assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0"); 4405 auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0)); 4406 Value *X; 4407 if (!match(CastOp0, m_ZExtOrSExt(m_Value(X)))) 4408 return nullptr; 4409 4410 bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt; 4411 bool IsSignedCmp = ICmp.isSigned(); 4412 if (auto *CastOp1 = dyn_cast<CastInst>(ICmp.getOperand(1))) { 4413 // If the signedness of the two casts doesn't agree (i.e. one is a sext 4414 // and the other is a zext), then we can't handle this. 4415 // TODO: This is too strict. We can handle some predicates (equality?). 4416 if (CastOp0->getOpcode() != CastOp1->getOpcode()) 4417 return nullptr; 4418 4419 // Not an extension from the same type? 4420 Value *Y = CastOp1->getOperand(0); 4421 Type *XTy = X->getType(), *YTy = Y->getType(); 4422 if (XTy != YTy) { 4423 // One of the casts must have one use because we are creating a new cast. 4424 if (!CastOp0->hasOneUse() && !CastOp1->hasOneUse()) 4425 return nullptr; 4426 // Extend the narrower operand to the type of the wider operand. 4427 if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits()) 4428 X = Builder.CreateCast(CastOp0->getOpcode(), X, YTy); 4429 else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits()) 4430 Y = Builder.CreateCast(CastOp0->getOpcode(), Y, XTy); 4431 else 4432 return nullptr; 4433 } 4434 4435 // (zext X) == (zext Y) --> X == Y 4436 // (sext X) == (sext Y) --> X == Y 4437 if (ICmp.isEquality()) 4438 return new ICmpInst(ICmp.getPredicate(), X, Y); 4439 4440 // A signed comparison of sign extended values simplifies into a 4441 // signed comparison. 4442 if (IsSignedCmp && IsSignedExt) 4443 return new ICmpInst(ICmp.getPredicate(), X, Y); 4444 4445 // The other three cases all fold into an unsigned comparison. 4446 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y); 4447 } 4448 4449 // Below here, we are only folding a compare with constant. 4450 auto *C = dyn_cast<Constant>(ICmp.getOperand(1)); 4451 if (!C) 4452 return nullptr; 4453 4454 // Compute the constant that would happen if we truncated to SrcTy then 4455 // re-extended to DestTy. 4456 Type *SrcTy = CastOp0->getSrcTy(); 4457 Type *DestTy = CastOp0->getDestTy(); 4458 Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy); 4459 Constant *Res2 = ConstantExpr::getCast(CastOp0->getOpcode(), Res1, DestTy); 4460 4461 // If the re-extended constant didn't change... 4462 if (Res2 == C) { 4463 if (ICmp.isEquality()) 4464 return new ICmpInst(ICmp.getPredicate(), X, Res1); 4465 4466 // A signed comparison of sign extended values simplifies into a 4467 // signed comparison. 4468 if (IsSignedExt && IsSignedCmp) 4469 return new ICmpInst(ICmp.getPredicate(), X, Res1); 4470 4471 // The other three cases all fold into an unsigned comparison. 4472 return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res1); 4473 } 4474 4475 // The re-extended constant changed, partly changed (in the case of a vector), 4476 // or could not be determined to be equal (in the case of a constant 4477 // expression), so the constant cannot be represented in the shorter type. 4478 // All the cases that fold to true or false will have already been handled 4479 // by SimplifyICmpInst, so only deal with the tricky case. 4480 if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C)) 4481 return nullptr; 4482 4483 // Is source op positive? 4484 // icmp ult (sext X), C --> icmp sgt X, -1 4485 if (ICmp.getPredicate() == ICmpInst::ICMP_ULT) 4486 return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy)); 4487 4488 // Is source op negative? 4489 // icmp ugt (sext X), C --> icmp slt X, 0 4490 assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!"); 4491 return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy)); 4492 } 4493 4494 /// Handle icmp (cast x), (cast or constant). 4495 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) { 4496 auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0)); 4497 if (!CastOp0) 4498 return nullptr; 4499 if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1))) 4500 return nullptr; 4501 4502 Value *Op0Src = CastOp0->getOperand(0); 4503 Type *SrcTy = CastOp0->getSrcTy(); 4504 Type *DestTy = CastOp0->getDestTy(); 4505 4506 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the 4507 // integer type is the same size as the pointer type. 4508 auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) { 4509 if (isa<VectorType>(SrcTy)) { 4510 SrcTy = cast<VectorType>(SrcTy)->getElementType(); 4511 DestTy = cast<VectorType>(DestTy)->getElementType(); 4512 } 4513 return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth(); 4514 }; 4515 if (CastOp0->getOpcode() == Instruction::PtrToInt && 4516 CompatibleSizes(SrcTy, DestTy)) { 4517 Value *NewOp1 = nullptr; 4518 if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) { 4519 Value *PtrSrc = PtrToIntOp1->getOperand(0); 4520 if (PtrSrc->getType()->getPointerAddressSpace() == 4521 Op0Src->getType()->getPointerAddressSpace()) { 4522 NewOp1 = PtrToIntOp1->getOperand(0); 4523 // If the pointer types don't match, insert a bitcast. 4524 if (Op0Src->getType() != NewOp1->getType()) 4525 NewOp1 = Builder.CreateBitCast(NewOp1, Op0Src->getType()); 4526 } 4527 } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) { 4528 NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy); 4529 } 4530 4531 if (NewOp1) 4532 return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1); 4533 } 4534 4535 return foldICmpWithZextOrSext(ICmp, Builder); 4536 } 4537 4538 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS) { 4539 switch (BinaryOp) { 4540 default: 4541 llvm_unreachable("Unsupported binary op"); 4542 case Instruction::Add: 4543 case Instruction::Sub: 4544 return match(RHS, m_Zero()); 4545 case Instruction::Mul: 4546 return match(RHS, m_One()); 4547 } 4548 } 4549 4550 OverflowResult 4551 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp, 4552 bool IsSigned, Value *LHS, Value *RHS, 4553 Instruction *CxtI) const { 4554 switch (BinaryOp) { 4555 default: 4556 llvm_unreachable("Unsupported binary op"); 4557 case Instruction::Add: 4558 if (IsSigned) 4559 return computeOverflowForSignedAdd(LHS, RHS, CxtI); 4560 else 4561 return computeOverflowForUnsignedAdd(LHS, RHS, CxtI); 4562 case Instruction::Sub: 4563 if (IsSigned) 4564 return computeOverflowForSignedSub(LHS, RHS, CxtI); 4565 else 4566 return computeOverflowForUnsignedSub(LHS, RHS, CxtI); 4567 case Instruction::Mul: 4568 if (IsSigned) 4569 return computeOverflowForSignedMul(LHS, RHS, CxtI); 4570 else 4571 return computeOverflowForUnsignedMul(LHS, RHS, CxtI); 4572 } 4573 } 4574 4575 bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp, 4576 bool IsSigned, Value *LHS, 4577 Value *RHS, Instruction &OrigI, 4578 Value *&Result, 4579 Constant *&Overflow) { 4580 if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS)) 4581 std::swap(LHS, RHS); 4582 4583 // If the overflow check was an add followed by a compare, the insertion point 4584 // may be pointing to the compare. We want to insert the new instructions 4585 // before the add in case there are uses of the add between the add and the 4586 // compare. 4587 Builder.SetInsertPoint(&OrigI); 4588 4589 Type *OverflowTy = Type::getInt1Ty(LHS->getContext()); 4590 if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType())) 4591 OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount()); 4592 4593 if (isNeutralValue(BinaryOp, RHS)) { 4594 Result = LHS; 4595 Overflow = ConstantInt::getFalse(OverflowTy); 4596 return true; 4597 } 4598 4599 switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) { 4600 case OverflowResult::MayOverflow: 4601 return false; 4602 case OverflowResult::AlwaysOverflowsLow: 4603 case OverflowResult::AlwaysOverflowsHigh: 4604 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS); 4605 Result->takeName(&OrigI); 4606 Overflow = ConstantInt::getTrue(OverflowTy); 4607 return true; 4608 case OverflowResult::NeverOverflows: 4609 Result = Builder.CreateBinOp(BinaryOp, LHS, RHS); 4610 Result->takeName(&OrigI); 4611 Overflow = ConstantInt::getFalse(OverflowTy); 4612 if (auto *Inst = dyn_cast<Instruction>(Result)) { 4613 if (IsSigned) 4614 Inst->setHasNoSignedWrap(); 4615 else 4616 Inst->setHasNoUnsignedWrap(); 4617 } 4618 return true; 4619 } 4620 4621 llvm_unreachable("Unexpected overflow result"); 4622 } 4623 4624 /// Recognize and process idiom involving test for multiplication 4625 /// overflow. 4626 /// 4627 /// The caller has matched a pattern of the form: 4628 /// I = cmp u (mul(zext A, zext B), V 4629 /// The function checks if this is a test for overflow and if so replaces 4630 /// multiplication with call to 'mul.with.overflow' intrinsic. 4631 /// 4632 /// \param I Compare instruction. 4633 /// \param MulVal Result of 'mult' instruction. It is one of the arguments of 4634 /// the compare instruction. Must be of integer type. 4635 /// \param OtherVal The other argument of compare instruction. 4636 /// \returns Instruction which must replace the compare instruction, NULL if no 4637 /// replacement required. 4638 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal, 4639 Value *OtherVal, 4640 InstCombinerImpl &IC) { 4641 // Don't bother doing this transformation for pointers, don't do it for 4642 // vectors. 4643 if (!isa<IntegerType>(MulVal->getType())) 4644 return nullptr; 4645 4646 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal); 4647 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal); 4648 auto *MulInstr = dyn_cast<Instruction>(MulVal); 4649 if (!MulInstr) 4650 return nullptr; 4651 assert(MulInstr->getOpcode() == Instruction::Mul); 4652 4653 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)), 4654 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1)); 4655 assert(LHS->getOpcode() == Instruction::ZExt); 4656 assert(RHS->getOpcode() == Instruction::ZExt); 4657 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0); 4658 4659 // Calculate type and width of the result produced by mul.with.overflow. 4660 Type *TyA = A->getType(), *TyB = B->getType(); 4661 unsigned WidthA = TyA->getPrimitiveSizeInBits(), 4662 WidthB = TyB->getPrimitiveSizeInBits(); 4663 unsigned MulWidth; 4664 Type *MulType; 4665 if (WidthB > WidthA) { 4666 MulWidth = WidthB; 4667 MulType = TyB; 4668 } else { 4669 MulWidth = WidthA; 4670 MulType = TyA; 4671 } 4672 4673 // In order to replace the original mul with a narrower mul.with.overflow, 4674 // all uses must ignore upper bits of the product. The number of used low 4675 // bits must be not greater than the width of mul.with.overflow. 4676 if (MulVal->hasNUsesOrMore(2)) 4677 for (User *U : MulVal->users()) { 4678 if (U == &I) 4679 continue; 4680 if (TruncInst *TI = dyn_cast<TruncInst>(U)) { 4681 // Check if truncation ignores bits above MulWidth. 4682 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits(); 4683 if (TruncWidth > MulWidth) 4684 return nullptr; 4685 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) { 4686 // Check if AND ignores bits above MulWidth. 4687 if (BO->getOpcode() != Instruction::And) 4688 return nullptr; 4689 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) { 4690 const APInt &CVal = CI->getValue(); 4691 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth) 4692 return nullptr; 4693 } else { 4694 // In this case we could have the operand of the binary operation 4695 // being defined in another block, and performing the replacement 4696 // could break the dominance relation. 4697 return nullptr; 4698 } 4699 } else { 4700 // Other uses prohibit this transformation. 4701 return nullptr; 4702 } 4703 } 4704 4705 // Recognize patterns 4706 switch (I.getPredicate()) { 4707 case ICmpInst::ICMP_EQ: 4708 case ICmpInst::ICMP_NE: 4709 // Recognize pattern: 4710 // mulval = mul(zext A, zext B) 4711 // cmp eq/neq mulval, and(mulval, mask), mask selects low MulWidth bits. 4712 ConstantInt *CI; 4713 Value *ValToMask; 4714 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) { 4715 if (ValToMask != MulVal) 4716 return nullptr; 4717 const APInt &CVal = CI->getValue() + 1; 4718 if (CVal.isPowerOf2()) { 4719 unsigned MaskWidth = CVal.logBase2(); 4720 if (MaskWidth == MulWidth) 4721 break; // Recognized 4722 } 4723 } 4724 return nullptr; 4725 4726 case ICmpInst::ICMP_UGT: 4727 // Recognize pattern: 4728 // mulval = mul(zext A, zext B) 4729 // cmp ugt mulval, max 4730 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 4731 APInt MaxVal = APInt::getMaxValue(MulWidth); 4732 MaxVal = MaxVal.zext(CI->getBitWidth()); 4733 if (MaxVal.eq(CI->getValue())) 4734 break; // Recognized 4735 } 4736 return nullptr; 4737 4738 case ICmpInst::ICMP_UGE: 4739 // Recognize pattern: 4740 // mulval = mul(zext A, zext B) 4741 // cmp uge mulval, max+1 4742 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 4743 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth); 4744 if (MaxVal.eq(CI->getValue())) 4745 break; // Recognized 4746 } 4747 return nullptr; 4748 4749 case ICmpInst::ICMP_ULE: 4750 // Recognize pattern: 4751 // mulval = mul(zext A, zext B) 4752 // cmp ule mulval, max 4753 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 4754 APInt MaxVal = APInt::getMaxValue(MulWidth); 4755 MaxVal = MaxVal.zext(CI->getBitWidth()); 4756 if (MaxVal.eq(CI->getValue())) 4757 break; // Recognized 4758 } 4759 return nullptr; 4760 4761 case ICmpInst::ICMP_ULT: 4762 // Recognize pattern: 4763 // mulval = mul(zext A, zext B) 4764 // cmp ule mulval, max + 1 4765 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) { 4766 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth); 4767 if (MaxVal.eq(CI->getValue())) 4768 break; // Recognized 4769 } 4770 return nullptr; 4771 4772 default: 4773 return nullptr; 4774 } 4775 4776 InstCombiner::BuilderTy &Builder = IC.Builder; 4777 Builder.SetInsertPoint(MulInstr); 4778 4779 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B) 4780 Value *MulA = A, *MulB = B; 4781 if (WidthA < MulWidth) 4782 MulA = Builder.CreateZExt(A, MulType); 4783 if (WidthB < MulWidth) 4784 MulB = Builder.CreateZExt(B, MulType); 4785 Function *F = Intrinsic::getDeclaration( 4786 I.getModule(), Intrinsic::umul_with_overflow, MulType); 4787 CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul"); 4788 IC.addToWorklist(MulInstr); 4789 4790 // If there are uses of mul result other than the comparison, we know that 4791 // they are truncation or binary AND. Change them to use result of 4792 // mul.with.overflow and adjust properly mask/size. 4793 if (MulVal->hasNUsesOrMore(2)) { 4794 Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value"); 4795 for (User *U : make_early_inc_range(MulVal->users())) { 4796 if (U == &I || U == OtherVal) 4797 continue; 4798 if (TruncInst *TI = dyn_cast<TruncInst>(U)) { 4799 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth) 4800 IC.replaceInstUsesWith(*TI, Mul); 4801 else 4802 TI->setOperand(0, Mul); 4803 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) { 4804 assert(BO->getOpcode() == Instruction::And); 4805 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask) 4806 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1)); 4807 APInt ShortMask = CI->getValue().trunc(MulWidth); 4808 Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask); 4809 Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType()); 4810 IC.replaceInstUsesWith(*BO, Zext); 4811 } else { 4812 llvm_unreachable("Unexpected Binary operation"); 4813 } 4814 IC.addToWorklist(cast<Instruction>(U)); 4815 } 4816 } 4817 if (isa<Instruction>(OtherVal)) 4818 IC.addToWorklist(cast<Instruction>(OtherVal)); 4819 4820 // The original icmp gets replaced with the overflow value, maybe inverted 4821 // depending on predicate. 4822 bool Inverse = false; 4823 switch (I.getPredicate()) { 4824 case ICmpInst::ICMP_NE: 4825 break; 4826 case ICmpInst::ICMP_EQ: 4827 Inverse = true; 4828 break; 4829 case ICmpInst::ICMP_UGT: 4830 case ICmpInst::ICMP_UGE: 4831 if (I.getOperand(0) == MulVal) 4832 break; 4833 Inverse = true; 4834 break; 4835 case ICmpInst::ICMP_ULT: 4836 case ICmpInst::ICMP_ULE: 4837 if (I.getOperand(1) == MulVal) 4838 break; 4839 Inverse = true; 4840 break; 4841 default: 4842 llvm_unreachable("Unexpected predicate"); 4843 } 4844 if (Inverse) { 4845 Value *Res = Builder.CreateExtractValue(Call, 1); 4846 return BinaryOperator::CreateNot(Res); 4847 } 4848 4849 return ExtractValueInst::Create(Call, 1); 4850 } 4851 4852 /// When performing a comparison against a constant, it is possible that not all 4853 /// the bits in the LHS are demanded. This helper method computes the mask that 4854 /// IS demanded. 4855 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) { 4856 const APInt *RHS; 4857 if (!match(I.getOperand(1), m_APInt(RHS))) 4858 return APInt::getAllOnesValue(BitWidth); 4859 4860 // If this is a normal comparison, it demands all bits. If it is a sign bit 4861 // comparison, it only demands the sign bit. 4862 bool UnusedBit; 4863 if (InstCombiner::isSignBitCheck(I.getPredicate(), *RHS, UnusedBit)) 4864 return APInt::getSignMask(BitWidth); 4865 4866 switch (I.getPredicate()) { 4867 // For a UGT comparison, we don't care about any bits that 4868 // correspond to the trailing ones of the comparand. The value of these 4869 // bits doesn't impact the outcome of the comparison, because any value 4870 // greater than the RHS must differ in a bit higher than these due to carry. 4871 case ICmpInst::ICMP_UGT: 4872 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes()); 4873 4874 // Similarly, for a ULT comparison, we don't care about the trailing zeros. 4875 // Any value less than the RHS must differ in a higher bit because of carries. 4876 case ICmpInst::ICMP_ULT: 4877 return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros()); 4878 4879 default: 4880 return APInt::getAllOnesValue(BitWidth); 4881 } 4882 } 4883 4884 /// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst 4885 /// should be swapped. 4886 /// The decision is based on how many times these two operands are reused 4887 /// as subtract operands and their positions in those instructions. 4888 /// The rationale is that several architectures use the same instruction for 4889 /// both subtract and cmp. Thus, it is better if the order of those operands 4890 /// match. 4891 /// \return true if Op0 and Op1 should be swapped. 4892 static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) { 4893 // Filter out pointer values as those cannot appear directly in subtract. 4894 // FIXME: we may want to go through inttoptrs or bitcasts. 4895 if (Op0->getType()->isPointerTy()) 4896 return false; 4897 // If a subtract already has the same operands as a compare, swapping would be 4898 // bad. If a subtract has the same operands as a compare but in reverse order, 4899 // then swapping is good. 4900 int GoodToSwap = 0; 4901 for (const User *U : Op0->users()) { 4902 if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0)))) 4903 GoodToSwap++; 4904 else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1)))) 4905 GoodToSwap--; 4906 } 4907 return GoodToSwap > 0; 4908 } 4909 4910 /// Check that one use is in the same block as the definition and all 4911 /// other uses are in blocks dominated by a given block. 4912 /// 4913 /// \param DI Definition 4914 /// \param UI Use 4915 /// \param DB Block that must dominate all uses of \p DI outside 4916 /// the parent block 4917 /// \return true when \p UI is the only use of \p DI in the parent block 4918 /// and all other uses of \p DI are in blocks dominated by \p DB. 4919 /// 4920 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI, 4921 const Instruction *UI, 4922 const BasicBlock *DB) const { 4923 assert(DI && UI && "Instruction not defined\n"); 4924 // Ignore incomplete definitions. 4925 if (!DI->getParent()) 4926 return false; 4927 // DI and UI must be in the same block. 4928 if (DI->getParent() != UI->getParent()) 4929 return false; 4930 // Protect from self-referencing blocks. 4931 if (DI->getParent() == DB) 4932 return false; 4933 for (const User *U : DI->users()) { 4934 auto *Usr = cast<Instruction>(U); 4935 if (Usr != UI && !DT.dominates(DB, Usr->getParent())) 4936 return false; 4937 } 4938 return true; 4939 } 4940 4941 /// Return true when the instruction sequence within a block is select-cmp-br. 4942 static bool isChainSelectCmpBranch(const SelectInst *SI) { 4943 const BasicBlock *BB = SI->getParent(); 4944 if (!BB) 4945 return false; 4946 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator()); 4947 if (!BI || BI->getNumSuccessors() != 2) 4948 return false; 4949 auto *IC = dyn_cast<ICmpInst>(BI->getCondition()); 4950 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI)) 4951 return false; 4952 return true; 4953 } 4954 4955 /// True when a select result is replaced by one of its operands 4956 /// in select-icmp sequence. This will eventually result in the elimination 4957 /// of the select. 4958 /// 4959 /// \param SI Select instruction 4960 /// \param Icmp Compare instruction 4961 /// \param SIOpd Operand that replaces the select 4962 /// 4963 /// Notes: 4964 /// - The replacement is global and requires dominator information 4965 /// - The caller is responsible for the actual replacement 4966 /// 4967 /// Example: 4968 /// 4969 /// entry: 4970 /// %4 = select i1 %3, %C* %0, %C* null 4971 /// %5 = icmp eq %C* %4, null 4972 /// br i1 %5, label %9, label %7 4973 /// ... 4974 /// ; <label>:7 ; preds = %entry 4975 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0 4976 /// ... 4977 /// 4978 /// can be transformed to 4979 /// 4980 /// %5 = icmp eq %C* %0, null 4981 /// %6 = select i1 %3, i1 %5, i1 true 4982 /// br i1 %6, label %9, label %7 4983 /// ... 4984 /// ; <label>:7 ; preds = %entry 4985 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0! 4986 /// 4987 /// Similar when the first operand of the select is a constant or/and 4988 /// the compare is for not equal rather than equal. 4989 /// 4990 /// NOTE: The function is only called when the select and compare constants 4991 /// are equal, the optimization can work only for EQ predicates. This is not a 4992 /// major restriction since a NE compare should be 'normalized' to an equal 4993 /// compare, which usually happens in the combiner and test case 4994 /// select-cmp-br.ll checks for it. 4995 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI, 4996 const ICmpInst *Icmp, 4997 const unsigned SIOpd) { 4998 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!"); 4999 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) { 5000 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1); 5001 // The check for the single predecessor is not the best that can be 5002 // done. But it protects efficiently against cases like when SI's 5003 // home block has two successors, Succ and Succ1, and Succ1 predecessor 5004 // of Succ. Then SI can't be replaced by SIOpd because the use that gets 5005 // replaced can be reached on either path. So the uniqueness check 5006 // guarantees that the path all uses of SI (outside SI's parent) are on 5007 // is disjoint from all other paths out of SI. But that information 5008 // is more expensive to compute, and the trade-off here is in favor 5009 // of compile-time. It should also be noticed that we check for a single 5010 // predecessor and not only uniqueness. This to handle the situation when 5011 // Succ and Succ1 points to the same basic block. 5012 if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) { 5013 NumSel++; 5014 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent()); 5015 return true; 5016 } 5017 } 5018 return false; 5019 } 5020 5021 /// Try to fold the comparison based on range information we can get by checking 5022 /// whether bits are known to be zero or one in the inputs. 5023 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) { 5024 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 5025 Type *Ty = Op0->getType(); 5026 ICmpInst::Predicate Pred = I.getPredicate(); 5027 5028 // Get scalar or pointer size. 5029 unsigned BitWidth = Ty->isIntOrIntVectorTy() 5030 ? Ty->getScalarSizeInBits() 5031 : DL.getPointerTypeSizeInBits(Ty->getScalarType()); 5032 5033 if (!BitWidth) 5034 return nullptr; 5035 5036 KnownBits Op0Known(BitWidth); 5037 KnownBits Op1Known(BitWidth); 5038 5039 if (SimplifyDemandedBits(&I, 0, 5040 getDemandedBitsLHSMask(I, BitWidth), 5041 Op0Known, 0)) 5042 return &I; 5043 5044 if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth), 5045 Op1Known, 0)) 5046 return &I; 5047 5048 // Given the known and unknown bits, compute a range that the LHS could be 5049 // in. Compute the Min, Max and RHS values based on the known bits. For the 5050 // EQ and NE we use unsigned values. 5051 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0); 5052 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0); 5053 if (I.isSigned()) { 5054 Op0Min = Op0Known.getSignedMinValue(); 5055 Op0Max = Op0Known.getSignedMaxValue(); 5056 Op1Min = Op1Known.getSignedMinValue(); 5057 Op1Max = Op1Known.getSignedMaxValue(); 5058 } else { 5059 Op0Min = Op0Known.getMinValue(); 5060 Op0Max = Op0Known.getMaxValue(); 5061 Op1Min = Op1Known.getMinValue(); 5062 Op1Max = Op1Known.getMaxValue(); 5063 } 5064 5065 // If Min and Max are known to be the same, then SimplifyDemandedBits figured 5066 // out that the LHS or RHS is a constant. Constant fold this now, so that 5067 // code below can assume that Min != Max. 5068 if (!isa<Constant>(Op0) && Op0Min == Op0Max) 5069 return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1); 5070 if (!isa<Constant>(Op1) && Op1Min == Op1Max) 5071 return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min)); 5072 5073 // Based on the range information we know about the LHS, see if we can 5074 // simplify this comparison. For example, (x&4) < 8 is always true. 5075 switch (Pred) { 5076 default: 5077 llvm_unreachable("Unknown icmp opcode!"); 5078 case ICmpInst::ICMP_EQ: 5079 case ICmpInst::ICMP_NE: { 5080 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) 5081 return replaceInstUsesWith( 5082 I, ConstantInt::getBool(I.getType(), Pred == CmpInst::ICMP_NE)); 5083 5084 // If all bits are known zero except for one, then we know at most one bit 5085 // is set. If the comparison is against zero, then this is a check to see if 5086 // *that* bit is set. 5087 APInt Op0KnownZeroInverted = ~Op0Known.Zero; 5088 if (Op1Known.isZero()) { 5089 // If the LHS is an AND with the same constant, look through it. 5090 Value *LHS = nullptr; 5091 const APInt *LHSC; 5092 if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) || 5093 *LHSC != Op0KnownZeroInverted) 5094 LHS = Op0; 5095 5096 Value *X; 5097 if (match(LHS, m_Shl(m_One(), m_Value(X)))) { 5098 APInt ValToCheck = Op0KnownZeroInverted; 5099 Type *XTy = X->getType(); 5100 if (ValToCheck.isPowerOf2()) { 5101 // ((1 << X) & 8) == 0 -> X != 3 5102 // ((1 << X) & 8) != 0 -> X == 3 5103 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 5104 auto NewPred = ICmpInst::getInversePredicate(Pred); 5105 return new ICmpInst(NewPred, X, CmpC); 5106 } else if ((++ValToCheck).isPowerOf2()) { 5107 // ((1 << X) & 7) == 0 -> X >= 3 5108 // ((1 << X) & 7) != 0 -> X < 3 5109 auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros()); 5110 auto NewPred = 5111 Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT; 5112 return new ICmpInst(NewPred, X, CmpC); 5113 } 5114 } 5115 5116 // Check if the LHS is 8 >>u x and the result is a power of 2 like 1. 5117 const APInt *CI; 5118 if (Op0KnownZeroInverted.isOneValue() && 5119 match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) { 5120 // ((8 >>u X) & 1) == 0 -> X != 3 5121 // ((8 >>u X) & 1) != 0 -> X == 3 5122 unsigned CmpVal = CI->countTrailingZeros(); 5123 auto NewPred = ICmpInst::getInversePredicate(Pred); 5124 return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal)); 5125 } 5126 } 5127 break; 5128 } 5129 case ICmpInst::ICMP_ULT: { 5130 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B) 5131 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5132 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B) 5133 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5134 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B) 5135 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); 5136 5137 const APInt *CmpC; 5138 if (match(Op1, m_APInt(CmpC))) { 5139 // A <u C -> A == C-1 if min(A)+1 == C 5140 if (*CmpC == Op0Min + 1) 5141 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, 5142 ConstantInt::get(Op1->getType(), *CmpC - 1)); 5143 // X <u C --> X == 0, if the number of zero bits in the bottom of X 5144 // exceeds the log2 of C. 5145 if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2()) 5146 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, 5147 Constant::getNullValue(Op1->getType())); 5148 } 5149 break; 5150 } 5151 case ICmpInst::ICMP_UGT: { 5152 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B) 5153 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5154 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= max(B) 5155 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5156 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B) 5157 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); 5158 5159 const APInt *CmpC; 5160 if (match(Op1, m_APInt(CmpC))) { 5161 // A >u C -> A == C+1 if max(a)-1 == C 5162 if (*CmpC == Op0Max - 1) 5163 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, 5164 ConstantInt::get(Op1->getType(), *CmpC + 1)); 5165 // X >u C --> X != 0, if the number of zero bits in the bottom of X 5166 // exceeds the log2 of C. 5167 if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits()) 5168 return new ICmpInst(ICmpInst::ICMP_NE, Op0, 5169 Constant::getNullValue(Op1->getType())); 5170 } 5171 break; 5172 } 5173 case ICmpInst::ICMP_SLT: { 5174 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(C) 5175 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5176 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(C) 5177 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5178 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B) 5179 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); 5180 const APInt *CmpC; 5181 if (match(Op1, m_APInt(CmpC))) { 5182 if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C 5183 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, 5184 ConstantInt::get(Op1->getType(), *CmpC - 1)); 5185 } 5186 break; 5187 } 5188 case ICmpInst::ICMP_SGT: { 5189 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B) 5190 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5191 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B) 5192 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5193 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B) 5194 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1); 5195 const APInt *CmpC; 5196 if (match(Op1, m_APInt(CmpC))) { 5197 if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C 5198 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, 5199 ConstantInt::get(Op1->getType(), *CmpC + 1)); 5200 } 5201 break; 5202 } 5203 case ICmpInst::ICMP_SGE: 5204 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!"); 5205 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B) 5206 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5207 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B) 5208 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5209 if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B) 5210 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1); 5211 break; 5212 case ICmpInst::ICMP_SLE: 5213 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!"); 5214 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B) 5215 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5216 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B) 5217 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5218 if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B) 5219 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1); 5220 break; 5221 case ICmpInst::ICMP_UGE: 5222 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!"); 5223 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B) 5224 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5225 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B) 5226 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5227 if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B) 5228 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1); 5229 break; 5230 case ICmpInst::ICMP_ULE: 5231 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!"); 5232 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B) 5233 return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType())); 5234 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B) 5235 return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType())); 5236 if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B) 5237 return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1); 5238 break; 5239 } 5240 5241 // Turn a signed comparison into an unsigned one if both operands are known to 5242 // have the same sign. 5243 if (I.isSigned() && 5244 ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) || 5245 (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) 5246 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1); 5247 5248 return nullptr; 5249 } 5250 5251 llvm::Optional<std::pair<CmpInst::Predicate, Constant *>> 5252 InstCombiner::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred, 5253 Constant *C) { 5254 assert(ICmpInst::isRelational(Pred) && ICmpInst::isIntPredicate(Pred) && 5255 "Only for relational integer predicates."); 5256 5257 Type *Type = C->getType(); 5258 bool IsSigned = ICmpInst::isSigned(Pred); 5259 5260 CmpInst::Predicate UnsignedPred = ICmpInst::getUnsignedPredicate(Pred); 5261 bool WillIncrement = 5262 UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT; 5263 5264 // Check if the constant operand can be safely incremented/decremented 5265 // without overflowing/underflowing. 5266 auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) { 5267 return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned); 5268 }; 5269 5270 Constant *SafeReplacementConstant = nullptr; 5271 if (auto *CI = dyn_cast<ConstantInt>(C)) { 5272 // Bail out if the constant can't be safely incremented/decremented. 5273 if (!ConstantIsOk(CI)) 5274 return llvm::None; 5275 } else if (auto *FVTy = dyn_cast<FixedVectorType>(Type)) { 5276 unsigned NumElts = FVTy->getNumElements(); 5277 for (unsigned i = 0; i != NumElts; ++i) { 5278 Constant *Elt = C->getAggregateElement(i); 5279 if (!Elt) 5280 return llvm::None; 5281 5282 if (isa<UndefValue>(Elt)) 5283 continue; 5284 5285 // Bail out if we can't determine if this constant is min/max or if we 5286 // know that this constant is min/max. 5287 auto *CI = dyn_cast<ConstantInt>(Elt); 5288 if (!CI || !ConstantIsOk(CI)) 5289 return llvm::None; 5290 5291 if (!SafeReplacementConstant) 5292 SafeReplacementConstant = CI; 5293 } 5294 } else { 5295 // ConstantExpr? 5296 return llvm::None; 5297 } 5298 5299 // It may not be safe to change a compare predicate in the presence of 5300 // undefined elements, so replace those elements with the first safe constant 5301 // that we found. 5302 // TODO: in case of poison, it is safe; let's replace undefs only. 5303 if (C->containsUndefOrPoisonElement()) { 5304 assert(SafeReplacementConstant && "Replacement constant not set"); 5305 C = Constant::replaceUndefsWith(C, SafeReplacementConstant); 5306 } 5307 5308 CmpInst::Predicate NewPred = CmpInst::getFlippedStrictnessPredicate(Pred); 5309 5310 // Increment or decrement the constant. 5311 Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true); 5312 Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne); 5313 5314 return std::make_pair(NewPred, NewC); 5315 } 5316 5317 /// If we have an icmp le or icmp ge instruction with a constant operand, turn 5318 /// it into the appropriate icmp lt or icmp gt instruction. This transform 5319 /// allows them to be folded in visitICmpInst. 5320 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) { 5321 ICmpInst::Predicate Pred = I.getPredicate(); 5322 if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) || 5323 InstCombiner::isCanonicalPredicate(Pred)) 5324 return nullptr; 5325 5326 Value *Op0 = I.getOperand(0); 5327 Value *Op1 = I.getOperand(1); 5328 auto *Op1C = dyn_cast<Constant>(Op1); 5329 if (!Op1C) 5330 return nullptr; 5331 5332 auto FlippedStrictness = 5333 InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, Op1C); 5334 if (!FlippedStrictness) 5335 return nullptr; 5336 5337 return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second); 5338 } 5339 5340 /// If we have a comparison with a non-canonical predicate, if we can update 5341 /// all the users, invert the predicate and adjust all the users. 5342 CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) { 5343 // Is the predicate already canonical? 5344 CmpInst::Predicate Pred = I.getPredicate(); 5345 if (InstCombiner::isCanonicalPredicate(Pred)) 5346 return nullptr; 5347 5348 // Can all users be adjusted to predicate inversion? 5349 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr)) 5350 return nullptr; 5351 5352 // Ok, we can canonicalize comparison! 5353 // Let's first invert the comparison's predicate. 5354 I.setPredicate(CmpInst::getInversePredicate(Pred)); 5355 I.setName(I.getName() + ".not"); 5356 5357 // And, adapt users. 5358 freelyInvertAllUsersOf(&I); 5359 5360 return &I; 5361 } 5362 5363 /// Integer compare with boolean values can always be turned into bitwise ops. 5364 static Instruction *canonicalizeICmpBool(ICmpInst &I, 5365 InstCombiner::BuilderTy &Builder) { 5366 Value *A = I.getOperand(0), *B = I.getOperand(1); 5367 assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only"); 5368 5369 // A boolean compared to true/false can be simplified to Op0/true/false in 5370 // 14 out of the 20 (10 predicates * 2 constants) possible combinations. 5371 // Cases not handled by InstSimplify are always 'not' of Op0. 5372 if (match(B, m_Zero())) { 5373 switch (I.getPredicate()) { 5374 case CmpInst::ICMP_EQ: // A == 0 -> !A 5375 case CmpInst::ICMP_ULE: // A <=u 0 -> !A 5376 case CmpInst::ICMP_SGE: // A >=s 0 -> !A 5377 return BinaryOperator::CreateNot(A); 5378 default: 5379 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 5380 } 5381 } else if (match(B, m_One())) { 5382 switch (I.getPredicate()) { 5383 case CmpInst::ICMP_NE: // A != 1 -> !A 5384 case CmpInst::ICMP_ULT: // A <u 1 -> !A 5385 case CmpInst::ICMP_SGT: // A >s -1 -> !A 5386 return BinaryOperator::CreateNot(A); 5387 default: 5388 llvm_unreachable("ICmp i1 X, C not simplified as expected."); 5389 } 5390 } 5391 5392 switch (I.getPredicate()) { 5393 default: 5394 llvm_unreachable("Invalid icmp instruction!"); 5395 case ICmpInst::ICMP_EQ: 5396 // icmp eq i1 A, B -> ~(A ^ B) 5397 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 5398 5399 case ICmpInst::ICMP_NE: 5400 // icmp ne i1 A, B -> A ^ B 5401 return BinaryOperator::CreateXor(A, B); 5402 5403 case ICmpInst::ICMP_UGT: 5404 // icmp ugt -> icmp ult 5405 std::swap(A, B); 5406 LLVM_FALLTHROUGH; 5407 case ICmpInst::ICMP_ULT: 5408 // icmp ult i1 A, B -> ~A & B 5409 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 5410 5411 case ICmpInst::ICMP_SGT: 5412 // icmp sgt -> icmp slt 5413 std::swap(A, B); 5414 LLVM_FALLTHROUGH; 5415 case ICmpInst::ICMP_SLT: 5416 // icmp slt i1 A, B -> A & ~B 5417 return BinaryOperator::CreateAnd(Builder.CreateNot(B), A); 5418 5419 case ICmpInst::ICMP_UGE: 5420 // icmp uge -> icmp ule 5421 std::swap(A, B); 5422 LLVM_FALLTHROUGH; 5423 case ICmpInst::ICMP_ULE: 5424 // icmp ule i1 A, B -> ~A | B 5425 return BinaryOperator::CreateOr(Builder.CreateNot(A), B); 5426 5427 case ICmpInst::ICMP_SGE: 5428 // icmp sge -> icmp sle 5429 std::swap(A, B); 5430 LLVM_FALLTHROUGH; 5431 case ICmpInst::ICMP_SLE: 5432 // icmp sle i1 A, B -> A | ~B 5433 return BinaryOperator::CreateOr(Builder.CreateNot(B), A); 5434 } 5435 } 5436 5437 // Transform pattern like: 5438 // (1 << Y) u<= X or ~(-1 << Y) u< X or ((1 << Y)+(-1)) u< X 5439 // (1 << Y) u> X or ~(-1 << Y) u>= X or ((1 << Y)+(-1)) u>= X 5440 // Into: 5441 // (X l>> Y) != 0 5442 // (X l>> Y) == 0 5443 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp, 5444 InstCombiner::BuilderTy &Builder) { 5445 ICmpInst::Predicate Pred, NewPred; 5446 Value *X, *Y; 5447 if (match(&Cmp, 5448 m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) { 5449 switch (Pred) { 5450 case ICmpInst::ICMP_ULE: 5451 NewPred = ICmpInst::ICMP_NE; 5452 break; 5453 case ICmpInst::ICMP_UGT: 5454 NewPred = ICmpInst::ICMP_EQ; 5455 break; 5456 default: 5457 return nullptr; 5458 } 5459 } else if (match(&Cmp, m_c_ICmp(Pred, 5460 m_OneUse(m_CombineOr( 5461 m_Not(m_Shl(m_AllOnes(), m_Value(Y))), 5462 m_Add(m_Shl(m_One(), m_Value(Y)), 5463 m_AllOnes()))), 5464 m_Value(X)))) { 5465 // The variant with 'add' is not canonical, (the variant with 'not' is) 5466 // we only get it because it has extra uses, and can't be canonicalized, 5467 5468 switch (Pred) { 5469 case ICmpInst::ICMP_ULT: 5470 NewPred = ICmpInst::ICMP_NE; 5471 break; 5472 case ICmpInst::ICMP_UGE: 5473 NewPred = ICmpInst::ICMP_EQ; 5474 break; 5475 default: 5476 return nullptr; 5477 } 5478 } else 5479 return nullptr; 5480 5481 Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits"); 5482 Constant *Zero = Constant::getNullValue(NewX->getType()); 5483 return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero); 5484 } 5485 5486 static Instruction *foldVectorCmp(CmpInst &Cmp, 5487 InstCombiner::BuilderTy &Builder) { 5488 const CmpInst::Predicate Pred = Cmp.getPredicate(); 5489 Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1); 5490 Value *V1, *V2; 5491 ArrayRef<int> M; 5492 if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M)))) 5493 return nullptr; 5494 5495 // If both arguments of the cmp are shuffles that use the same mask and 5496 // shuffle within a single vector, move the shuffle after the cmp: 5497 // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M 5498 Type *V1Ty = V1->getType(); 5499 if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) && 5500 V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) { 5501 Value *NewCmp = Builder.CreateCmp(Pred, V1, V2); 5502 return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), M); 5503 } 5504 5505 // Try to canonicalize compare with splatted operand and splat constant. 5506 // TODO: We could generalize this for more than splats. See/use the code in 5507 // InstCombiner::foldVectorBinop(). 5508 Constant *C; 5509 if (!LHS->hasOneUse() || !match(RHS, m_Constant(C))) 5510 return nullptr; 5511 5512 // Length-changing splats are ok, so adjust the constants as needed: 5513 // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M 5514 Constant *ScalarC = C->getSplatValue(/* AllowUndefs */ true); 5515 int MaskSplatIndex; 5516 if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) { 5517 // We allow undefs in matching, but this transform removes those for safety. 5518 // Demanded elements analysis should be able to recover some/all of that. 5519 C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(), 5520 ScalarC); 5521 SmallVector<int, 8> NewM(M.size(), MaskSplatIndex); 5522 Value *NewCmp = Builder.CreateCmp(Pred, V1, C); 5523 return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), 5524 NewM); 5525 } 5526 5527 return nullptr; 5528 } 5529 5530 // extract(uadd.with.overflow(A, B), 0) ult A 5531 // -> extract(uadd.with.overflow(A, B), 1) 5532 static Instruction *foldICmpOfUAddOv(ICmpInst &I) { 5533 CmpInst::Predicate Pred = I.getPredicate(); 5534 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 5535 5536 Value *UAddOv; 5537 Value *A, *B; 5538 auto UAddOvResultPat = m_ExtractValue<0>( 5539 m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B))); 5540 if (match(Op0, UAddOvResultPat) && 5541 ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) || 5542 (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) && 5543 (match(A, m_One()) || match(B, m_One()))) || 5544 (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) && 5545 (match(A, m_AllOnes()) || match(B, m_AllOnes()))))) 5546 // extract(uadd.with.overflow(A, B), 0) < A 5547 // extract(uadd.with.overflow(A, 1), 0) == 0 5548 // extract(uadd.with.overflow(A, -1), 0) != -1 5549 UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand(); 5550 else if (match(Op1, UAddOvResultPat) && 5551 Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B)) 5552 // A > extract(uadd.with.overflow(A, B), 0) 5553 UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand(); 5554 else 5555 return nullptr; 5556 5557 return ExtractValueInst::Create(UAddOv, 1); 5558 } 5559 5560 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) { 5561 bool Changed = false; 5562 const SimplifyQuery Q = SQ.getWithInstruction(&I); 5563 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 5564 unsigned Op0Cplxity = getComplexity(Op0); 5565 unsigned Op1Cplxity = getComplexity(Op1); 5566 5567 /// Orders the operands of the compare so that they are listed from most 5568 /// complex to least complex. This puts constants before unary operators, 5569 /// before binary operators. 5570 if (Op0Cplxity < Op1Cplxity || 5571 (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) { 5572 I.swapOperands(); 5573 std::swap(Op0, Op1); 5574 Changed = true; 5575 } 5576 5577 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q)) 5578 return replaceInstUsesWith(I, V); 5579 5580 // Comparing -val or val with non-zero is the same as just comparing val 5581 // ie, abs(val) != 0 -> val != 0 5582 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) { 5583 Value *Cond, *SelectTrue, *SelectFalse; 5584 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue), 5585 m_Value(SelectFalse)))) { 5586 if (Value *V = dyn_castNegVal(SelectTrue)) { 5587 if (V == SelectFalse) 5588 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1); 5589 } 5590 else if (Value *V = dyn_castNegVal(SelectFalse)) { 5591 if (V == SelectTrue) 5592 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1); 5593 } 5594 } 5595 } 5596 5597 if (Op0->getType()->isIntOrIntVectorTy(1)) 5598 if (Instruction *Res = canonicalizeICmpBool(I, Builder)) 5599 return Res; 5600 5601 if (Instruction *Res = canonicalizeCmpWithConstant(I)) 5602 return Res; 5603 5604 if (Instruction *Res = canonicalizeICmpPredicate(I)) 5605 return Res; 5606 5607 if (Instruction *Res = foldICmpWithConstant(I)) 5608 return Res; 5609 5610 if (Instruction *Res = foldICmpWithDominatingICmp(I)) 5611 return Res; 5612 5613 if (Instruction *Res = foldICmpBinOp(I, Q)) 5614 return Res; 5615 5616 if (Instruction *Res = foldICmpUsingKnownBits(I)) 5617 return Res; 5618 5619 // Test if the ICmpInst instruction is used exclusively by a select as 5620 // part of a minimum or maximum operation. If so, refrain from doing 5621 // any other folding. This helps out other analyses which understand 5622 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution 5623 // and CodeGen. And in this case, at least one of the comparison 5624 // operands has at least one user besides the compare (the select), 5625 // which would often largely negate the benefit of folding anyway. 5626 // 5627 // Do the same for the other patterns recognized by matchSelectPattern. 5628 if (I.hasOneUse()) 5629 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) { 5630 Value *A, *B; 5631 SelectPatternResult SPR = matchSelectPattern(SI, A, B); 5632 if (SPR.Flavor != SPF_UNKNOWN) 5633 return nullptr; 5634 } 5635 5636 // Do this after checking for min/max to prevent infinite looping. 5637 if (Instruction *Res = foldICmpWithZero(I)) 5638 return Res; 5639 5640 // FIXME: We only do this after checking for min/max to prevent infinite 5641 // looping caused by a reverse canonicalization of these patterns for min/max. 5642 // FIXME: The organization of folds is a mess. These would naturally go into 5643 // canonicalizeCmpWithConstant(), but we can't move all of the above folds 5644 // down here after the min/max restriction. 5645 ICmpInst::Predicate Pred = I.getPredicate(); 5646 const APInt *C; 5647 if (match(Op1, m_APInt(C))) { 5648 // For i32: x >u 2147483647 -> x <s 0 -> true if sign bit set 5649 if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) { 5650 Constant *Zero = Constant::getNullValue(Op0->getType()); 5651 return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero); 5652 } 5653 5654 // For i32: x <u 2147483648 -> x >s -1 -> true if sign bit clear 5655 if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) { 5656 Constant *AllOnes = Constant::getAllOnesValue(Op0->getType()); 5657 return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes); 5658 } 5659 } 5660 5661 if (Instruction *Res = foldICmpInstWithConstant(I)) 5662 return Res; 5663 5664 // Try to match comparison as a sign bit test. Intentionally do this after 5665 // foldICmpInstWithConstant() to potentially let other folds to happen first. 5666 if (Instruction *New = foldSignBitTest(I)) 5667 return New; 5668 5669 if (Instruction *Res = foldICmpInstWithConstantNotInt(I)) 5670 return Res; 5671 5672 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now. 5673 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0)) 5674 if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I)) 5675 return NI; 5676 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) 5677 if (Instruction *NI = foldGEPICmp(GEP, Op0, 5678 ICmpInst::getSwappedPredicate(I.getPredicate()), I)) 5679 return NI; 5680 5681 // Try to optimize equality comparisons against alloca-based pointers. 5682 if (Op0->getType()->isPointerTy() && I.isEquality()) { 5683 assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?"); 5684 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0))) 5685 if (Instruction *New = foldAllocaCmp(I, Alloca, Op1)) 5686 return New; 5687 if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1))) 5688 if (Instruction *New = foldAllocaCmp(I, Alloca, Op0)) 5689 return New; 5690 } 5691 5692 if (Instruction *Res = foldICmpBitCast(I, Builder)) 5693 return Res; 5694 5695 // TODO: Hoist this above the min/max bailout. 5696 if (Instruction *R = foldICmpWithCastOp(I)) 5697 return R; 5698 5699 if (Instruction *Res = foldICmpWithMinMax(I)) 5700 return Res; 5701 5702 { 5703 Value *A, *B; 5704 // Transform (A & ~B) == 0 --> (A & B) != 0 5705 // and (A & ~B) != 0 --> (A & B) == 0 5706 // if A is a power of 2. 5707 if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) && 5708 match(Op1, m_Zero()) && 5709 isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality()) 5710 return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B), 5711 Op1); 5712 5713 // ~X < ~Y --> Y < X 5714 // ~X < C --> X > ~C 5715 if (match(Op0, m_Not(m_Value(A)))) { 5716 if (match(Op1, m_Not(m_Value(B)))) 5717 return new ICmpInst(I.getPredicate(), B, A); 5718 5719 const APInt *C; 5720 if (match(Op1, m_APInt(C))) 5721 return new ICmpInst(I.getSwappedPredicate(), A, 5722 ConstantInt::get(Op1->getType(), ~(*C))); 5723 } 5724 5725 Instruction *AddI = nullptr; 5726 if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B), 5727 m_Instruction(AddI))) && 5728 isa<IntegerType>(A->getType())) { 5729 Value *Result; 5730 Constant *Overflow; 5731 // m_UAddWithOverflow can match patterns that do not include an explicit 5732 // "add" instruction, so check the opcode of the matched op. 5733 if (AddI->getOpcode() == Instruction::Add && 5734 OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, A, B, *AddI, 5735 Result, Overflow)) { 5736 replaceInstUsesWith(*AddI, Result); 5737 eraseInstFromFunction(*AddI); 5738 return replaceInstUsesWith(I, Overflow); 5739 } 5740 } 5741 5742 // (zext a) * (zext b) --> llvm.umul.with.overflow. 5743 if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) { 5744 if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this)) 5745 return R; 5746 } 5747 if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) { 5748 if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this)) 5749 return R; 5750 } 5751 } 5752 5753 if (Instruction *Res = foldICmpEquality(I)) 5754 return Res; 5755 5756 if (Instruction *Res = foldICmpOfUAddOv(I)) 5757 return Res; 5758 5759 // The 'cmpxchg' instruction returns an aggregate containing the old value and 5760 // an i1 which indicates whether or not we successfully did the swap. 5761 // 5762 // Replace comparisons between the old value and the expected value with the 5763 // indicator that 'cmpxchg' returns. 5764 // 5765 // N.B. This transform is only valid when the 'cmpxchg' is not permitted to 5766 // spuriously fail. In those cases, the old value may equal the expected 5767 // value but it is possible for the swap to not occur. 5768 if (I.getPredicate() == ICmpInst::ICMP_EQ) 5769 if (auto *EVI = dyn_cast<ExtractValueInst>(Op0)) 5770 if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand())) 5771 if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 && 5772 !ACXI->isWeak()) 5773 return ExtractValueInst::Create(ACXI, 1); 5774 5775 { 5776 Value *X; 5777 const APInt *C; 5778 // icmp X+Cst, X 5779 if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X) 5780 return foldICmpAddOpConst(X, *C, I.getPredicate()); 5781 5782 // icmp X, X+Cst 5783 if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X) 5784 return foldICmpAddOpConst(X, *C, I.getSwappedPredicate()); 5785 } 5786 5787 if (Instruction *Res = foldICmpWithHighBitMask(I, Builder)) 5788 return Res; 5789 5790 if (I.getType()->isVectorTy()) 5791 if (Instruction *Res = foldVectorCmp(I, Builder)) 5792 return Res; 5793 5794 return Changed ? &I : nullptr; 5795 } 5796 5797 /// Fold fcmp ([us]itofp x, cst) if possible. 5798 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I, 5799 Instruction *LHSI, 5800 Constant *RHSC) { 5801 if (!isa<ConstantFP>(RHSC)) return nullptr; 5802 const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF(); 5803 5804 // Get the width of the mantissa. We don't want to hack on conversions that 5805 // might lose information from the integer, e.g. "i64 -> float" 5806 int MantissaWidth = LHSI->getType()->getFPMantissaWidth(); 5807 if (MantissaWidth == -1) return nullptr; // Unknown. 5808 5809 IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType()); 5810 5811 bool LHSUnsigned = isa<UIToFPInst>(LHSI); 5812 5813 if (I.isEquality()) { 5814 FCmpInst::Predicate P = I.getPredicate(); 5815 bool IsExact = false; 5816 APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned); 5817 RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact); 5818 5819 // If the floating point constant isn't an integer value, we know if we will 5820 // ever compare equal / not equal to it. 5821 if (!IsExact) { 5822 // TODO: Can never be -0.0 and other non-representable values 5823 APFloat RHSRoundInt(RHS); 5824 RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven); 5825 if (RHS != RHSRoundInt) { 5826 if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ) 5827 return replaceInstUsesWith(I, Builder.getFalse()); 5828 5829 assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE); 5830 return replaceInstUsesWith(I, Builder.getTrue()); 5831 } 5832 } 5833 5834 // TODO: If the constant is exactly representable, is it always OK to do 5835 // equality compares as integer? 5836 } 5837 5838 // Check to see that the input is converted from an integer type that is small 5839 // enough that preserves all bits. TODO: check here for "known" sign bits. 5840 // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e. 5841 unsigned InputSize = IntTy->getScalarSizeInBits(); 5842 5843 // Following test does NOT adjust InputSize downwards for signed inputs, 5844 // because the most negative value still requires all the mantissa bits 5845 // to distinguish it from one less than that value. 5846 if ((int)InputSize > MantissaWidth) { 5847 // Conversion would lose accuracy. Check if loss can impact comparison. 5848 int Exp = ilogb(RHS); 5849 if (Exp == APFloat::IEK_Inf) { 5850 int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics())); 5851 if (MaxExponent < (int)InputSize - !LHSUnsigned) 5852 // Conversion could create infinity. 5853 return nullptr; 5854 } else { 5855 // Note that if RHS is zero or NaN, then Exp is negative 5856 // and first condition is trivially false. 5857 if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned) 5858 // Conversion could affect comparison. 5859 return nullptr; 5860 } 5861 } 5862 5863 // Otherwise, we can potentially simplify the comparison. We know that it 5864 // will always come through as an integer value and we know the constant is 5865 // not a NAN (it would have been previously simplified). 5866 assert(!RHS.isNaN() && "NaN comparison not already folded!"); 5867 5868 ICmpInst::Predicate Pred; 5869 switch (I.getPredicate()) { 5870 default: llvm_unreachable("Unexpected predicate!"); 5871 case FCmpInst::FCMP_UEQ: 5872 case FCmpInst::FCMP_OEQ: 5873 Pred = ICmpInst::ICMP_EQ; 5874 break; 5875 case FCmpInst::FCMP_UGT: 5876 case FCmpInst::FCMP_OGT: 5877 Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT; 5878 break; 5879 case FCmpInst::FCMP_UGE: 5880 case FCmpInst::FCMP_OGE: 5881 Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE; 5882 break; 5883 case FCmpInst::FCMP_ULT: 5884 case FCmpInst::FCMP_OLT: 5885 Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT; 5886 break; 5887 case FCmpInst::FCMP_ULE: 5888 case FCmpInst::FCMP_OLE: 5889 Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE; 5890 break; 5891 case FCmpInst::FCMP_UNE: 5892 case FCmpInst::FCMP_ONE: 5893 Pred = ICmpInst::ICMP_NE; 5894 break; 5895 case FCmpInst::FCMP_ORD: 5896 return replaceInstUsesWith(I, Builder.getTrue()); 5897 case FCmpInst::FCMP_UNO: 5898 return replaceInstUsesWith(I, Builder.getFalse()); 5899 } 5900 5901 // Now we know that the APFloat is a normal number, zero or inf. 5902 5903 // See if the FP constant is too large for the integer. For example, 5904 // comparing an i8 to 300.0. 5905 unsigned IntWidth = IntTy->getScalarSizeInBits(); 5906 5907 if (!LHSUnsigned) { 5908 // If the RHS value is > SignedMax, fold the comparison. This handles +INF 5909 // and large values. 5910 APFloat SMax(RHS.getSemantics()); 5911 SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true, 5912 APFloat::rmNearestTiesToEven); 5913 if (SMax < RHS) { // smax < 13123.0 5914 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT || 5915 Pred == ICmpInst::ICMP_SLE) 5916 return replaceInstUsesWith(I, Builder.getTrue()); 5917 return replaceInstUsesWith(I, Builder.getFalse()); 5918 } 5919 } else { 5920 // If the RHS value is > UnsignedMax, fold the comparison. This handles 5921 // +INF and large values. 5922 APFloat UMax(RHS.getSemantics()); 5923 UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false, 5924 APFloat::rmNearestTiesToEven); 5925 if (UMax < RHS) { // umax < 13123.0 5926 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT || 5927 Pred == ICmpInst::ICMP_ULE) 5928 return replaceInstUsesWith(I, Builder.getTrue()); 5929 return replaceInstUsesWith(I, Builder.getFalse()); 5930 } 5931 } 5932 5933 if (!LHSUnsigned) { 5934 // See if the RHS value is < SignedMin. 5935 APFloat SMin(RHS.getSemantics()); 5936 SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true, 5937 APFloat::rmNearestTiesToEven); 5938 if (SMin > RHS) { // smin > 12312.0 5939 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT || 5940 Pred == ICmpInst::ICMP_SGE) 5941 return replaceInstUsesWith(I, Builder.getTrue()); 5942 return replaceInstUsesWith(I, Builder.getFalse()); 5943 } 5944 } else { 5945 // See if the RHS value is < UnsignedMin. 5946 APFloat UMin(RHS.getSemantics()); 5947 UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false, 5948 APFloat::rmNearestTiesToEven); 5949 if (UMin > RHS) { // umin > 12312.0 5950 if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT || 5951 Pred == ICmpInst::ICMP_UGE) 5952 return replaceInstUsesWith(I, Builder.getTrue()); 5953 return replaceInstUsesWith(I, Builder.getFalse()); 5954 } 5955 } 5956 5957 // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or 5958 // [0, UMAX], but it may still be fractional. See if it is fractional by 5959 // casting the FP value to the integer value and back, checking for equality. 5960 // Don't do this for zero, because -0.0 is not fractional. 5961 Constant *RHSInt = LHSUnsigned 5962 ? ConstantExpr::getFPToUI(RHSC, IntTy) 5963 : ConstantExpr::getFPToSI(RHSC, IntTy); 5964 if (!RHS.isZero()) { 5965 bool Equal = LHSUnsigned 5966 ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC 5967 : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC; 5968 if (!Equal) { 5969 // If we had a comparison against a fractional value, we have to adjust 5970 // the compare predicate and sometimes the value. RHSC is rounded towards 5971 // zero at this point. 5972 switch (Pred) { 5973 default: llvm_unreachable("Unexpected integer comparison!"); 5974 case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true 5975 return replaceInstUsesWith(I, Builder.getTrue()); 5976 case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false 5977 return replaceInstUsesWith(I, Builder.getFalse()); 5978 case ICmpInst::ICMP_ULE: 5979 // (float)int <= 4.4 --> int <= 4 5980 // (float)int <= -4.4 --> false 5981 if (RHS.isNegative()) 5982 return replaceInstUsesWith(I, Builder.getFalse()); 5983 break; 5984 case ICmpInst::ICMP_SLE: 5985 // (float)int <= 4.4 --> int <= 4 5986 // (float)int <= -4.4 --> int < -4 5987 if (RHS.isNegative()) 5988 Pred = ICmpInst::ICMP_SLT; 5989 break; 5990 case ICmpInst::ICMP_ULT: 5991 // (float)int < -4.4 --> false 5992 // (float)int < 4.4 --> int <= 4 5993 if (RHS.isNegative()) 5994 return replaceInstUsesWith(I, Builder.getFalse()); 5995 Pred = ICmpInst::ICMP_ULE; 5996 break; 5997 case ICmpInst::ICMP_SLT: 5998 // (float)int < -4.4 --> int < -4 5999 // (float)int < 4.4 --> int <= 4 6000 if (!RHS.isNegative()) 6001 Pred = ICmpInst::ICMP_SLE; 6002 break; 6003 case ICmpInst::ICMP_UGT: 6004 // (float)int > 4.4 --> int > 4 6005 // (float)int > -4.4 --> true 6006 if (RHS.isNegative()) 6007 return replaceInstUsesWith(I, Builder.getTrue()); 6008 break; 6009 case ICmpInst::ICMP_SGT: 6010 // (float)int > 4.4 --> int > 4 6011 // (float)int > -4.4 --> int >= -4 6012 if (RHS.isNegative()) 6013 Pred = ICmpInst::ICMP_SGE; 6014 break; 6015 case ICmpInst::ICMP_UGE: 6016 // (float)int >= -4.4 --> true 6017 // (float)int >= 4.4 --> int > 4 6018 if (RHS.isNegative()) 6019 return replaceInstUsesWith(I, Builder.getTrue()); 6020 Pred = ICmpInst::ICMP_UGT; 6021 break; 6022 case ICmpInst::ICMP_SGE: 6023 // (float)int >= -4.4 --> int >= -4 6024 // (float)int >= 4.4 --> int > 4 6025 if (!RHS.isNegative()) 6026 Pred = ICmpInst::ICMP_SGT; 6027 break; 6028 } 6029 } 6030 } 6031 6032 // Lower this FP comparison into an appropriate integer version of the 6033 // comparison. 6034 return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt); 6035 } 6036 6037 /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary. 6038 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI, 6039 Constant *RHSC) { 6040 // When C is not 0.0 and infinities are not allowed: 6041 // (C / X) < 0.0 is a sign-bit test of X 6042 // (C / X) < 0.0 --> X < 0.0 (if C is positive) 6043 // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate) 6044 // 6045 // Proof: 6046 // Multiply (C / X) < 0.0 by X * X / C. 6047 // - X is non zero, if it is the flag 'ninf' is violated. 6048 // - C defines the sign of X * X * C. Thus it also defines whether to swap 6049 // the predicate. C is also non zero by definition. 6050 // 6051 // Thus X * X / C is non zero and the transformation is valid. [qed] 6052 6053 FCmpInst::Predicate Pred = I.getPredicate(); 6054 6055 // Check that predicates are valid. 6056 if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) && 6057 (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE)) 6058 return nullptr; 6059 6060 // Check that RHS operand is zero. 6061 if (!match(RHSC, m_AnyZeroFP())) 6062 return nullptr; 6063 6064 // Check fastmath flags ('ninf'). 6065 if (!LHSI->hasNoInfs() || !I.hasNoInfs()) 6066 return nullptr; 6067 6068 // Check the properties of the dividend. It must not be zero to avoid a 6069 // division by zero (see Proof). 6070 const APFloat *C; 6071 if (!match(LHSI->getOperand(0), m_APFloat(C))) 6072 return nullptr; 6073 6074 if (C->isZero()) 6075 return nullptr; 6076 6077 // Get swapped predicate if necessary. 6078 if (C->isNegative()) 6079 Pred = I.getSwappedPredicate(); 6080 6081 return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I); 6082 } 6083 6084 /// Optimize fabs(X) compared with zero. 6085 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) { 6086 Value *X; 6087 if (!match(I.getOperand(0), m_FAbs(m_Value(X))) || 6088 !match(I.getOperand(1), m_PosZeroFP())) 6089 return nullptr; 6090 6091 auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) { 6092 I->setPredicate(P); 6093 return IC.replaceOperand(*I, 0, X); 6094 }; 6095 6096 switch (I.getPredicate()) { 6097 case FCmpInst::FCMP_UGE: 6098 case FCmpInst::FCMP_OLT: 6099 // fabs(X) >= 0.0 --> true 6100 // fabs(X) < 0.0 --> false 6101 llvm_unreachable("fcmp should have simplified"); 6102 6103 case FCmpInst::FCMP_OGT: 6104 // fabs(X) > 0.0 --> X != 0.0 6105 return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X); 6106 6107 case FCmpInst::FCMP_UGT: 6108 // fabs(X) u> 0.0 --> X u!= 0.0 6109 return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X); 6110 6111 case FCmpInst::FCMP_OLE: 6112 // fabs(X) <= 0.0 --> X == 0.0 6113 return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X); 6114 6115 case FCmpInst::FCMP_ULE: 6116 // fabs(X) u<= 0.0 --> X u== 0.0 6117 return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X); 6118 6119 case FCmpInst::FCMP_OGE: 6120 // fabs(X) >= 0.0 --> !isnan(X) 6121 assert(!I.hasNoNaNs() && "fcmp should have simplified"); 6122 return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X); 6123 6124 case FCmpInst::FCMP_ULT: 6125 // fabs(X) u< 0.0 --> isnan(X) 6126 assert(!I.hasNoNaNs() && "fcmp should have simplified"); 6127 return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X); 6128 6129 case FCmpInst::FCMP_OEQ: 6130 case FCmpInst::FCMP_UEQ: 6131 case FCmpInst::FCMP_ONE: 6132 case FCmpInst::FCMP_UNE: 6133 case FCmpInst::FCMP_ORD: 6134 case FCmpInst::FCMP_UNO: 6135 // Look through the fabs() because it doesn't change anything but the sign. 6136 // fabs(X) == 0.0 --> X == 0.0, 6137 // fabs(X) != 0.0 --> X != 0.0 6138 // isnan(fabs(X)) --> isnan(X) 6139 // !isnan(fabs(X) --> !isnan(X) 6140 return replacePredAndOp0(&I, I.getPredicate(), X); 6141 6142 default: 6143 return nullptr; 6144 } 6145 } 6146 6147 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) { 6148 bool Changed = false; 6149 6150 /// Orders the operands of the compare so that they are listed from most 6151 /// complex to least complex. This puts constants before unary operators, 6152 /// before binary operators. 6153 if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) { 6154 I.swapOperands(); 6155 Changed = true; 6156 } 6157 6158 const CmpInst::Predicate Pred = I.getPredicate(); 6159 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 6160 if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(), 6161 SQ.getWithInstruction(&I))) 6162 return replaceInstUsesWith(I, V); 6163 6164 // Simplify 'fcmp pred X, X' 6165 Type *OpType = Op0->getType(); 6166 assert(OpType == Op1->getType() && "fcmp with different-typed operands?"); 6167 if (Op0 == Op1) { 6168 switch (Pred) { 6169 default: break; 6170 case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y) 6171 case FCmpInst::FCMP_ULT: // True if unordered or less than 6172 case FCmpInst::FCMP_UGT: // True if unordered or greater than 6173 case FCmpInst::FCMP_UNE: // True if unordered or not equal 6174 // Canonicalize these to be 'fcmp uno %X, 0.0'. 6175 I.setPredicate(FCmpInst::FCMP_UNO); 6176 I.setOperand(1, Constant::getNullValue(OpType)); 6177 return &I; 6178 6179 case FCmpInst::FCMP_ORD: // True if ordered (no nans) 6180 case FCmpInst::FCMP_OEQ: // True if ordered and equal 6181 case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal 6182 case FCmpInst::FCMP_OLE: // True if ordered and less than or equal 6183 // Canonicalize these to be 'fcmp ord %X, 0.0'. 6184 I.setPredicate(FCmpInst::FCMP_ORD); 6185 I.setOperand(1, Constant::getNullValue(OpType)); 6186 return &I; 6187 } 6188 } 6189 6190 // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand, 6191 // then canonicalize the operand to 0.0. 6192 if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) { 6193 if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI)) 6194 return replaceOperand(I, 0, ConstantFP::getNullValue(OpType)); 6195 6196 if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI)) 6197 return replaceOperand(I, 1, ConstantFP::getNullValue(OpType)); 6198 } 6199 6200 // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y 6201 Value *X, *Y; 6202 if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y)))) 6203 return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I); 6204 6205 // Test if the FCmpInst instruction is used exclusively by a select as 6206 // part of a minimum or maximum operation. If so, refrain from doing 6207 // any other folding. This helps out other analyses which understand 6208 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution 6209 // and CodeGen. And in this case, at least one of the comparison 6210 // operands has at least one user besides the compare (the select), 6211 // which would often largely negate the benefit of folding anyway. 6212 if (I.hasOneUse()) 6213 if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) { 6214 Value *A, *B; 6215 SelectPatternResult SPR = matchSelectPattern(SI, A, B); 6216 if (SPR.Flavor != SPF_UNKNOWN) 6217 return nullptr; 6218 } 6219 6220 // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0: 6221 // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0 6222 if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP())) 6223 return replaceOperand(I, 1, ConstantFP::getNullValue(OpType)); 6224 6225 // Handle fcmp with instruction LHS and constant RHS. 6226 Instruction *LHSI; 6227 Constant *RHSC; 6228 if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) { 6229 switch (LHSI->getOpcode()) { 6230 case Instruction::PHI: 6231 // Only fold fcmp into the PHI if the phi and fcmp are in the same 6232 // block. If in the same block, we're encouraging jump threading. If 6233 // not, we are just pessimizing the code by making an i1 phi. 6234 if (LHSI->getParent() == I.getParent()) 6235 if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI))) 6236 return NV; 6237 break; 6238 case Instruction::SIToFP: 6239 case Instruction::UIToFP: 6240 if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC)) 6241 return NV; 6242 break; 6243 case Instruction::FDiv: 6244 if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC)) 6245 return NV; 6246 break; 6247 case Instruction::Load: 6248 if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) 6249 if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) 6250 if (GV->isConstant() && GV->hasDefinitiveInitializer() && 6251 !cast<LoadInst>(LHSI)->isVolatile()) 6252 if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I)) 6253 return Res; 6254 break; 6255 } 6256 } 6257 6258 if (Instruction *R = foldFabsWithFcmpZero(I, *this)) 6259 return R; 6260 6261 if (match(Op0, m_FNeg(m_Value(X)))) { 6262 // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C 6263 Constant *C; 6264 if (match(Op1, m_Constant(C))) { 6265 Constant *NegC = ConstantExpr::getFNeg(C); 6266 return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I); 6267 } 6268 } 6269 6270 if (match(Op0, m_FPExt(m_Value(X)))) { 6271 // fcmp (fpext X), (fpext Y) -> fcmp X, Y 6272 if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType()) 6273 return new FCmpInst(Pred, X, Y, "", &I); 6274 6275 // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless 6276 const APFloat *C; 6277 if (match(Op1, m_APFloat(C))) { 6278 const fltSemantics &FPSem = 6279 X->getType()->getScalarType()->getFltSemantics(); 6280 bool Lossy; 6281 APFloat TruncC = *C; 6282 TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy); 6283 6284 // Avoid lossy conversions and denormals. 6285 // Zero is a special case that's OK to convert. 6286 APFloat Fabs = TruncC; 6287 Fabs.clearSign(); 6288 if (!Lossy && 6289 (!(Fabs < APFloat::getSmallestNormalized(FPSem)) || Fabs.isZero())) { 6290 Constant *NewC = ConstantFP::get(X->getType(), TruncC); 6291 return new FCmpInst(Pred, X, NewC, "", &I); 6292 } 6293 } 6294 } 6295 6296 // Convert a sign-bit test of an FP value into a cast and integer compare. 6297 // TODO: Simplify if the copysign constant is 0.0 or NaN. 6298 // TODO: Handle non-zero compare constants. 6299 // TODO: Handle other predicates. 6300 const APFloat *C; 6301 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::copysign>(m_APFloat(C), 6302 m_Value(X)))) && 6303 match(Op1, m_AnyZeroFP()) && !C->isZero() && !C->isNaN()) { 6304 Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits()); 6305 if (auto *VecTy = dyn_cast<VectorType>(OpType)) 6306 IntType = VectorType::get(IntType, VecTy->getElementCount()); 6307 6308 // copysign(non-zero constant, X) < 0.0 --> (bitcast X) < 0 6309 if (Pred == FCmpInst::FCMP_OLT) { 6310 Value *IntX = Builder.CreateBitCast(X, IntType); 6311 return new ICmpInst(ICmpInst::ICMP_SLT, IntX, 6312 ConstantInt::getNullValue(IntType)); 6313 } 6314 } 6315 6316 if (I.getType()->isVectorTy()) 6317 if (Instruction *Res = foldVectorCmp(I, Builder)) 6318 return Res; 6319 6320 return Changed ? &I : nullptr; 6321 } 6322