//===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains logic for simplifying instructions based on information
// about how they are used.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return true.
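/// For example, given "and i32 %x, 0xFF00" where only the low byte of the
/// result is demanded (Demanded = 0x00FF), the constant has set bits outside
/// the demanded set, so it is shrunk to 0xFF00 & 0x00FF = 0 and the caller
/// gets another chance to fold the instruction away.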
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   const APInt &Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // The operand must be a constant integer or splat integer.
  Value *Op = I->getOperand(OpNo);
  const APInt *C;
  if (!match(Op, m_APInt(C)))
    return false;

  // If there are no bits set that aren't demanded, nothing to do.
  if (C->isSubsetOf(Demanded))
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  I->setOperand(OpNo, ConstantInt::get(Op->getType(), *C & Demanded));

  return true;
}


/// Inst is an integer instruction that SimplifyDemandedBits knows about. See
/// if the instruction has any properties that allow us to simplify its
/// operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  KnownBits Known(BitWidth);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, Known,
                                     0, &Inst);
  if (!V) return false;
  if (V == &Inst) return true;
  replaceInstUsesWith(Inst, V);
  return true;
}

/// This form of SimplifyDemandedBits simplifies the specified instruction
/// operand if possible, updating it in place. It returns true if it made any
/// change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                        const APInt &DemandedMask,
                                        KnownBits &Known,
                                        unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, Known,
                                          Depth, I);
  if (!NewVal) return false;
  U = NewVal;
  return true;
}


/// This function attempts to replace V with a simpler value based on the
/// demanded bits. When this function is called, it is known that only the bits
/// set in DemandedMask of the result of V are ever used downstream.
/// Consequently, depending on the mask and V, it may be possible to replace V
/// with a constant or one of its operands. In such cases, this function does
/// the replacement and returns true. In all other cases, it returns false
/// after analyzing the expression; Known.One then contains all the bits that
/// are known to be one in the expression, and Known.Zero contains all the
/// bits that are known to be zero in the expression. These are provided to
/// potentially allow the caller (which might recursively be
/// SimplifyDemandedBits itself) to simplify the expression.
/// Known.One and Known.Zero always follow the invariant that:
///   Known.One & Known.Zero == 0.
/// That is, a bit can't be both 1 and 0. Note that the bits in Known.One and
/// Known.Zero may only be accurate for those bits set in DemandedMask. Note
/// also that the bitwidth of V, DemandedMask, Known.Zero and Known.One must
/// all be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification. This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded. This
/// returns some other non-null value if it found out that V is equal to
/// another value in the context where the specified bits are demanded, but
/// not for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             KnownBits &Known, unsigned Depth,
                                             Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      Known.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask and Known must have same BitWidth");

  if (isa<Constant>(V)) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr;
  }

  Known.Zero.clearAllBits();
  Known.One.clearAllBits();
  if (DemandedMask == 0)     // Not demanding any bits from V.
    return UndefValue::get(VTy);

  if (Depth == 6)            // Limit search depth.
    return nullptr;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    computeKnownBits(V, Known, Depth, CxtI);
    return nullptr;          // Only analyze instructions.
  }

  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse())
    return SimplifyMultipleUseDemandedBits(I, DemandedMask, Known, Depth, CxtI);

  KnownBits LHSKnown(BitWidth), RHSKnown(BitWidth);

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands. This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask.setAllBits();

  switch (I->getOpcode()) {
  default:
    computeKnownBits(I, Known, Depth, CxtI);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.Zero, LHSKnown,
                             Depth + 1))
      return I;
    assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
    assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");

    // Output known-0 bits are known to be clear if zero in either the LHS or
    // the RHS.
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    // Output known-1 bits are only known if set in both the LHS & RHS.
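    // e.g. if RHSKnown.One = 0x000F and LHSKnown.One = 0x0005, then
    // IKnownOne = 0x0005.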
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnown.Zero))
      return I;

    Known.Zero = std::move(IKnownZero);
    Known.One  = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnown.One, LHSKnown,
                             Depth + 1))
      return I;
    assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
    assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    // Output known-1 bits are known to be set if set in either the LHS or the
    // RHS.
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'or'.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    Known.Zero = std::move(IKnownZero);
    Known.One  = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
    assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'xor'.
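    // e.g. 'xor X, C' simplifies to X whenever every set bit of C lies
    // outside DemandedMask.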
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero)) {
      Instruction *Or =
        BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                 I->getName());
      return InsertNewInstWith(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if (DemandedMask.isSubsetOf(RHSKnown.Zero|RHSKnown.One) &&
        RHSKnown.One.isSubsetOf(LHSKnown.One)) {
      Constant *AndC = Constant::getIntegerValue(VTy,
                                                 ~RHSKnown.One & DemandedMask);
      Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
      return InsertNewInstWith(And, *I);
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // If our LHS is an 'and' and if it has one use, and if any of the bits we
    // are flipping are known to be set, then the xor is just resetting those
    // bits to zero.  We can just knock out bits from the 'and' and the 'xor',
    // simplifying both of them.
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          isa<ConstantInt>(I->getOperand(1)) &&
          isa<ConstantInt>(LHSInst->getOperand(1)) &&
          (LHSKnown.One & RHSKnown.One & DemandedMask) != 0) {
        ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
        ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
        APInt NewMask = ~(LHSKnown.One & RHSKnown.One & DemandedMask);

        Constant *AndC =
          ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        InsertNewInstWith(NewAnd, *I);

        Constant *XorC =
          ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
        return InsertNewInstWith(NewXor, *I);
      }

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    Known.Zero = std::move(IKnownZero);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    Known.One = std::move(IKnownOne);
    break;
  }
  case Instruction::Select:
    // If this is a select as part of a min/max pattern, don't simplify any
    // further in case we break the structure.
    Value *LHS, *RHS;
    if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnown, Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnown, Depth + 1))
      return I;
    assert(!(RHSKnown.Zero & RHSKnown.One) && "Bits known to be one AND zero?");
    assert(!(LHSKnown.Zero & LHSKnown.One) && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
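    // e.g. for 'select i1 %c, i32 0xFF00, i32 0x00FF' with DemandedMask =
    // 0x00F0, the arms can be shrunk to 0 and 0x00F0 respectively.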
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    Known.One  = RHSKnown.One  & LHSKnown.One;
    Known.Zero = RHSKnown.Zero & LHSKnown.Zero;
    break;
  case Instruction::Trunc: {
    unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
    DemandedMask = DemandedMask.zext(truncBf);
    Known = Known.zext(truncBf);
    if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
      return I;
    DemandedMask = DemandedMask.trunc(BitWidth);
    Known = Known.trunc(BitWidth);
    assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      return nullptr;  // vector->int or fp->int?

    if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (VectorType *SrcVTy =
            dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return nullptr;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return nullptr;
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // Don't touch a vector-to-scalar bitcast.
      return nullptr;

    if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
      return I;
    assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
    break;
  case Instruction::ZExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    DemandedMask = DemandedMask.trunc(SrcBitWidth);
    Known = Known.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, DemandedMask, Known, Depth + 1))
      return I;
    DemandedMask = DemandedMask.zext(BitWidth);
    Known = Known.zext(BitWidth);
    assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
    // The top bits are known to be zero.
    Known.Zero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask &
                              APInt::getLowBitsSet(BitWidth, SrcBitWidth);

    APInt NewBits(APInt::getBitsSetFrom(BitWidth, SrcBitWidth));
    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if ((NewBits & DemandedMask) != 0)
      InputDemandedBits.setBit(SrcBitWidth-1);

    InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
    Known = Known.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, Known, Depth + 1))
      return I;
    InputDemandedBits = InputDemandedBits.zext(BitWidth);
    Known = Known.zext(BitWidth);
    assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, or if the NewBits are not demanded,
    // convert this into a zero extension.
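    // e.g. 'sext i8 %x to i32' where %x is known non-negative becomes
    // 'zext i8 %x to i32'.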
    if (Known.Zero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
      // Convert to ZExt cast.
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    } else if (Known.One[SrcBitWidth-1]) {   // Input sign bit known set
      Known.One |= NewBits;
    }
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    /// If the high-bits of an ADD/SUB are not demanded, then we do not care
    /// about the high bits of the operands.
    unsigned NLZ = DemandedMask.countLeadingZeros();
    if (NLZ > 0) {
      // Right fill the mask of bits for this ADD/SUB to demand the most
      // significant bit and all those below it.
      APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
      if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
          SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnown, Depth + 1) ||
          ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
          SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnown, Depth + 1)) {
        // Disable the nsw and nuw flags here: We can no longer guarantee that
        // we won't wrap after simplification. Removing the nsw/nuw flags is
        // legal here because the top bit is not demanded.
        BinaryOperator &BinOP = *cast<BinaryOperator>(I);
        BinOP.setHasNoSignedWrap(false);
        BinOP.setHasNoUnsignedWrap(false);
        return I;
      }

      // If we are known to be adding/subtracting zeros to every bit below
      // the highest demanded bit, we just return the other side.
      if (DemandedFromOps.isSubsetOf(RHSKnown.Zero))
        return I->getOperand(0);
      // We can't do this with the LHS for subtraction.
      if (I->getOpcode() == Instruction::Add &&
          DemandedFromOps.isSubsetOf(LHSKnown.Zero))
        return I->getOperand(1);
    }

    // Otherwise just hand the add/sub off to computeKnownBits to fill in
    // the known zeros and ones.
    computeKnownBits(V, Known, Depth, CxtI);
    break;
  }
  case Instruction::Shl: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      const APInt *ShrAmt;
      if (match(I->getOperand(0), m_Shr(m_Value(), m_APInt(ShrAmt)))) {
        Instruction *Shr = cast<Instruction>(I->getOperand(0));
        if (Value *R = simplifyShrShlDemandedBits(
                Shr, *ShrAmt, I, *SA, DemandedMask, Known))
          return R;
      }

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, then it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt+1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
      Known.Zero <<= ShiftAmt;
      Known.One  <<= ShiftAmt;
      // low bits known zero.
      if (ShiftAmt)
        Known.Zero.setLowBits(ShiftAmt);
    }
    break;
  }
  case Instruction::LShr: {
    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
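      // e.g. for 'lshr exact i32 %x, 4' the low 4 bits of %x must be zero for
      // the result to be defined, so they remain demanded.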
      if (cast<LShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;
      assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);
      if (ShiftAmt)
        Known.Zero.setHighBits(ShiftAmt);  // high bits known zero.
    }
    break;
  }
  case Instruction::AShr: {
    // If this is an arithmetic shift right and only the low-bit is set, we
    // can always convert this into a logical shr, even if the shift amount is
    // variable.  The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedMask == 1) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
                        I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignMask())
      return I->getOperand(0);

    const APInt *SA;
    if (match(I->getOperand(1), m_APInt(SA))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the high bits are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.setSignBit();

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<AShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, Known, Depth + 1))
        return I;

      assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      Known.Zero.lshrInPlace(ShiftAmt);
      Known.One.lshrInPlace(ShiftAmt);

      // Handle the sign bits.
      APInt SignMask(APInt::getSignMask(BitWidth));
      // Adjust to where it is now in the mask.
      SignMask.lshrInPlace(ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (BitWidth <= ShiftAmt || Known.Zero[BitWidth-ShiftAmt-1] ||
          !DemandedMask.intersects(HighBits)) {
        BinaryOperator *LShr = BinaryOperator::CreateLShr(I->getOperand(0),
                                                          I->getOperand(1));
        LShr->setIsExact(cast<BinaryOperator>(I)->isExact());
        return InsertNewInstWith(LShr, *I);
      } else if (Known.One.intersects(SignMask)) { // New bits are known one.
        Known.One |= HighBits;
      }
    }
    break;
  }
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // X % -1 demands all the bits because we don't want to introduce
      // INT_MIN % -1 (== undef) by accident.
      if (Rem->isAllOnesValue())
        break;
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA))    // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignMask(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnown, Depth + 1))
          return I;

        // The low bits of LHS are unchanged by the srem.
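        // e.g. for 'srem i32 %x, 8' the low 3 bits of the result equal the
        // low 3 bits of %x.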
        Known.Zero = LHSKnown.Zero & LowBits;
        Known.One = LHSKnown.One & LowBits;

        // If LHS is non-negative or has all low bits zero, then the upper
        // bits are all zero.
        if (LHSKnown.isNonNegative() || LowBits.isSubsetOf(LHSKnown.Zero))
          Known.Zero |= ~LowBits;

        // If LHS is negative and not all low bits are zero, then the upper
        // bits are all one.
        if (LHSKnown.isNegative() && LowBits.intersects(LHSKnown.One))
          Known.One |= ~LowBits;

        assert(!(Known.Zero & Known.One) && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (DemandedMask.isSignBitSet()) {
      computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnown.isNonNegative())
        Known.makeNonNegative();
    }
    break;
  case Instruction::URem: {
    KnownBits Known2(BitWidth);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, Known2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, Known2, Depth + 1))
      return I;

    unsigned Leaders = Known2.Zero.countLeadingOnes();
    Known.Zero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte.  If we have 11 trailing zeros, then
        // we need all the bits down to bit 8.  Likewise, round NLZ.  If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte
          // into the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
        break;
      }
      case Intrinsic::x86_mmx_pmovmskb:
      case Intrinsic::x86_sse_movmsk_ps:
      case Intrinsic::x86_sse2_movmsk_pd:
      case Intrinsic::x86_sse2_pmovmskb_128:
      case Intrinsic::x86_avx_movmsk_ps_256:
      case Intrinsic::x86_avx_movmsk_pd_256:
      case Intrinsic::x86_avx2_pmovmskb: {
        // MOVMSK copies the vector elements' sign bits to the low bits
        // and zeros the high bits.
        unsigned ArgWidth;
        if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
          ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
        } else {
          auto Arg = II->getArgOperand(0);
          auto ArgType = cast<VectorType>(Arg->getType());
          ArgWidth = ArgType->getNumElements();
        }

        // If we don't need any of the low bits, then return zero; we know
        // that DemandedMask is non-zero already.
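        // e.g. a <4 x float> movmsk can only set result bits 0..3; if none of
        // them is demanded, the whole call folds to 0.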
        APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
        if (DemandedElts == 0)
          return ConstantInt::getNullValue(VTy);

        // We know that the upper bits are set to zero.
        Known.Zero.setBitsFrom(ArgWidth);
        return nullptr;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        Known.Zero.setBitsFrom(32);
        return nullptr;
      }
    }
    computeKnownBits(V, Known, Depth, CxtI);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
    return Constant::getIntegerValue(VTy, Known.One);
  return nullptr;
}

/// Helper routine of SimplifyDemandedUseBits. It computes Known
/// bits. It also tries to handle simplifications that can be done based on
/// DemandedMask, but without modifying the Instruction.
Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
                                                     const APInt &DemandedMask,
                                                     KnownBits &Known,
                                                     unsigned Depth,
                                                     Instruction *CxtI) {
  unsigned BitWidth = DemandedMask.getBitWidth();
  Type *ITy = I->getType();

  KnownBits LHSKnown(BitWidth);
  KnownBits RHSKnown(BitWidth);

  // Despite the fact that we can't simplify this instruction in every user's
  // context, we can at least compute the known bits, and we can
  // do simplifications that apply to *just* the one user if we know that
  // this instruction has a simpler value in that context.
  switch (I->getOpcode()) {
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);

    // Output known-0 bits are known to be clear if zero in either the LHS or
    // the RHS.
    APInt IKnownZero = RHSKnown.Zero | LHSKnown.Zero;
    // Output known-1 bits are only known if set in both the LHS & RHS.
    APInt IKnownOne = RHSKnown.One & LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return I->getOperand(1);

    Known.Zero = std::move(IKnownZero);
    Known.One  = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // We can simplify (X|Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.

    // If either the LHS or the RHS are One, the result is One.
    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    APInt IKnownZero = RHSKnown.Zero & LHSKnown.Zero;
    // Output known-1 bits are known to be set if set in either the LHS or the
    // RHS.
    APInt IKnownOne = RHSKnown.One | LHSKnown.One;

    // If the client is only demanding bits that we know, return the known
    // constant.
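    // e.g. 'or X, -1' has IKnownOne covering every bit, so any DemandedMask
    // folds the expression to the all-ones constant in this user's context.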
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other.  These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedMask.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return I->getOperand(1);

    Known.Zero = std::move(IKnownZero);
    Known.One  = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    // We can simplify (X^Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.

    computeKnownBits(I->getOperand(1), RHSKnown, Depth + 1, CxtI);
    computeKnownBits(I->getOperand(0), LHSKnown, Depth + 1, CxtI);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnown.Zero & LHSKnown.Zero) |
                       (RHSKnown.One & LHSKnown.One);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    APInt IKnownOne = (RHSKnown.Zero & LHSKnown.One) |
                      (RHSKnown.One & LHSKnown.Zero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(IKnownZero|IKnownOne))
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedMask.isSubsetOf(RHSKnown.Zero))
      return I->getOperand(0);
    if (DemandedMask.isSubsetOf(LHSKnown.Zero))
      return I->getOperand(1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    Known.Zero = std::move(IKnownZero);
    // Output known-1 bits are known to be set if set in only one of the LHS,
    // RHS.
    Known.One = std::move(IKnownOne);
    break;
  }
  default:
    // Compute the Known bits to simplify things downstream.
    computeKnownBits(I, Known, Depth, CxtI);

    // If this user is only demanding bits that we know, return the known
    // constant.
    if (DemandedMask.isSubsetOf(Known.Zero|Known.One))
      return Constant::getIntegerValue(ITy, Known.One);

    break;
  }

  return nullptr;
}


/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
/// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into
/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
/// of "C2-C1".
///
/// Suppose E1 and E2 are generally different in bits S={bm, bm+1,
/// ..., bn}, without considering the specific value X is holding.
/// This transformation is legal iff one of the following conditions holds:
///  1) All the bits in S are 0, in which case E1 == E2.
///  2) We don't care about those bits in S, per the input DemandedMask.
///  3) A combination of 1) and 2): some bits in S are 0, and we don't care
///     about the rest.
///
/// Currently we only test condition 2).
///
/// As with SimplifyDemandedUseBits, it returns null if the simplification was
/// not successful.
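/// For example, "(X lshr 3) shl 5" and "X shl 2" agree on every bit except
/// bits 2..4 (which "X shl 2" takes from X while the shl/lshr pair zeros
/// them), so the rewrite is legal whenever DemandedMask excludes bits 2..4.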
Value *
InstCombiner::simplifyShrShlDemandedBits(Instruction *Shr, const APInt &ShrOp1,
                                         Instruction *Shl, const APInt &ShlOp1,
                                         const APInt &DemandedMask,
                                         KnownBits &Known) {
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.

  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getScalarSizeInBits();
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undef.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  Known.One.clearAllBits();
  Known.Zero.setLowBits(ShlAmt - 1);
  Known.Zero &= DemandedMask;

  APInt BitMask1(APInt::getAllOnesValue(BitWidth));
  APInt BitMask2(APInt::getAllOnesValue(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt) :
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // Check if condition-2 (see the comment to this function) is satisfied.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      return VarX;

    if (!Shr->hasOneUse())
      return nullptr;

    BinaryOperator *New;
    if (ShrAmt < ShlAmt) {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}

/// The specified value produces a vector with any number of elements.
/// DemandedElts contains the set of elements that are actually used by the
/// caller.  This method analyzes which elements of the operand are undef and
/// returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified and the resultant value is
/// returned.  This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = V->getType()->getVectorNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return nullptr;
  }

  if (DemandedElts == 0) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;

  // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
  if (Constant *C = dyn_cast<Constant>(V)) {
    // Check if this is identity. If so, return 0 since we are not simplifying
    // anything.
    if (DemandedElts.isAllOnesValue())
      return nullptr;

    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    SmallVector<Constant*, 16> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) {   // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
        continue;
      }

      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) return nullptr;

      if (isa<UndefValue>(Elt)) {   // Already undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
      } else {                      // Otherwise, defined.
        Elts.push_back(Elt);
      }
    }

    // If we changed the constant, return it.
    Constant *NewCV = ConstantVector::get(Elts);
    return NewCV != C ? NewCV : nullptr;
  }

  // Limit search depth.
  if (Depth == 10)
    return nullptr;

  // If multiple users are using the root value, proceed with
  // simplification conservatively assuming that all elements
  // are needed.
  if (!V->hasOneUse()) {
    // Quit if we find multiple users of a non-root value though.
    // They'll be handled when it's their turn to be visited by
    // the main instcombine process.
    if (Depth != 0)
      // TODO: Just compute the UndefElts information recursively.
      return nullptr;

    // Conservatively assume that all elements are needed.
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;   // Only analyze instructions.

  bool MadeChange = false;
  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  Value *TmpV;
  switch (I->getOpcode()) {
  default: break;

  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it overwrites,
    // so demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!Idx) {
      // Note that we can't propagate undef elt info, because we don't know
      // which elt is getting updated.
      TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
      break;
    }

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
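    // e.g. 'insertelement <4 x i32> %v, i32 %x, i32 2' when lane 2 is not
    // demanded simply returns %v.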
    unsigned IdxNo = Idx->getZExtValue();
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      Worklist.Add(I);
      return I->getOperand(0);
    }

    // Otherwise, the element inserted overwrites whatever was there, so the
    // input demanded set is simpler than the output set.
    APInt DemandedElts2 = DemandedElts;
    DemandedElts2.clearBit(IdxNo);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    // The inserted element is defined.
    UndefElts.clearBit(IdxNo);
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    unsigned LHSVWidth =
      Shuffle->getOperand(0)->getType()->getVectorNumElements();
    APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < LHSVWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < LHSVWidth)
            LeftDemanded.setBit(MaskVal);
          else
            RightDemanded.setBit(MaskVal - LHSVWidth);
        }
      }
    }

    APInt LHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
                                      LHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    APInt RHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
                                      RHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < LHSVWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - LHSVWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
          RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
        }
      }
    }

    // Try to transform shuffle with constant vector and single element from
    // this constant vector to single insertelement instruction.
    // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
    // insertelement V, C[ci], ci-n
    if (LHSVWidth == Shuffle->getType()->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find constant vector with the single element in shuffle (LHS or RHS).
      if (LHSIdx < LHSVWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < LHSVWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found constant vector with single element - convert to insertelement.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
            Shuffle->getName());
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }
    if (NewUndefElts) {
      // Add additional discovered undefs.
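      // e.g. if only the low half of a <4 x i32> shuffle result is demanded,
      // mask <0,1,2,3> is rewritten as <0,1,undef,undef>.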
      SmallVector<Constant*, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::Select: {
    APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
    if (ConstantVector* CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
      for (unsigned i = 0; i < VWidth; i++) {
        Constant *CElt = CV->getAggregateElement(i);
        // Method isNullValue always returns false when called on a
        // ConstantExpr. If CElt is a ConstantExpr then skip it in order to
        // avoid propagating incorrect information.
        if (isa<ConstantExpr>(CElt))
          continue;
        if (CElt->isNullValue())
          LeftDemanded.clearBit(i);
        else
          RightDemanded.clearBit(i);
      }
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.
    UndefElts &= UndefElts2;
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number
      // of elements in the input then an input element is live if any of the
      // corresponding output elements are live.
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an input element is live if the
      // corresponding output element is live.
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) {
      I->setOperand(0, TmpV);
      MadeChange = true;
    }

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number
      // of elements in the input then an output element is undef if the
      // corresponding input element is undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an output element is undef if all of the
      // corresponding input elements are undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimp");
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // div/rem demand all inputs, because they don't want divide by zero.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.  Consider things
    // like undef&0.  The result is known zero, not undef.
    UndefElts &= UndefElts2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    break;

  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    default: break;

    case Intrinsic::x86_xop_vfrcz_ss:
    case Intrinsic::x86_xop_vfrcz_sd:
      // The instructions for these intrinsics are specified to zero the upper
      // bits, not pass them through like other scalar intrinsics. So we
      // shouldn't just use Arg0 if DemandedElts[0] is clear like we do for
      // other intrinsics.  Instead we should return a zero vector.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return ConstantAggregateZero::get(II->getType());
      }

      // Only the lower element is used.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // Only the lower element is undefined. The high elements are zero.
      UndefElts = UndefElts[0];
      break;

    // Unary scalar-as-vector operations that work column-wise.
    case Intrinsic::x86_sse_rcp_ss:
    case Intrinsic::x86_sse_rsqrt_ss:
    case Intrinsic::x86_sse_sqrt_ss:
    case Intrinsic::x86_sse2_sqrt_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      // TODO: If only low elt lower SQRT to FSQRT (with rounding/exceptions
      // checks).
      break;

    // Binary scalar-as-vector operations that work column-wise.  The high
    // elements come from operand 0.  The low element is a function of both
    // operands.
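    // e.g. min_ss(A, B) computes <min(A[0],B[0]), A[1], A[2], A[3]> (modulo
    // NaN-ordering details), so only B's low element is demanded.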
    case Intrinsic::x86_sse_min_ss:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse2_cmp_sd: {
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if both lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0])
        UndefElts.clearBit(0);

      break;
    }

    // Binary scalar-as-vector operations that work column-wise.  The high
    // elements come from operand 0 and the low element comes from operand 1.
    case Intrinsic::x86_sse41_round_ss:
    case Intrinsic::x86_sse41_round_sd: {
      // Don't use the low element of operand 0.
      APInt DemandedElts2 = DemandedElts;
      DemandedElts2.clearBit(0);
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts2,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Take the high undef elements from operand 0 and take the lower
      // element from operand 1.
      UndefElts.clearBit(0);
      UndefElts |= UndefElts2[0];
      break;
    }

    // Three input scalar-as-vector operations that work column-wise.  The
    // high elements come from operand 0 and the low element is a function of
    // all three inputs.
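    // e.g. vfmadd_ss(A, B, C) computes <A[0]*B[0]+C[0], A[1], A[2], A[3]>, so
    // only the low elements of B and C are demanded.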
    case Intrinsic::x86_avx512_mask_add_ss_round:
    case Intrinsic::x86_avx512_mask_div_ss_round:
    case Intrinsic::x86_avx512_mask_mul_ss_round:
    case Intrinsic::x86_avx512_mask_sub_ss_round:
    case Intrinsic::x86_avx512_mask_max_ss_round:
    case Intrinsic::x86_avx512_mask_min_ss_round:
    case Intrinsic::x86_avx512_mask_add_sd_round:
    case Intrinsic::x86_avx512_mask_div_sd_round:
    case Intrinsic::x86_avx512_mask_mul_sd_round:
    case Intrinsic::x86_avx512_mask_sub_sd_round:
    case Intrinsic::x86_avx512_mask_max_sd_round:
    case Intrinsic::x86_avx512_mask_min_sd_round:
    case Intrinsic::x86_fma_vfmadd_ss:
    case Intrinsic::x86_fma_vfmsub_ss:
    case Intrinsic::x86_fma_vfnmadd_ss:
    case Intrinsic::x86_fma_vfnmsub_ss:
    case Intrinsic::x86_fma_vfmadd_sd:
    case Intrinsic::x86_fma_vfmsub_sd:
    case Intrinsic::x86_fma_vfnmadd_sd:
    case Intrinsic::x86_fma_vfnmsub_sd:
    case Intrinsic::x86_avx512_mask_vfmadd_ss:
    case Intrinsic::x86_avx512_mask_vfmadd_sd:
    case Intrinsic::x86_avx512_maskz_vfmadd_ss:
    case Intrinsic::x86_avx512_maskz_vfmadd_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1 and 2.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;

    case Intrinsic::x86_avx512_mask3_vfmadd_ss:
    case Intrinsic::x86_avx512_mask3_vfmadd_sd:
    case Intrinsic::x86_avx512_mask3_vfmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfmsub_sd:
    case Intrinsic::x86_avx512_mask3_vfnmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfnmsub_sd:
      // These intrinsics get the passthru bits from operand 2.
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg2.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(2);
      }

      // Only lower element is used for operand 0 and 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;

    case Intrinsic::x86_sse2_pmulu_dq:
    case Intrinsic::x86_sse41_pmuldq:
    case Intrinsic::x86_avx2_pmul_dq:
    case Intrinsic::x86_avx2_pmulu_dq:
    case Intrinsic::x86_avx512_pmul_dq_512:
    case Intrinsic::x86_avx512_pmulu_dq_512: {
      Value *Op0 = II->getArgOperand(0);
      Value *Op1 = II->getArgOperand(1);
      unsigned InnerVWidth = Op0->getType()->getVectorNumElements();
      assert((VWidth * 2) == InnerVWidth && "Unexpected input size");

      APInt InnerDemandedElts(InnerVWidth, 0);
      for (unsigned i = 0; i != VWidth; ++i)
        if (DemandedElts[i])
          InnerDemandedElts.setBit(i * 2);

      UndefElts2 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op0, InnerDemandedElts, UndefElts2,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      UndefElts3 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op1, InnerDemandedElts, UndefElts3,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      break;
    }

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
    case Intrinsic::x86_avx512_packuswb_512: {
      auto *Ty0 = II->getArgOperand(0)->getType();
      unsigned InnerVWidth = Ty0->getVectorNumElements();
      assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

      unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
      unsigned VWidthPerLane = VWidth / NumLanes;
      unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

      // Per lane, pack the elements of the first input and then the second.
      // e.g.
      // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
      // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
      for (int OpNum = 0; OpNum != 2; ++OpNum) {
        APInt OpDemandedElts(InnerVWidth, 0);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          unsigned LaneIdx = Lane * VWidthPerLane;
          for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
            unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
            if (DemandedElts[Idx])
              OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
          }
        }

        // Demand elements from the operand.
        auto *Op = II->getArgOperand(OpNum);
        APInt OpUndefElts(InnerVWidth, 0);
        TmpV = SimplifyDemandedVectorElts(Op, OpDemandedElts, OpUndefElts,
                                          Depth + 1);
        if (TmpV) {
          II->setArgOperand(OpNum, TmpV);
          MadeChange = true;
        }

        // Pack the operand's UNDEF elements, one lane at a time.
        OpUndefElts = OpUndefElts.zext(VWidth);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
          LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
          LaneElts <<= InnerVWidthPerLane * (2 * Lane + OpNum);
          UndefElts |= LaneElts;
        }
      }
      break;
    }

    // PSHUFB
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
    // PERMILVAR
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512:
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    // PERMV
    case Intrinsic::x86_avx2_permd:
    case Intrinsic::x86_avx2_permps: {
      // Result element i is selected by control element i, so the demanded
      // elements of the result map directly onto the control operand.
      Value *Op1 = II->getArgOperand(1);
      TmpV = SimplifyDemandedVectorElts(Op1, DemandedElts, UndefElts,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      break;
    }

    // SSE4A instructions leave the upper 64 bits of the 128-bit result
    // in an undefined state.
    case Intrinsic::x86_sse4a_extrq:
    case Intrinsic::x86_sse4a_extrqi:
    case Intrinsic::x86_sse4a_insertq:
    case Intrinsic::x86_sse4a_insertqi:
      UndefElts.setHighBits(VWidth / 2);
      break;
    case Intrinsic::amdgcn_buffer_load:
    case Intrinsic::amdgcn_buffer_load_format:
    case Intrinsic::amdgcn_image_sample:
    case Intrinsic::amdgcn_image_sample_cl:
    case Intrinsic::amdgcn_image_sample_d:
    case Intrinsic::amdgcn_image_sample_d_cl:
    case Intrinsic::amdgcn_image_sample_l:
    case Intrinsic::amdgcn_image_sample_b:
    case Intrinsic::amdgcn_image_sample_b_cl:
    case Intrinsic::amdgcn_image_sample_lz:
    case Intrinsic::amdgcn_image_sample_cd:
    case Intrinsic::amdgcn_image_sample_cd_cl:

    case Intrinsic::amdgcn_image_sample_c:
    case Intrinsic::amdgcn_image_sample_c_cl:
    case Intrinsic::amdgcn_image_sample_c_d:
    case Intrinsic::amdgcn_image_sample_c_d_cl:
    case Intrinsic::amdgcn_image_sample_c_l:
    case Intrinsic::amdgcn_image_sample_c_b:
    case Intrinsic::amdgcn_image_sample_c_b_cl:
    case Intrinsic::amdgcn_image_sample_c_lz:
    case Intrinsic::amdgcn_image_sample_c_cd:
    case Intrinsic::amdgcn_image_sample_c_cd_cl:

    case Intrinsic::amdgcn_image_sample_o:
    case Intrinsic::amdgcn_image_sample_cl_o:
    case Intrinsic::amdgcn_image_sample_d_o:
    case Intrinsic::amdgcn_image_sample_d_cl_o:
    case Intrinsic::amdgcn_image_sample_l_o:
    case Intrinsic::amdgcn_image_sample_b_o:
    case Intrinsic::amdgcn_image_sample_b_cl_o:
    case Intrinsic::amdgcn_image_sample_lz_o:
    case Intrinsic::amdgcn_image_sample_cd_o:
    case Intrinsic::amdgcn_image_sample_cd_cl_o:

    case Intrinsic::amdgcn_image_sample_c_o:
    case Intrinsic::amdgcn_image_sample_c_cl_o:
    case Intrinsic::amdgcn_image_sample_c_d_o:
    case Intrinsic::amdgcn_image_sample_c_d_cl_o:
    case Intrinsic::amdgcn_image_sample_c_l_o:
    case Intrinsic::amdgcn_image_sample_c_b_o:
    case Intrinsic::amdgcn_image_sample_c_b_cl_o:
    case Intrinsic::amdgcn_image_sample_c_lz_o:
    case Intrinsic::amdgcn_image_sample_c_cd_o:
    case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

    case Intrinsic::amdgcn_image_getlod: {
      if (VWidth == 1 || !DemandedElts.isMask())
        return nullptr;

      // TODO: Handle 3 vectors when supported in code gen.
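      // Shrink the call to the narrowest power-of-two element count that
      // still covers the demanded low elements; DemandedElts is a low-bit
      // mask at this point, so countTrailingOnes() is the demanded count.
      // Illustrative IR for the single-element case (operand lists elided):
      //   %v = call <4 x float> @llvm.amdgcn.image.sample(...) ; lane 0 used
      // becomes
      //   %s = call float @llvm.amdgcn.image.sample(...) ; dmask keeps 1 bit
      //   %v = insertelement <4 x float> undef, float %s, i64 0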
      unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countTrailingOnes());
      if (NewNumElts == VWidth)
        return nullptr;

      Module *M = II->getParent()->getParent()->getParent();
      Type *EltTy = V->getType()->getVectorElementType();

      Type *NewTy = (NewNumElts == 1) ? EltTy :
        VectorType::get(EltTy, NewNumElts);

      auto IID = II->getIntrinsicID();

      bool IsBuffer = IID == Intrinsic::amdgcn_buffer_load ||
                      IID == Intrinsic::amdgcn_buffer_load_format;

      Function *NewIntrin = IsBuffer ?
        Intrinsic::getDeclaration(M, IID, NewTy) :
        // Samplers have 3 mangled types.
        Intrinsic::getDeclaration(M, IID,
                                  { NewTy, II->getArgOperand(0)->getType(),
                                    II->getArgOperand(1)->getType()});

      SmallVector<Value *, 5> Args;
      for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
        Args.push_back(II->getArgOperand(I));

      IRBuilderBase::InsertPointGuard Guard(*Builder);
      Builder->SetInsertPoint(II);

      CallInst *NewCall = Builder->CreateCall(NewIntrin, Args);
      NewCall->takeName(II);
      NewCall->copyMetadata(*II);

      if (!IsBuffer) {
        // Shrink the sampler's dmask to match the new return width: keep the
        // first NewNumElts set bits and clear the rest.
        ConstantInt *DMask = dyn_cast<ConstantInt>(NewCall->getArgOperand(3));
        if (DMask) {
          unsigned DMaskVal = DMask->getZExtValue() & 0xf;

          unsigned PopCnt = 0;
          unsigned NewDMask = 0;
          for (unsigned I = 0; I < 4; ++I) {
            const unsigned Bit = 1 << I;
            if (DMaskVal & Bit) {
              if (++PopCnt > NewNumElts)
                break;

              NewDMask |= Bit;
            }
          }

          NewCall->setArgOperand(3, ConstantInt::get(DMask->getType(),
                                                     NewDMask));
        }
      }

      if (NewNumElts == 1) {
        return Builder->CreateInsertElement(UndefValue::get(V->getType()),
                                            NewCall,
                                            static_cast<uint64_t>(0));
      }

      // Widen the shrunken result back to the original type; lanes past the
      // new width read the undef shuffle operand.
      SmallVector<uint32_t, 8> EltMask;
      for (unsigned I = 0; I < VWidth; ++I)
        EltMask.push_back(I);

      Value *Shuffle = Builder->CreateShuffleVector(
          NewCall, UndefValue::get(NewTy), EltMask);

      MadeChange = true;
      return Shuffle;
    }
    }
    break;
  }
  }
  return MadeChange ? I : nullptr;
}