//===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains logic for simplifying instructions based on information
// about how they are used.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   APInt Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // The operand must be a constant integer or splat integer.
  Value *Op = I->getOperand(OpNo);
  const APInt *C;
  if (!match(Op, m_APInt(C)))
    return false;

  // If there are no bits set that aren't demanded, nothing to do.
  Demanded = Demanded.zextOrTrunc(C->getBitWidth());
  if ((~Demanded & *C) == 0)
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  Demanded &= *C;
  I->setOperand(OpNo, ConstantInt::get(Op->getType(), Demanded));

  return true;
}

/// Inst is an integer instruction that SimplifyDemandedBits knows about. See
/// if the instruction has any properties that allow us to simplify its
/// operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, KnownZero, KnownOne,
                                     0, &Inst);
  if (!V) return false;
  if (V == &Inst) return true;
  replaceInstUsesWith(Inst, V);
  return true;
}

/// This form of SimplifyDemandedBits simplifies the specified instruction
/// operand if possible, updating it in place. It returns true if it made any
/// change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                        const APInt &DemandedMask,
                                        APInt &KnownZero, APInt &KnownOne,
                                        unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, KnownZero,
                                          KnownOne, Depth, I);
  if (!NewVal) return false;
  U = NewVal;
  return true;
}

/// This function attempts to replace V with a simpler value based on the
/// demanded bits. When this function is called, it is known that only the bits
/// set in DemandedMask of the result of V are ever used downstream.
/// Consequently, depending on the mask and V, it may be possible to replace V
/// with a constant or one of its operands. In such cases, this function does
/// the replacement and returns the simplified value.
/// In all other cases, it returns null after analyzing the expression and
/// setting KnownOne to the set of bits known to be one in the expression.
/// KnownZero contains all the bits that are known to be zero in the
/// expression. These are provided to potentially allow the caller (which might
/// recursively be SimplifyDemandedBits itself) to simplify the expression.
/// KnownOne and KnownZero always follow the invariant that:
///   KnownOne & KnownZero == 0.
/// That is, a bit can't be both 1 and 0. Note that the bits in KnownOne and
/// KnownZero may only be accurate for those bits set in DemandedMask. Note
/// also that the bitwidth of V, DemandedMask, KnownZero and KnownOne must all
/// be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification. This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded. This
/// returns some other non-null value if it found out that V is equal to
/// another value in the context where the specified bits are demanded, but not
/// for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             APInt &KnownZero, APInt &KnownOne,
                                             unsigned Depth,
                                             Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      KnownZero.getBitWidth() == BitWidth &&
      KnownOne.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask, KnownZero and KnownOne "
      "must have same BitWidth");

  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    KnownOne = *C;
    KnownZero = ~KnownOne;
    return nullptr;
  }
  if (isa<ConstantPointerNull>(V)) {
    // We know all of the bits for a constant!
    KnownOne.clearAllBits();
    KnownZero.setAllBits();
    return nullptr;
  }

  KnownZero.clearAllBits();
  KnownOne.clearAllBits();
  if (DemandedMask == 0) { // Not demanding any bits from V.
    if (isa<UndefValue>(V))
      return nullptr;
    return UndefValue::get(VTy);
  }

  if (Depth == 6) // Limit search depth.
    return nullptr;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    return nullptr; // Only analyze instructions.
  }

  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse()) {
    return SimplifyMultipleUseDemandedBits(I, DemandedMask, KnownZero, KnownOne,
                                           Depth, CxtI);
  }

  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands. This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
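  // For instance, if the root is a multi-use trunc, one user may demand only
  // the low byte of the result while another demands all of it; only the
  // all-bits-demanded mask is safe to push into the operands here.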
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask.setAllBits();

  switch (I->getOpcode()) {
  default:
    computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownZero, LHSKnownZero,
                             LHSKnownOne, Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // Output known-0 bits are known to be clear if clear in either the LHS or
    // the RHS.
    APInt IKnownZero = RHSKnownZero | LHSKnownZero;
    // Output known-1 bits are only known if set in both the LHS & RHS.
    APInt IKnownOne = RHSKnownOne & LHSKnownOne;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
        (DemandedMask & ~LHSKnownZero))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
        (DemandedMask & ~RHSKnownZero))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
      return I;

    KnownZero = std::move(IKnownZero);
    KnownOne = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownOne, LHSKnownZero,
                             LHSKnownOne, Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    APInt IKnownZero = RHSKnownZero & LHSKnownZero;
    // Output known-1 bits are known to be set if set in either the LHS or the
    // RHS.
    APInt IKnownOne = RHSKnownOne | LHSKnownOne;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
        (DemandedMask & ~LHSKnownOne))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
        (DemandedMask & ~RHSKnownOne))
      return I->getOperand(1);

    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
        (DemandedMask & (~RHSKnownZero)))
      return I->getOperand(0);
    if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
        (DemandedMask & (~LHSKnownZero)))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
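    // For instance (illustrative values): if only the low byte of
    // (X | 0x1F0) is demanded, the constant can be shrunk to 0xF0 without
    // changing any demanded bit of the result.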
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    KnownZero = std::move(IKnownZero);
    KnownOne = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnownZero, LHSKnownOne,
                             Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
                       (RHSKnownOne & LHSKnownOne);
    // Output known-1 bits are set if set in only one of the LHS and RHS.
    APInt IKnownOne = (RHSKnownZero & LHSKnownOne) |
                      (RHSKnownOne & LHSKnownZero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & RHSKnownZero) == DemandedMask)
      return I->getOperand(0);
    if ((DemandedMask & LHSKnownZero) == DemandedMask)
      return I->getOperand(1);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
      Instruction *Or =
          BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                   I->getName());
      return InsertNewInstWith(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
      // All known.
      if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
        Constant *AndC = Constant::getIntegerValue(VTy,
                                                   ~RHSKnownOne & DemandedMask);
        Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        return InsertNewInstWith(And, *I);
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // If our LHS is an 'and' and if it has one use, and if any of the bits we
    // are flipping are known to be set, then the xor is just resetting those
    // bits to zero. We can just knock out bits from the 'and' and the 'xor',
    // simplifying both of them.
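    // For instance (illustrative values): if bit 4 of X is known one, then
    // ((X & 31) ^ 16) can become (X & 15); bit 4 is known to xor to zero, so
    // it can be knocked out of both constants.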
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          isa<ConstantInt>(I->getOperand(1)) &&
          isa<ConstantInt>(LHSInst->getOperand(1)) &&
          (LHSKnownOne & RHSKnownOne & DemandedMask) != 0) {
        ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
        ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
        APInt NewMask = ~(LHSKnownOne & RHSKnownOne & DemandedMask);

        Constant *AndC =
            ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        InsertNewInstWith(NewAnd, *I);

        Constant *XorC =
            ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
        return InsertNewInstWith(NewXor, *I);
      }

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZero = std::move(IKnownZero);
    // Output known-1 bits are set if set in only one of the LHS and RHS.
    KnownOne = std::move(IKnownOne);
    break;
  }
  case Instruction::Select:
    // If this is a select as part of a min/max pattern, don't simplify any
    // further in case we break the structure.
    Value *LHS, *RHS;
    if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnownZero, LHSKnownOne,
                             Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    KnownOne = RHSKnownOne & LHSKnownOne;
    KnownZero = RHSKnownZero & LHSKnownZero;
    break;
  case Instruction::Trunc: {
    unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
    DemandedMask = DemandedMask.zext(truncBf);
    KnownZero = KnownZero.zext(truncBf);
    KnownOne = KnownOne.zext(truncBf);
    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    DemandedMask = DemandedMask.trunc(BitWidth);
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      return nullptr; // vector->int or fp->int?

    if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (VectorType *SrcVTy =
              dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return nullptr;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return nullptr;
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // Don't touch a vector-to-scalar bitcast.
      return nullptr;

    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    break;
  case Instruction::ZExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    DemandedMask = DemandedMask.trunc(SrcBitWidth);
    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    DemandedMask = DemandedMask.zext(BitWidth);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    // The top bits are known to be zero.
    KnownZero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask &
                              APInt::getLowBitsSet(BitWidth, SrcBitWidth);

    APInt NewBits(APInt::getBitsSetFrom(BitWidth, SrcBitWidth));
    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if ((NewBits & DemandedMask) != 0)
      InputDemandedBits.setBit(SrcBitWidth-1);

    InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    InputDemandedBits = InputDemandedBits.zext(BitWidth);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, or if the NewBits are not demanded,
    // convert this into a zero extension.
    if (KnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
      // Convert to ZExt cast.
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    } else if (KnownOne[SrcBitWidth-1]) { // Input sign bit known set.
      KnownOne |= NewBits;
    }
    break;
  }
  case Instruction::Add:
  case Instruction::Sub: {
    // If the high-bits of an ADD/SUB are not demanded, then we do not care
    // about the high bits of the operands.
    unsigned NLZ = DemandedMask.countLeadingZeros();
    if (NLZ > 0) {
      // Right fill the mask of bits for this ADD/SUB to demand the most
      // significant bit and all those below it.
      APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
      if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
          SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnownZero, LHSKnownOne,
                               Depth + 1) ||
          ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
          SimplifyDemandedBits(I, 1, DemandedFromOps, RHSKnownZero, RHSKnownOne,
                               Depth + 1)) {
        // Disable the nsw and nuw flags here: We can no longer guarantee that
        // we won't wrap after simplification. Removing the nsw/nuw flags is
        // legal here because the top bit is not demanded.
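        // For instance, shrinking a constant of an i32 'add nuw' so that it
        // only covers the demanded low bits may introduce an unsigned wrap
        // in the ignored high bits, so the flags are dropped conservatively.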
        BinaryOperator &BinOP = *cast<BinaryOperator>(I);
        BinOP.setHasNoSignedWrap(false);
        BinOP.setHasNoUnsignedWrap(false);
        return I;
      }

      // If we are known to be adding/subtracting zeros to every bit below
      // the highest demanded bit, we just return the other side.
      if ((DemandedFromOps & RHSKnownZero) == DemandedFromOps)
        return I->getOperand(0);
      // We can't do this with the LHS for subtraction.
      if (I->getOpcode() == Instruction::Add &&
          (DemandedFromOps & LHSKnownZero) == DemandedFromOps)
        return I->getOperand(1);
    }

    // Otherwise just hand the add/sub off to computeKnownBits to fill in
    // the known zeros and ones.
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    break;
  }
  case Instruction::Shl:
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      {
        Value *VarX; ConstantInt *C1;
        if (match(I->getOperand(0), m_Shr(m_Value(VarX), m_ConstantInt(C1)))) {
          Instruction *Shr = cast<Instruction>(I->getOperand(0));
          Value *R = SimplifyShrShlDemandedBits(Shr, I, DemandedMask,
                                                KnownZero, KnownOne);
          if (R)
            return R;
        }
      }

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, then it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt+1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne <<= ShiftAmt;
      // Low bits are known zero.
      if (ShiftAmt)
        KnownZero.setLowBits(ShiftAmt);
    }
    break;
  case Instruction::LShr:
    // For a logical shift right.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<LShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      KnownZero.lshrInPlace(ShiftAmt);
      KnownOne.lshrInPlace(ShiftAmt);
      if (ShiftAmt)
        KnownZero.setHighBits(ShiftAmt); // High bits known zero.
    }
    break;
  case Instruction::AShr:
    // If this is an arithmetic shift right and only the low-bit is set, we
    // can always convert this into a logical shr, even if the shift amount
    // is variable. The low bit of the shift cannot be an input sign bit
    // unless the shift amount is >= the size of the datatype, which is
    // undefined.
    if (DemandedMask == 1) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
          I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
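    // For instance, for i8 the sign bit of (X ashr 3) is still bit 7 of X,
    // so when DemandedMask is 0x80 the shift can be dropped entirely.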
    if (DemandedMask.isSignBit())
      return I->getOperand(0);

    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the "high bits" are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.setSignBit();

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<AShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      KnownZero.lshrInPlace(ShiftAmt);
      KnownOne.lshrInPlace(ShiftAmt);

      // Handle the sign bits.
      APInt SignBit(APInt::getSignBit(BitWidth));
      // Adjust to where it is now in the mask.
      SignBit.lshrInPlace(ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
          (HighBits & ~DemandedMask) == HighBits) {
        // Perform the logical shift right.
        BinaryOperator *NewVal = BinaryOperator::CreateLShr(I->getOperand(0),
                                                            SA, I->getName());
        NewVal->setIsExact(cast<BinaryOperator>(I)->isExact());
        return InsertNewInstWith(NewVal, *I);
      } else if ((KnownOne & SignBit) != 0) { // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // X % -1 demands all the bits because we don't want to introduce
      // INT_MIN % -1 (== undef) by accident.
      if (Rem->isAllOnesValue())
        break;
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA)) // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnownZero, LHSKnownOne,
                                 Depth + 1))
          return I;

        // The low bits of LHS are unchanged by the srem.
        KnownZero = LHSKnownZero & LowBits;
        KnownOne = LHSKnownOne & LowBits;

        // If LHS is non-negative or has all low bits zero, then the upper
        // bits are all zero.
        if (LHSKnownZero.isSignBitSet() ||
            ((LHSKnownZero & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If LHS is negative and not all low bits are zero, then the upper
        // bits are all one.
        if (LHSKnownOne.isSignBitSet() && ((LHSKnownOne & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
        break;
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (DemandedMask.isSignBitSet()) {
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       CxtI);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isSignBitSet())
        KnownZero.setSignBit();
    }
    break;
  case Instruction::URem: {
    APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, KnownZero2, KnownOne2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, KnownZero2, KnownOne2, Depth + 1))
      return I;

    unsigned Leaders = KnownZero2.countLeadingOnes();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte. If we have 11 trailing zeros, then
        // we need all the bits down to bit 8. Likewise, round NLZ. If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte
          // into the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
                ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
                ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
        break;
      }
      case Intrinsic::x86_mmx_pmovmskb:
      case Intrinsic::x86_sse_movmsk_ps:
      case Intrinsic::x86_sse2_movmsk_pd:
      case Intrinsic::x86_sse2_pmovmskb_128:
      case Intrinsic::x86_avx_movmsk_ps_256:
      case Intrinsic::x86_avx_movmsk_pd_256:
      case Intrinsic::x86_avx2_pmovmskb: {
        // MOVMSK copies the vector elements' sign bits to the low bits
        // and zeros the high bits.
        unsigned ArgWidth;
        if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
          ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
        } else {
          auto Arg = II->getArgOperand(0);
          auto ArgType = cast<VectorType>(Arg->getType());
          ArgWidth = ArgType->getNumElements();
        }

        // If we don't need any of the low bits then return zero; we know
        // that DemandedMask is non-zero already.
        APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
        if (DemandedElts == 0)
          return ConstantInt::getNullValue(VTy);

        // We know that the upper bits are set to zero.
        KnownZero.setBitsFrom(ArgWidth);
        return nullptr;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero.setBitsFrom(32);
        return nullptr;
      }
    }
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
    return Constant::getIntegerValue(VTy, KnownOne);
  return nullptr;
}

/// Helper routine of SimplifyDemandedUseBits. It computes KnownZero/KnownOne
/// bits.
/// It also tries to handle simplifications that can be done based on
/// DemandedMask, but without modifying the Instruction.
Value *InstCombiner::SimplifyMultipleUseDemandedBits(Instruction *I,
                                                     const APInt &DemandedMask,
                                                     APInt &KnownZero,
                                                     APInt &KnownOne,
                                                     unsigned Depth,
                                                     Instruction *CxtI) {
  unsigned BitWidth = DemandedMask.getBitWidth();
  Type *ITy = I->getType();

  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);

  // Despite the fact that we can't simplify this instruction in all users'
  // contexts, we can at least compute the known-zero/known-one bits, and we
  // can do simplifications that apply to *just* the one user if we know that
  // this instruction has a simpler value in that context.
  switch (I->getOpcode()) {
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
                     CxtI);
    computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                     CxtI);

    // Output known-0 bits are known to be clear if clear in either the LHS or
    // the RHS.
    APInt IKnownZero = RHSKnownZero | LHSKnownZero;
    // Output known-1 bits are only known if set in both the LHS & RHS.
    APInt IKnownOne = RHSKnownOne & LHSKnownOne;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
        (DemandedMask & ~LHSKnownZero))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
        (DemandedMask & ~RHSKnownZero))
      return I->getOperand(1);

    KnownZero = std::move(IKnownZero);
    KnownOne = std::move(IKnownOne);
    break;
  }
  case Instruction::Or: {
    // We can simplify (X|Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.

    // If either the LHS or the RHS are One, the result is One.
    computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
                     CxtI);
    computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                     CxtI);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    APInt IKnownZero = RHSKnownZero & LHSKnownZero;
    // Output known-1 bits are known to be set if set in either the LHS or the
    // RHS.
    APInt IKnownOne = RHSKnownOne | LHSKnownOne;

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
        (DemandedMask & ~LHSKnownOne))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
        (DemandedMask & ~RHSKnownOne))
      return I->getOperand(1);

    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
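    // For instance, if Y is known zero outside the low nibble and the low
    // nibble of X is known to be all ones, then (X | Y) computes X on every
    // demanded bit, so operand 0 can be used directly.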
    if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
        (DemandedMask & (~RHSKnownZero)))
      return I->getOperand(0);
    if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
        (DemandedMask & (~LHSKnownZero)))
      return I->getOperand(1);

    KnownZero = std::move(IKnownZero);
    KnownOne = std::move(IKnownOne);
    break;
  }
  case Instruction::Xor: {
    // We can simplify (X^Y) -> X or Y in the user's context if we know that
    // only bits from X or Y are demanded.

    computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
                     CxtI);
    computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                     CxtI);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
                       (RHSKnownOne & LHSKnownOne);
    // Output known-1 bits are set if set in only one of the LHS and RHS.
    APInt IKnownOne = (RHSKnownZero & LHSKnownOne) |
                      (RHSKnownOne & LHSKnownZero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
      return Constant::getIntegerValue(ITy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if ((DemandedMask & RHSKnownZero) == DemandedMask)
      return I->getOperand(0);
    if ((DemandedMask & LHSKnownZero) == DemandedMask)
      return I->getOperand(1);

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZero = std::move(IKnownZero);
    // Output known-1 bits are set if set in only one of the LHS and RHS.
    KnownOne = std::move(IKnownOne);
    break;
  }
  default:
    // Compute the KnownZero/KnownOne bits to simplify things downstream.
    computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);

    // If this user is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
      return Constant::getIntegerValue(ITy, KnownOne);

    break;
  }

  return nullptr;
}

/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
/// "E1 = (X lsr C1) << C2", where C1 and C2 are constants, into
/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
/// of "C2 - C1".
///
/// Suppose E1 and E2 are generally different in bits S = {bm, bm+1, ...,
/// bn}, without considering the specific value X is holding.
/// This transformation is legal iff one of the following conditions holds:
///  1) All the bits in S are 0, in which case E1 == E2.
///  2) We don't care about those bits in S, per the input DemandedMask.
///  3) Combination of 1) and 2). Some bits in S are 0, and we don't care
///     about the rest of the bits in S.
///
/// Currently we only test condition 2).
///
/// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
/// not successful.
Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
                                                Instruction *Shl,
                                                const APInt &DemandedMask,
                                                APInt &KnownZero,
                                                APInt &KnownOne) {
  const APInt &ShlOp1 = cast<ConstantInt>(Shl->getOperand(1))->getValue();
  const APInt &ShrOp1 = cast<ConstantInt>(Shr->getOperand(1))->getValue();
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.
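
  // For instance (illustrative values): ((X >>u 1) << 3) agrees with
  // (X << 2) on every bit except bit 2, so whenever bit 2 is not demanded
  // the shift pair below can be folded into a single shift.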
  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getIntegerBitWidth();
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undef.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  KnownOne.clearAllBits();
  KnownZero.setLowBits(ShlAmt - 1);
  KnownZero &= DemandedMask;

  APInt BitMask1(APInt::getAllOnesValue(BitWidth));
  APInt BitMask2(APInt::getAllOnesValue(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt) :
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // Check if condition-2 (see the comment to this function) is satisfied.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      return VarX;

    if (!Shr->hasOneUse())
      return nullptr;

    BinaryOperator *New;
    if (ShrAmt < ShlAmt) {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}

/// The specified value produces a vector with any number of elements.
/// DemandedElts contains the set of elements that are actually used by the
/// caller. This method analyzes which elements of the operand are undef and
/// returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified and the resultant value is
/// returned. This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = V->getType()->getVectorNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return nullptr;
  }

  if (DemandedElts == 0) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;

  // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
  if (Constant *C = dyn_cast<Constant>(V)) {
    // Check if this is identity. If so, return 0 since we are not simplifying
    // anything.
    if (DemandedElts.isAllOnesValue())
      return nullptr;

    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    SmallVector<Constant*, 16> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) { // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
        continue;
      }

      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) return nullptr;

      if (isa<UndefValue>(Elt)) { // Already undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
      } else {                    // Otherwise, defined.
        Elts.push_back(Elt);
      }
    }

    // If we changed the constant, return it.
    Constant *NewCV = ConstantVector::get(Elts);
    return NewCV != C ? NewCV : nullptr;
  }

  // Limit search depth.
  if (Depth == 10)
    return nullptr;

  // If multiple users are using the root value, proceed with
  // simplification conservatively assuming that all elements
  // are needed.
  if (!V->hasOneUse()) {
    // Quit if we find multiple users of a non-root value though.
    // They'll be handled when it's their turn to be visited by
    // the main instcombine process.
    if (Depth != 0)
      // TODO: Just compute the UndefElts information recursively.
      return nullptr;

    // Conservatively assume that all elements are needed.
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr; // Only analyze instructions.

  bool MadeChange = false;
  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  Value *TmpV;
  switch (I->getOpcode()) {
  default: break;

  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it overwrites.
    // Demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!Idx) {
      // Note that we can't propagate undef elt info, because we don't know
      // which elt is getting updated.
      TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
      break;
    }

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
    unsigned IdxNo = Idx->getZExtValue();
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      Worklist.Add(I);
      return I->getOperand(0);
    }

    // Otherwise, the element inserted overwrites whatever was there, so the
    // input demanded set is simpler than the output set.
    APInt DemandedElts2 = DemandedElts;
    DemandedElts2.clearBit(IdxNo);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    // The inserted element is defined.
    UndefElts.clearBit(IdxNo);
    break;
  }
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    unsigned LHSVWidth =
        Shuffle->getOperand(0)->getType()->getVectorNumElements();
    APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < LHSVWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < LHSVWidth)
            LeftDemanded.setBit(MaskVal);
          else
            RightDemanded.setBit(MaskVal - LHSVWidth);
        }
      }
    }

    APInt LHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
                                      LHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    APInt RHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
                                      RHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < LHSVWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - LHSVWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
          RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
        }
      }
    }

    // Try to transform a shuffle with a constant vector, of which a single
    // element is used, into a single insertelement instruction.
    // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
    // insertelement V, C[ci], ci-n
    if (LHSVWidth == Shuffle->getType()->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find constant vector with the single element in shuffle (LHS or RHS).
      if (LHSIdx < LHSVWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < LHSVWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found a constant vector with a single used element; convert to
      // insertelement.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
            Shuffle->getName());
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }
    if (NewUndefElts) {
      // Add additional discovered undefs.
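      // Rebuild the shuffle mask, substituting undef indices for the lanes
      // that are no longer demanded.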
      SmallVector<Constant*, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::Select: {
    APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
    if (ConstantVector *CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
      for (unsigned i = 0; i < VWidth; i++) {
        Constant *CElt = CV->getAggregateElement(i);
        // Method isNullValue always returns false when called on a
        // ConstantExpr. If CElt is a ConstantExpr then skip it in order to
        // avoid propagating incorrect information.
        if (isa<ConstantExpr>(CElt))
          continue;
        if (CElt->isNullValue())
          LeftDemanded.clearBit(i);
        else
          RightDemanded.clearBit(i);
      }
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.
    UndefElts &= UndefElts2;
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number
      // of elements in the input then an input element is live if any of the
      // corresponding output elements are live.
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an input element is live if the
      // corresponding output element is live.
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) {
      I->setOperand(0, TmpV);
      MadeChange = true;
    }

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number
      // of elements in the input then an output element is undef if the
      // corresponding input element is undef.
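      // For instance, for a <2 x i64> -> <4 x i32> bitcast (Ratio == 2),
      // output elements 2*i and 2*i+1 are marked undef when input element i
      // is undef.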
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an output element is undef if all of the
      // corresponding input elements are undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimp");
    }
    break;
  }
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // Note that div/rem are not handled here: they demand all of their input
    // elements, because simplifying an operand could introduce a divide by
    // zero.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined. Consider things
    // like undef&0. The result is known zero, not undef.
    UndefElts &= UndefElts2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    break;

  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    default: break;

    case Intrinsic::x86_xop_vfrcz_ss:
    case Intrinsic::x86_xop_vfrcz_sd:
      // The instructions for these intrinsics are specified to zero the upper
      // bits, not pass them through like other scalar intrinsics. So we
      // shouldn't just use Arg0 if DemandedElts[0] is clear like we do for
      // other intrinsics. Instead we should return a zero vector.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return ConstantAggregateZero::get(II->getType());
      }

      // Only the lower element is used.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // Only the lower element is undefined. The high elements are zero.
      UndefElts = UndefElts[0];
      break;

    // Unary scalar-as-vector operations that work column-wise.
    case Intrinsic::x86_sse_rcp_ss:
    case Intrinsic::x86_sse_rsqrt_ss:
    case Intrinsic::x86_sse_sqrt_ss:
    case Intrinsic::x86_sse2_sqrt_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      // TODO: If only low elt lower SQRT to FSQRT (with rounding/exceptions
      // checks).
      break;

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0. The low element is a function of both
    // operands.
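    // For instance, for minss the result is
    // <min(A[0], B[0]), A[1], A[2], A[3]>.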
    case Intrinsic::x86_sse_min_ss:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse2_cmp_sd: {
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if both lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0])
        UndefElts.clearBit(0);

      break;
    }

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element comes from operand 1.
    case Intrinsic::x86_sse41_round_ss:
    case Intrinsic::x86_sse41_round_sd: {
      // Don't use the low element of operand 0.
      APInt DemandedElts2 = DemandedElts;
      DemandedElts2.clearBit(0);
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts2,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Take the high undef elements from operand 0 and take the low element
      // from operand 1.
      UndefElts.clearBit(0);
      UndefElts |= UndefElts2[0];
      break;
    }

    // Three input scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element is a function of all
    // three inputs.
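    // For instance, for vfmadd_ss the result is
    // <fma(A[0], B[0], C[0]), A[1], A[2], A[3]>.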
    case Intrinsic::x86_avx512_mask_add_ss_round:
    case Intrinsic::x86_avx512_mask_div_ss_round:
    case Intrinsic::x86_avx512_mask_mul_ss_round:
    case Intrinsic::x86_avx512_mask_sub_ss_round:
    case Intrinsic::x86_avx512_mask_max_ss_round:
    case Intrinsic::x86_avx512_mask_min_ss_round:
    case Intrinsic::x86_avx512_mask_add_sd_round:
    case Intrinsic::x86_avx512_mask_div_sd_round:
    case Intrinsic::x86_avx512_mask_mul_sd_round:
    case Intrinsic::x86_avx512_mask_sub_sd_round:
    case Intrinsic::x86_avx512_mask_max_sd_round:
    case Intrinsic::x86_avx512_mask_min_sd_round:
    case Intrinsic::x86_fma_vfmadd_ss:
    case Intrinsic::x86_fma_vfmsub_ss:
    case Intrinsic::x86_fma_vfnmadd_ss:
    case Intrinsic::x86_fma_vfnmsub_ss:
    case Intrinsic::x86_fma_vfmadd_sd:
    case Intrinsic::x86_fma_vfmsub_sd:
    case Intrinsic::x86_fma_vfnmadd_sd:
    case Intrinsic::x86_fma_vfnmsub_sd:
    case Intrinsic::x86_avx512_mask_vfmadd_ss:
    case Intrinsic::x86_avx512_mask_vfmadd_sd:
    case Intrinsic::x86_avx512_maskz_vfmadd_ss:
    case Intrinsic::x86_avx512_maskz_vfmadd_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only the lower element is used for operands 1 and 2.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;

    case Intrinsic::x86_avx512_mask3_vfmadd_ss:
    case Intrinsic::x86_avx512_mask3_vfmadd_sd:
    case Intrinsic::x86_avx512_mask3_vfmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfmsub_sd:
    case Intrinsic::x86_avx512_mask3_vfnmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfnmsub_sd:
      // These intrinsics get the passthru bits from operand 2.
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg2.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(2);
      }

      // Only the lower element is used for operands 0 and 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
    if (!UndefElts2[0] || !UndefElts3[0])
      UndefElts.clearBit(0);

    break;

  case Intrinsic::x86_sse2_pmulu_dq:
  case Intrinsic::x86_sse41_pmuldq:
  case Intrinsic::x86_avx2_pmul_dq:
  case Intrinsic::x86_avx2_pmulu_dq:
  case Intrinsic::x86_avx512_pmul_dq_512:
  case Intrinsic::x86_avx512_pmulu_dq_512: {
    Value *Op0 = II->getArgOperand(0);
    Value *Op1 = II->getArgOperand(1);
    unsigned InnerVWidth = Op0->getType()->getVectorNumElements();
    assert((VWidth * 2) == InnerVWidth && "Unexpected input size");

    // Each 64-bit result element is computed from the 32-bit input elements
    // at index i * 2, so only the even-indexed input elements are demanded.
    APInt InnerDemandedElts(InnerVWidth, 0);
    for (unsigned i = 0; i != VWidth; ++i)
      if (DemandedElts[i])
        InnerDemandedElts.setBit(i * 2);

    UndefElts2 = APInt(InnerVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(Op0, InnerDemandedElts, UndefElts2,
                                      Depth + 1);
    if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

    UndefElts3 = APInt(InnerVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(Op1, InnerDemandedElts, UndefElts3,
                                      Depth + 1);
    if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

    break;
  }

  case Intrinsic::x86_sse2_packssdw_128:
  case Intrinsic::x86_sse2_packsswb_128:
  case Intrinsic::x86_sse2_packuswb_128:
  case Intrinsic::x86_sse41_packusdw:
  case Intrinsic::x86_avx2_packssdw:
  case Intrinsic::x86_avx2_packsswb:
  case Intrinsic::x86_avx2_packusdw:
  case Intrinsic::x86_avx2_packuswb:
  case Intrinsic::x86_avx512_packssdw_512:
  case Intrinsic::x86_avx512_packsswb_512:
  case Intrinsic::x86_avx512_packusdw_512:
  case Intrinsic::x86_avx512_packuswb_512: {
    auto *Ty0 = II->getArgOperand(0)->getType();
    unsigned InnerVWidth = Ty0->getVectorNumElements();
    assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

    unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
    unsigned VWidthPerLane = VWidth / NumLanes;
    unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

    // Per lane, pack the elements of the first input and then the second.
    // e.g.
    // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
    // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
    for (int OpNum = 0; OpNum != 2; ++OpNum) {
      APInt OpDemandedElts(InnerVWidth, 0);
      for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
        unsigned LaneIdx = Lane * VWidthPerLane;
        for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
          unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
          if (DemandedElts[Idx])
            OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
        }
      }

      // Demand the corresponding elements from the operand.
      auto *Op = II->getArgOperand(OpNum);
      APInt OpUndefElts(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op, OpDemandedElts, OpUndefElts,
                                        Depth + 1);
      if (TmpV) {
        II->setArgOperand(OpNum, TmpV);
        MadeChange = true;
      }

      // Pack the operand's UNDEF elements, one lane at a time.
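      // E.g. for v32i8 PACK of two v16i16 inputs (NumLanes == 2,
      // InnerVWidthPerLane == 8), operand 0's lane 1 undef bits land at
      // result elements [16,24) and operand 1's lane 1 undef bits at
      // [24,32), which is what the shift by
      // InnerVWidthPerLane * (2 * Lane + OpNum) below produces.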
      OpUndefElts = OpUndefElts.zext(VWidth);
      for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
        APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
        LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
        LaneElts = LaneElts.shl(InnerVWidthPerLane * (2 * Lane + OpNum));
        UndefElts |= LaneElts;
      }
    }
    break;
  }

  // PSHUFB
  case Intrinsic::x86_ssse3_pshuf_b_128:
  case Intrinsic::x86_avx2_pshuf_b:
  case Intrinsic::x86_avx512_pshuf_b_512:
  // PERMILVAR
  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx512_vpermilvar_ps_512:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256:
  case Intrinsic::x86_avx512_vpermilvar_pd_512:
  // PERMV
  case Intrinsic::x86_avx2_permd:
  case Intrinsic::x86_avx2_permps: {
    // Each result element is selected by the corresponding element of the
    // index/mask operand (operand 1), so the result's demanded elements map
    // directly onto it.
    Value *Op1 = II->getArgOperand(1);
    TmpV = SimplifyDemandedVectorElts(Op1, DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
    break;
  }

  // SSE4A instructions leave the upper 64 bits of the 128-bit result
  // in an undefined state.
  case Intrinsic::x86_sse4a_extrq:
  case Intrinsic::x86_sse4a_extrqi:
  case Intrinsic::x86_sse4a_insertq:
  case Intrinsic::x86_sse4a_insertqi:
    UndefElts.setHighBits(VWidth / 2);
    break;
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format:
  case Intrinsic::amdgcn_image_sample:
  case Intrinsic::amdgcn_image_sample_cl:
  case Intrinsic::amdgcn_image_sample_d:
  case Intrinsic::amdgcn_image_sample_d_cl:
  case Intrinsic::amdgcn_image_sample_l:
  case Intrinsic::amdgcn_image_sample_b:
  case Intrinsic::amdgcn_image_sample_b_cl:
  case Intrinsic::amdgcn_image_sample_lz:
  case Intrinsic::amdgcn_image_sample_cd:
  case Intrinsic::amdgcn_image_sample_cd_cl:

  case Intrinsic::amdgcn_image_sample_c:
  case Intrinsic::amdgcn_image_sample_c_cl:
  case Intrinsic::amdgcn_image_sample_c_d:
  case Intrinsic::amdgcn_image_sample_c_d_cl:
  case Intrinsic::amdgcn_image_sample_c_l:
  case Intrinsic::amdgcn_image_sample_c_b:
  case Intrinsic::amdgcn_image_sample_c_b_cl:
  case Intrinsic::amdgcn_image_sample_c_lz:
  case Intrinsic::amdgcn_image_sample_c_cd:
  case Intrinsic::amdgcn_image_sample_c_cd_cl:

  case Intrinsic::amdgcn_image_sample_o:
  case Intrinsic::amdgcn_image_sample_cl_o:
  case Intrinsic::amdgcn_image_sample_d_o:
  case Intrinsic::amdgcn_image_sample_d_cl_o:
  case Intrinsic::amdgcn_image_sample_l_o:
  case Intrinsic::amdgcn_image_sample_b_o:
  case Intrinsic::amdgcn_image_sample_b_cl_o:
  case Intrinsic::amdgcn_image_sample_lz_o:
  case Intrinsic::amdgcn_image_sample_cd_o:
  case Intrinsic::amdgcn_image_sample_cd_cl_o:

  case Intrinsic::amdgcn_image_sample_c_o:
  case Intrinsic::amdgcn_image_sample_c_cl_o:
  case Intrinsic::amdgcn_image_sample_c_d_o:
  case Intrinsic::amdgcn_image_sample_c_d_cl_o:
  case Intrinsic::amdgcn_image_sample_c_l_o:
  case Intrinsic::amdgcn_image_sample_c_b_o:
  case Intrinsic::amdgcn_image_sample_c_b_cl_o:
  case Intrinsic::amdgcn_image_sample_c_lz_o:
  case Intrinsic::amdgcn_image_sample_c_cd_o:
  case Intrinsic::amdgcn_image_sample_c_cd_cl_o:

  case Intrinsic::amdgcn_image_getlod: {
    // Only a contiguous run of low elements can be demanded if we are to
    // shrink the load/sample; arbitrary element subsets are not handled.
    if (VWidth == 1 || !DemandedElts.isMask())
      return nullptr;
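    // From here on we rebuild the call with a narrower return type covering
    // just the demanded low elements, then widen the result back to the
    // original type for the remaining users.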
    // TODO: Handle 3 vectors when supported in codegen.
    unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countTrailingOnes());
    if (NewNumElts == VWidth)
      return nullptr;

    Module *M = II->getParent()->getParent()->getParent();
    Type *EltTy = V->getType()->getVectorElementType();

    Type *NewTy = (NewNumElts == 1) ? EltTy :
      VectorType::get(EltTy, NewNumElts);

    auto IID = II->getIntrinsicID();

    bool IsBuffer = IID == Intrinsic::amdgcn_buffer_load ||
                    IID == Intrinsic::amdgcn_buffer_load_format;

    Function *NewIntrin = IsBuffer ?
      Intrinsic::getDeclaration(M, IID, NewTy) :
      // Samplers have 3 mangled types.
      Intrinsic::getDeclaration(M, IID,
                                { NewTy, II->getArgOperand(0)->getType(),
                                  II->getArgOperand(1)->getType()});

    SmallVector<Value *, 5> Args;
    for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
      Args.push_back(II->getArgOperand(I));

    IRBuilderBase::InsertPointGuard Guard(*Builder);
    Builder->SetInsertPoint(II);

    CallInst *NewCall = Builder->CreateCall(NewIntrin, Args);
    NewCall->takeName(II);
    NewCall->copyMetadata(*II);

    if (!IsBuffer) {
      ConstantInt *DMask = dyn_cast<ConstantInt>(NewCall->getArgOperand(3));
      if (DMask) {
        unsigned DMaskVal = DMask->getZExtValue() & 0xf;

        // Keep only the lowest NewNumElts set bits of the dmask, since the
        // narrowed call returns that many elements. E.g. a dmask of 0xB
        // (channels 0, 1, 3) shrunk to two elements becomes 0x3.
        unsigned PopCnt = 0;
        unsigned NewDMask = 0;
        for (unsigned I = 0; I < 4; ++I) {
          const unsigned Bit = 1 << I;
          if (DMaskVal & Bit) {
            if (++PopCnt > NewNumElts)
              break;

            NewDMask |= Bit;
          }
        }

        NewCall->setArgOperand(3, ConstantInt::get(DMask->getType(), NewDMask));
      }
    }

    if (NewNumElts == 1) {
      return Builder->CreateInsertElement(UndefValue::get(V->getType()),
                                          NewCall, static_cast<uint64_t>(0));
    }

    // Widen the narrowed result back to the original type with a shuffle so
    // existing users remain valid; the extra high lanes are undef.
    SmallVector<uint32_t, 8> EltMask;
    for (unsigned I = 0; I < VWidth; ++I)
      EltMask.push_back(I);

    Value *Shuffle = Builder->CreateShuffleVector(
        NewCall, UndefValue::get(NewTy), EltMask);

    MadeChange = true;
    return Shuffle;
  }
  }
  break;
  }
  }
  return MadeChange ? I : nullptr;
}