//===- InstCombineSimplifyDemanded.cpp ------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains logic for simplifying instructions based on information
// about how they are used.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Check to see if the specified operand of the specified instruction is a
/// constant integer. If so, check to see if there are any bits set in the
/// constant that are not demanded. If so, shrink the constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
                                   APInt Demanded) {
  assert(I && "No instruction?");
  assert(OpNo < I->getNumOperands() && "Operand index too large");

  // The operand must be a constant integer or splat integer.
  Value *Op = I->getOperand(OpNo);
  const APInt *C;
  if (!match(Op, m_APInt(C)))
    return false;

  // If there are no bits set that aren't demanded, nothing to do.
  Demanded = Demanded.zextOrTrunc(C->getBitWidth());
  if ((~Demanded & *C) == 0)
    return false;

  // This instruction is producing bits that are not demanded. Shrink the RHS.
  Demanded &= *C;
  I->setOperand(OpNo, ConstantInt::get(Op->getType(), Demanded));

  return true;
}
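
// Illustrative example (not part of the upstream source): given
//   %r = and i32 %x, 0xFF00
// with Demanded = 0x0FF0, the constant bit at 0xF000 can never be observed,
// so the operand is shrunk to 0x0F00.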


/// Inst is an integer instruction that SimplifyDemandedBits knows about. See if
/// the instruction has any properties that allow us to simplify its operands.
bool InstCombiner::SimplifyDemandedInstructionBits(Instruction &Inst) {
  unsigned BitWidth = Inst.getType()->getScalarSizeInBits();
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  APInt DemandedMask(APInt::getAllOnesValue(BitWidth));

  Value *V = SimplifyDemandedUseBits(&Inst, DemandedMask, KnownZero, KnownOne,
                                     0, &Inst);
  if (!V) return false;
  if (V == &Inst) return true;
  replaceInstUsesWith(Inst, V);
  return true;
}

/// This form of SimplifyDemandedBits simplifies the specified instruction
/// operand if possible, updating it in place. It returns true if it made any
/// change and false otherwise.
bool InstCombiner::SimplifyDemandedBits(Instruction *I, unsigned OpNo,
                                        const APInt &DemandedMask,
                                        APInt &KnownZero, APInt &KnownOne,
                                        unsigned Depth) {
  Use &U = I->getOperandUse(OpNo);
  Value *NewVal = SimplifyDemandedUseBits(U.get(), DemandedMask, KnownZero,
                                          KnownOne, Depth, I);
  if (!NewVal) return false;
  U = NewVal;
  return true;
}


/// This function attempts to replace V with a simpler value based on the
/// demanded bits. When this function is called, it is known that only the bits
/// set in DemandedMask of the result of V are ever used downstream.
/// Consequently, depending on the mask and V, it may be possible to replace V
/// with a constant or one of its operands. In such cases, this function does
/// the replacement and returns true. In all other cases, it returns false
/// after analyzing the expression, setting KnownOne to contain all the bits
/// that are known to be one in the expression and KnownZero to contain all
/// the bits that are known to be zero. These are provided to potentially
/// allow the caller (which might recursively be SimplifyDemandedBits itself)
/// to simplify the expression.
/// KnownOne and KnownZero always follow the invariant that:
///   KnownOne & KnownZero == 0.
/// That is, a bit can't be both 1 and 0. Note that the bits in KnownOne and
/// KnownZero may only be accurate for those bits set in DemandedMask. Note
/// also that the bitwidth of V, DemandedMask, KnownZero and KnownOne must all
/// be the same.
///
/// This returns null if it did not change anything and it permits no
/// simplification. This returns V itself if it did some simplification of V's
/// operands based on the information about what bits are demanded. This
/// returns some other non-null value if it found out that V is equal to
/// another value in the context where the specified bits are demanded, but not
/// for all users.
Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                                             APInt &KnownZero, APInt &KnownOne,
                                             unsigned Depth,
                                             Instruction *CxtI) {
  assert(V != nullptr && "Null pointer of Value???");
  assert(Depth <= 6 && "Limit Search Depth");
  uint32_t BitWidth = DemandedMask.getBitWidth();
  Type *VTy = V->getType();
  assert(
      (!VTy->isIntOrIntVectorTy() || VTy->getScalarSizeInBits() == BitWidth) &&
      KnownZero.getBitWidth() == BitWidth &&
      KnownOne.getBitWidth() == BitWidth &&
      "Value *V, DemandedMask, KnownZero and KnownOne "
      "must have same BitWidth");
  const APInt *C;
  if (match(V, m_APInt(C))) {
    // We know all of the bits for a scalar constant or a splat vector constant!
    KnownOne = *C & DemandedMask;
    KnownZero = ~KnownOne & DemandedMask;
    return nullptr;
  }
  if (isa<ConstantPointerNull>(V)) {
    // We know all of the bits for a constant!
    KnownOne.clearAllBits();
    KnownZero = DemandedMask;
    return nullptr;
  }

  KnownZero.clearAllBits();
  KnownOne.clearAllBits();
  if (DemandedMask == 0) {   // Not demanding any bits from V.
    if (isa<UndefValue>(V))
      return nullptr;
    return UndefValue::get(VTy);
  }

  if (Depth == 6)        // Limit search depth.
    return nullptr;

  APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
  APInt RHSKnownZero(BitWidth, 0), RHSKnownOne(BitWidth, 0);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    return nullptr;        // Only analyze instructions.
  }

  // If there are multiple uses of this value and we aren't at the root, then
  // we can't do any simplifications of the operands, because DemandedMask
  // only reflects the bits demanded by *one* of the users.
  if (Depth != 0 && !I->hasOneUse()) {
    // Despite the fact that we can't simplify this instruction in every user's
    // context, we can at least compute the knownzero/knownone bits, and we can
    // do simplifications that apply to *just* the one user if we know that
    // this instruction has a simpler value in that context.
    if (I->getOpcode() == Instruction::And) {
      // If either the LHS or the RHS are Zero, the result is zero.
      computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
                       CxtI);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       CxtI);

      // If all of the demanded bits are known 1 on one side, return the other.
      // These bits cannot contribute to the result of the 'and' in this
      // context.
      if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
          (DemandedMask & ~LHSKnownZero))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
          (DemandedMask & ~RHSKnownZero))
        return I->getOperand(1);

      // If all of the demanded bits in the inputs are known zeros, return zero.
      if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
        return Constant::getNullValue(VTy);

    } else if (I->getOpcode() == Instruction::Or) {
      // We can simplify (X|Y) -> X or Y in the user's context if we know that
      // only bits from X or Y are demanded.

      // If either the LHS or the RHS are One, the result is One.
      computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
                       CxtI);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       CxtI);

      // If all of the demanded bits are known zero on one side, return the
      // other. These bits cannot contribute to the result of the 'or' in this
      // context.
      if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
          (DemandedMask & ~LHSKnownOne))
        return I->getOperand(0);
      if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
          (DemandedMask & ~RHSKnownOne))
        return I->getOperand(1);

      // If all of the potentially set bits on one side are known to be set on
      // the other side, just use the 'other' side.
      if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
          (DemandedMask & (~RHSKnownZero)))
        return I->getOperand(0);
      if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
          (DemandedMask & (~LHSKnownZero)))
        return I->getOperand(1);
    } else if (I->getOpcode() == Instruction::Xor) {
      // We can simplify (X^Y) -> X or Y in the user's context if we know that
      // only bits from X or Y are demanded.

      computeKnownBits(I->getOperand(1), RHSKnownZero, RHSKnownOne, Depth + 1,
                       CxtI);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       CxtI);

      // If all of the demanded bits are known zero on one side, return the
      // other.
      if ((DemandedMask & RHSKnownZero) == DemandedMask)
        return I->getOperand(0);
      if ((DemandedMask & LHSKnownZero) == DemandedMask)
        return I->getOperand(1);
    }

    // Compute the KnownZero/KnownOne bits to simplify things downstream.
    computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);
    return nullptr;
  }
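
  // Illustrative example (not part of the upstream source): for
  //   %m = or i32 %a, %b          ; %m has several users
  // a user that only demands bits known to be zero in %b may use %a directly
  // in its own context, even though %m itself cannot be rewritten here.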

  // If this is the root being simplified, allow it to have multiple uses,
  // just set the DemandedMask to all bits so that we can try to simplify the
  // operands. This allows visitTruncInst (for example) to simplify the
  // operand of a trunc without duplicating all the logic below.
  if (Depth == 0 && !V->hasOneUse())
    DemandedMask.setAllBits();

  switch (I->getOpcode()) {
  default:
    computeKnownBits(I, KnownZero, KnownOne, Depth, CxtI);
    break;
  case Instruction::And:
    // If either the LHS or the RHS are Zero, the result is zero.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownZero, LHSKnownZero,
                             LHSKnownOne, Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & ((RHSKnownZero | LHSKnownZero)|
                         (RHSKnownOne & LHSKnownOne))) == DemandedMask)
      return Constant::getIntegerValue(VTy, RHSKnownOne & LHSKnownOne);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if ((DemandedMask & ~LHSKnownZero & RHSKnownOne) ==
        (DemandedMask & ~LHSKnownZero))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownZero & LHSKnownOne) ==
        (DemandedMask & ~RHSKnownZero))
      return I->getOperand(1);

    // If all of the demanded bits in the inputs are known zeros, return zero.
    if ((DemandedMask & (RHSKnownZero|LHSKnownZero)) == DemandedMask)
      return Constant::getNullValue(VTy);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask & ~LHSKnownZero))
      return I;

    // Output known-1 bits are only known if set in both the LHS & RHS.
    KnownOne = RHSKnownOne & LHSKnownOne;
    // Output known-0 are known to be clear if zero in either the LHS | RHS.
    KnownZero = RHSKnownZero | LHSKnownZero;
    break;
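  // Illustrative example (not part of the upstream source): for
  //   %r = and i32 %x, 255
  // with DemandedMask = 0xF, every demanded bit is known one on the RHS, so
  // the 'and' contributes nothing and %x is returned.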
  case Instruction::Or:
    // If either the LHS or the RHS are One, the result is One.
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask & ~RHSKnownOne, LHSKnownZero,
                             LHSKnownOne, Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & ((RHSKnownZero & LHSKnownZero)|
                         (RHSKnownOne | LHSKnownOne))) == DemandedMask)
      return Constant::getIntegerValue(VTy, RHSKnownOne | LHSKnownOne);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or'.
    if ((DemandedMask & ~LHSKnownOne & RHSKnownZero) ==
        (DemandedMask & ~LHSKnownOne))
      return I->getOperand(0);
    if ((DemandedMask & ~RHSKnownOne & LHSKnownZero) ==
        (DemandedMask & ~RHSKnownOne))
      return I->getOperand(1);

    // If all of the potentially set bits on one side are known to be set on
    // the other side, just use the 'other' side.
    if ((DemandedMask & (~RHSKnownZero) & LHSKnownOne) ==
        (DemandedMask & (~RHSKnownZero)))
      return I->getOperand(0);
    if ((DemandedMask & (~LHSKnownZero) & RHSKnownOne) ==
        (DemandedMask & (~LHSKnownZero)))
      return I->getOperand(1);

    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    KnownZero = RHSKnownZero & LHSKnownZero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    KnownOne = RHSKnownOne | LHSKnownOne;
    break;
  case Instruction::Xor: {
    if (SimplifyDemandedBits(I, 1, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 0, DemandedMask, LHSKnownZero, LHSKnownOne,
                             Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    APInt IKnownZero = (RHSKnownZero & LHSKnownZero) |
                       (RHSKnownOne & LHSKnownOne);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    APInt IKnownOne = (RHSKnownZero & LHSKnownOne) |
                      (RHSKnownOne & LHSKnownZero);

    // If the client is only demanding bits that we know, return the known
    // constant.
    if ((DemandedMask & (IKnownZero|IKnownOne)) == DemandedMask)
      return Constant::getIntegerValue(VTy, IKnownOne);

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if ((DemandedMask & RHSKnownZero) == DemandedMask)
      return I->getOperand(0);
    if ((DemandedMask & LHSKnownZero) == DemandedMask)
      return I->getOperand(1);

    // If all of the demanded bits are known to be zero on one side or the
    // other, turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if ((DemandedMask & ~RHSKnownZero & ~LHSKnownZero) == 0) {
      Instruction *Or =
        BinaryOperator::CreateOr(I->getOperand(0), I->getOperand(1),
                                 I->getName());
      return InsertNewInstWith(Or, *I);
    }

    // If all of the demanded bits on one side are known, and all of the set
    // bits on that side are also known to be set on the other side, turn this
    // into an AND, as we know the bits will be cleared.
    // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
    if ((DemandedMask & (RHSKnownZero|RHSKnownOne)) == DemandedMask) {
      // all known
      if ((RHSKnownOne & LHSKnownOne) == RHSKnownOne) {
        Constant *AndC = Constant::getIntegerValue(VTy,
                                                   ~RHSKnownOne & DemandedMask);
        Instruction *And = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        return InsertNewInstWith(And, *I);
      }
    }

    // If the RHS is a constant, see if we can simplify it.
    // FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
    if (ShrinkDemandedConstant(I, 1, DemandedMask))
      return I;

    // If our LHS is an 'and' and if it has one use, and if any of the bits we
    // are flipping are known to be set, then the xor is just resetting those
    // bits to zero. We can just knock out bits from the 'and' and the 'xor',
    // simplifying both of them.
    if (Instruction *LHSInst = dyn_cast<Instruction>(I->getOperand(0)))
      if (LHSInst->getOpcode() == Instruction::And && LHSInst->hasOneUse() &&
          isa<ConstantInt>(I->getOperand(1)) &&
          isa<ConstantInt>(LHSInst->getOperand(1)) &&
          (LHSKnownOne & RHSKnownOne & DemandedMask) != 0) {
        ConstantInt *AndRHS = cast<ConstantInt>(LHSInst->getOperand(1));
        ConstantInt *XorRHS = cast<ConstantInt>(I->getOperand(1));
        APInt NewMask = ~(LHSKnownOne & RHSKnownOne & DemandedMask);

        Constant *AndC =
          ConstantInt::get(I->getType(), NewMask & AndRHS->getValue());
        Instruction *NewAnd = BinaryOperator::CreateAnd(I->getOperand(0), AndC);
        InsertNewInstWith(NewAnd, *I);

        Constant *XorC =
          ConstantInt::get(I->getType(), NewMask & XorRHS->getValue());
        Instruction *NewXor = BinaryOperator::CreateXor(NewAnd, XorC);
        return InsertNewInstWith(NewXor, *I);
      }

    // Output known-0 bits are known if clear or set in both the LHS & RHS.
    KnownZero = (RHSKnownZero & LHSKnownZero) | (RHSKnownOne & LHSKnownOne);
    // Output known-1 are known to be set if set in only one of the LHS, RHS.
    KnownOne = (RHSKnownZero & LHSKnownOne) | (RHSKnownOne & LHSKnownZero);
    break;
  }
  case Instruction::Select:
    // If this is a select as part of a min/max pattern, don't simplify any
    // further in case we break the structure.
    Value *LHS, *RHS;
    if (matchSelectPattern(I, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

    if (SimplifyDemandedBits(I, 2, DemandedMask, RHSKnownZero, RHSKnownOne,
                             Depth + 1) ||
        SimplifyDemandedBits(I, 1, DemandedMask, LHSKnownZero, LHSKnownOne,
                             Depth + 1))
      return I;
    assert(!(RHSKnownZero & RHSKnownOne) && "Bits known to be one AND zero?");
    assert(!(LHSKnownZero & LHSKnownOne) && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(I, 1, DemandedMask) ||
        ShrinkDemandedConstant(I, 2, DemandedMask))
      return I;

    // Only known if known in both the LHS and RHS.
    KnownOne = RHSKnownOne & LHSKnownOne;
    KnownZero = RHSKnownZero & LHSKnownZero;
    break;
  case Instruction::Trunc: {
    unsigned truncBf = I->getOperand(0)->getType()->getScalarSizeInBits();
    DemandedMask = DemandedMask.zext(truncBf);
    KnownZero = KnownZero.zext(truncBf);
    KnownOne = KnownOne.zext(truncBf);
    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    DemandedMask = DemandedMask.trunc(BitWidth);
    KnownZero = KnownZero.trunc(BitWidth);
    KnownOne = KnownOne.trunc(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    break;
  }
  case Instruction::BitCast:
    if (!I->getOperand(0)->getType()->isIntOrIntVectorTy())
      return nullptr;  // vector->int or fp->int?

    if (VectorType *DstVTy = dyn_cast<VectorType>(I->getType())) {
      if (VectorType *SrcVTy =
            dyn_cast<VectorType>(I->getOperand(0)->getType())) {
        if (DstVTy->getNumElements() != SrcVTy->getNumElements())
          // Don't touch a bitcast between vectors of different element counts.
          return nullptr;
      } else
        // Don't touch a scalar-to-vector bitcast.
        return nullptr;
    } else if (I->getOperand(0)->getType()->isVectorTy())
      // Don't touch a vector-to-scalar bitcast.
      return nullptr;

    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    break;
  case Instruction::ZExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    DemandedMask = DemandedMask.trunc(SrcBitWidth);
    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, DemandedMask, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    DemandedMask = DemandedMask.zext(BitWidth);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
    // The top bits are known to be zero.
    KnownZero.setBitsFrom(SrcBitWidth);
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    APInt InputDemandedBits = DemandedMask &
                              APInt::getLowBitsSet(BitWidth, SrcBitWidth);

    APInt NewBits(APInt::getBitsSetFrom(BitWidth, SrcBitWidth));
    // If any of the sign extended bits are demanded, we know that the sign
    // bit is demanded.
    if ((NewBits & DemandedMask) != 0)
      InputDemandedBits.setBit(SrcBitWidth-1);

    InputDemandedBits = InputDemandedBits.trunc(SrcBitWidth);
    KnownZero = KnownZero.trunc(SrcBitWidth);
    KnownOne = KnownOne.trunc(SrcBitWidth);
    if (SimplifyDemandedBits(I, 0, InputDemandedBits, KnownZero, KnownOne,
                             Depth + 1))
      return I;
    InputDemandedBits = InputDemandedBits.zext(BitWidth);
    KnownZero = KnownZero.zext(BitWidth);
    KnownOne = KnownOne.zext(BitWidth);
    assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");

    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.

    // If the input sign bit is known zero, or if the NewBits are not demanded,
    // convert this into a zero extension.
    if (KnownZero[SrcBitWidth-1] || (NewBits & ~DemandedMask) == NewBits) {
      // Convert to ZExt cast.
      CastInst *NewCast = new ZExtInst(I->getOperand(0), VTy, I->getName());
      return InsertNewInstWith(NewCast, *I);
    } else if (KnownOne[SrcBitWidth-1]) {    // Input sign bit known set
      KnownOne |= NewBits;
    }
    break;
  }
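  // Illustrative example (not part of the upstream source): for
  //   %e = sext i8 %x to i32
  // with DemandedMask = 0xFF, none of the sign-extended bits are demanded,
  // so the sext is rewritten as 'zext i8 %x to i32'.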
  case Instruction::Add:
  case Instruction::Sub: {
    // If the high-bits of an ADD/SUB are not demanded, then we do not care
    // about the high bits of the operands.
    unsigned NLZ = DemandedMask.countLeadingZeros();
    if (NLZ > 0) {
      // Right fill the mask of bits for this ADD/SUB to demand the most
      // significant bit and all those below it.
      APInt DemandedFromOps(APInt::getLowBitsSet(BitWidth, BitWidth-NLZ));
      if (ShrinkDemandedConstant(I, 0, DemandedFromOps) ||
          SimplifyDemandedBits(I, 0, DemandedFromOps, LHSKnownZero, LHSKnownOne,
                               Depth + 1) ||
          ShrinkDemandedConstant(I, 1, DemandedFromOps) ||
          SimplifyDemandedBits(I, 1, DemandedFromOps, LHSKnownZero, LHSKnownOne,
                               Depth + 1)) {
        // Disable the nsw and nuw flags here: We can no longer guarantee that
        // we won't wrap after simplification. Removing the nsw/nuw flags is
        // legal here because the top bit is not demanded.
        BinaryOperator &BinOP = *cast<BinaryOperator>(I);
        BinOP.setHasNoSignedWrap(false);
        BinOP.setHasNoUnsignedWrap(false);
        return I;
      }
    }

    // Otherwise just hand the add/sub off to computeKnownBits to fill in
    // the known zeros and ones.
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    break;
  }
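  // Illustrative example (not part of the upstream source): if only the low
  // eight bits of
  //   %s = add nsw i32 %x, 300
  // are demanded, the constant is shrunk to 44 (300 & 0xFF) and the nsw flag
  // is dropped, since the simplified operands may now wrap.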
  case Instruction::Shl:
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      {
        Value *VarX; ConstantInt *C1;
        if (match(I->getOperand(0), m_Shr(m_Value(VarX), m_ConstantInt(C1)))) {
          Instruction *Shr = cast<Instruction>(I->getOperand(0));
          Value *R = SimplifyShrShlDemandedBits(Shr, I, DemandedMask,
                                                KnownZero, KnownOne);
          if (R)
            return R;
        }
      }

      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);
      APInt DemandedMaskIn(DemandedMask.lshr(ShiftAmt));

      // If the shift is NUW/NSW, then it does demand the high bits.
      ShlOperator *IOp = cast<ShlOperator>(I);
      if (IOp->hasNoSignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt+1);
      else if (IOp->hasNoUnsignedWrap())
        DemandedMaskIn.setHighBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      KnownZero <<= ShiftAmt;
      KnownOne  <<= ShiftAmt;
      // low bits known zero.
      if (ShiftAmt)
        KnownZero.setLowBits(ShiftAmt);
    }
    break;
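  // Illustrative example (not part of the upstream source): for
  //   %r = shl i32 %x, 8
  // only bits 8..31 of the result can come from %x, so the recursive call
  // demands DemandedMask >> 8 from %x, and the low 8 bits become known zero.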
  case Instruction::LShr:
    // For a logical shift right
    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint64_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Unsigned shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<LShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      KnownZero = KnownZero.lshr(ShiftAmt);
      KnownOne  = KnownOne.lshr(ShiftAmt);
      if (ShiftAmt)
        KnownZero.setHighBits(ShiftAmt);  // high bits known zero.
    }
    break;
  case Instruction::AShr:
    // If this is an arithmetic shift right and only the low-bit is set, we can
    // always convert this into a logical shr, even if the shift amount is
    // variable. The low bit of the shift cannot be an input sign bit unless
    // the shift amount is >= the size of the datatype, which is undefined.
    if (DemandedMask == 1) {
      // Perform the logical shift right.
      Instruction *NewVal = BinaryOperator::CreateLShr(
                        I->getOperand(0), I->getOperand(1), I->getName());
      return InsertNewInstWith(NewVal, *I);
    }

    // If the sign bit is the only bit demanded by this ashr, then there is no
    // need to do it, the shift doesn't change the high bit.
    if (DemandedMask.isSignBit())
      return I->getOperand(0);

    if (ConstantInt *SA = dyn_cast<ConstantInt>(I->getOperand(1))) {
      uint32_t ShiftAmt = SA->getLimitedValue(BitWidth-1);

      // Signed shift right.
      APInt DemandedMaskIn(DemandedMask.shl(ShiftAmt));
      // If any of the "high bits" are demanded, we should set the sign bit as
      // demanded.
      if (DemandedMask.countLeadingZeros() <= ShiftAmt)
        DemandedMaskIn.setBit(BitWidth-1);

      // If the shift is exact, then it does demand the low bits (and knows
      // that they are zero).
      if (cast<AShrOperator>(I)->isExact())
        DemandedMaskIn.setLowBits(ShiftAmt);

      if (SimplifyDemandedBits(I, 0, DemandedMaskIn, KnownZero, KnownOne,
                               Depth + 1))
        return I;
      assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      // Compute the new bits that are at the top now.
      APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
      KnownZero = KnownZero.lshr(ShiftAmt);
      KnownOne  = KnownOne.lshr(ShiftAmt);

      // Handle the sign bits.
      APInt SignBit(APInt::getSignBit(BitWidth));
      // Adjust to where it is now in the mask.
      SignBit = SignBit.lshr(ShiftAmt);

      // If the input sign bit is known to be zero, or if none of the top bits
      // are demanded, turn this into an unsigned shift right.
      if (BitWidth <= ShiftAmt || KnownZero[BitWidth-ShiftAmt-1] ||
          (HighBits & ~DemandedMask) == HighBits) {
        // Perform the logical shift right.
        BinaryOperator *NewVal = BinaryOperator::CreateLShr(I->getOperand(0),
                                                            SA, I->getName());
        NewVal->setIsExact(cast<BinaryOperator>(I)->isExact());
        return InsertNewInstWith(NewVal, *I);
      } else if ((KnownOne & SignBit) != 0) {    // New bits are known one.
        KnownOne |= HighBits;
      }
    }
    break;
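  // Illustrative example (not part of the upstream source): for
  //   %r = ashr i32 %x, 4
  // with DemandedMask = 0xFFFF, the four sign-copied top bits are never
  // observed, so the ashr is replaced by 'lshr i32 %x, 4'.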
  case Instruction::SRem:
    if (ConstantInt *Rem = dyn_cast<ConstantInt>(I->getOperand(1))) {
      // X % -1 demands all the bits because we don't want to introduce
      // INT_MIN % -1 (== undef) by accident.
      if (Rem->isAllOnesValue())
        break;
      APInt RA = Rem->getValue().abs();
      if (RA.isPowerOf2()) {
        if (DemandedMask.ult(RA))    // srem won't affect demanded bits
          return I->getOperand(0);

        APInt LowBits = RA - 1;
        APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
        if (SimplifyDemandedBits(I, 0, Mask2, LHSKnownZero, LHSKnownOne,
                                 Depth + 1))
          return I;

        // The low bits of LHS are unchanged by the srem.
        KnownZero = LHSKnownZero & LowBits;
        KnownOne = LHSKnownOne & LowBits;

        // If LHS is non-negative or has all low bits zero, then the upper bits
        // are all zero.
        if (LHSKnownZero[BitWidth-1] || ((LHSKnownZero & LowBits) == LowBits))
          KnownZero |= ~LowBits;

        // If LHS is negative and not all low bits are zero, then the upper
        // bits are all one.
        if (LHSKnownOne[BitWidth-1] && ((LHSKnownOne & LowBits) != 0))
          KnownOne |= ~LowBits;

        assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
      }
    }

    // The sign bit is the LHS's sign bit, except when the result of the
    // remainder is zero.
    if (DemandedMask.isNegative() && KnownZero.isNonNegative()) {
      APInt LHSKnownZero(BitWidth, 0), LHSKnownOne(BitWidth, 0);
      computeKnownBits(I->getOperand(0), LHSKnownZero, LHSKnownOne, Depth + 1,
                       CxtI);
      // If it's known zero, our sign bit is also zero.
      if (LHSKnownZero.isNegative())
        KnownZero.setSignBit();
    }
    break;
  case Instruction::URem: {
    APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
    APInt AllOnes = APInt::getAllOnesValue(BitWidth);
    if (SimplifyDemandedBits(I, 0, AllOnes, KnownZero2, KnownOne2, Depth + 1) ||
        SimplifyDemandedBits(I, 1, AllOnes, KnownZero2, KnownOne2, Depth + 1))
      return I;

    unsigned Leaders = KnownZero2.countLeadingOnes();
    KnownZero = APInt::getHighBitsSet(BitWidth, Leaders) & DemandedMask;
    break;
  }
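  // Illustrative example (not part of the upstream source): for
  //   %r = srem i32 %x, 8
  // with DemandedMask = 0x3, the remainder leaves the low three bits of %x
  // unchanged, so %x itself is returned.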
  case Instruction::Call:
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::bswap: {
        // If the only bits demanded come from one byte of the bswap result,
        // just shift the input byte into position to eliminate the bswap.
        unsigned NLZ = DemandedMask.countLeadingZeros();
        unsigned NTZ = DemandedMask.countTrailingZeros();

        // Round NTZ down to the next byte. If we have 11 trailing zeros, then
        // we need all the bits down to bit 8. Likewise, round NLZ. If we
        // have 14 leading zeros, round to 8.
        NLZ &= ~7;
        NTZ &= ~7;
        // If we need exactly one byte, we can do this transformation.
        if (BitWidth-NLZ-NTZ == 8) {
          unsigned ResultBit = NTZ;
          unsigned InputBit = BitWidth-NTZ-8;

          // Replace this with either a left or right shift to get the byte
          // into the right place.
          Instruction *NewVal;
          if (InputBit > ResultBit)
            NewVal = BinaryOperator::CreateLShr(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), InputBit-ResultBit));
          else
            NewVal = BinaryOperator::CreateShl(II->getArgOperand(0),
                    ConstantInt::get(I->getType(), ResultBit-InputBit));
          NewVal->takeName(I);
          return InsertNewInstWith(NewVal, *I);
        }

        // TODO: Could compute known zero/one bits based on the input.
        break;
      }
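      // Illustrative example (not part of the upstream source): for
      //   %b = call i32 @llvm.bswap.i32(i32 %x)
      // with DemandedMask = 0xFF, only the low byte of the result is used,
      // which is the high byte of %x, so %b becomes 'lshr i32 %x, 24'.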
      case Intrinsic::x86_mmx_pmovmskb:
      case Intrinsic::x86_sse_movmsk_ps:
      case Intrinsic::x86_sse2_movmsk_pd:
      case Intrinsic::x86_sse2_pmovmskb_128:
      case Intrinsic::x86_avx_movmsk_ps_256:
      case Intrinsic::x86_avx_movmsk_pd_256:
      case Intrinsic::x86_avx2_pmovmskb: {
        // MOVMSK copies the vector elements' sign bits to the low bits
        // and zeros the high bits.
        unsigned ArgWidth;
        if (II->getIntrinsicID() == Intrinsic::x86_mmx_pmovmskb) {
          ArgWidth = 8; // Arg is x86_mmx, but treated as <8 x i8>.
        } else {
          auto Arg = II->getArgOperand(0);
          auto ArgType = cast<VectorType>(Arg->getType());
          ArgWidth = ArgType->getNumElements();
        }

        // If we don't need any of the low bits, return zero; we know that
        // DemandedMask is non-zero already.
        APInt DemandedElts = DemandedMask.zextOrTrunc(ArgWidth);
        if (DemandedElts == 0)
          return ConstantInt::getNullValue(VTy);

        // We know that the upper bits are set to zero.
        KnownZero.setBitsFrom(ArgWidth);
        return nullptr;
      }
      case Intrinsic::x86_sse42_crc32_64_64:
        KnownZero.setBitsFrom(32);
        return nullptr;
      }
    }
    computeKnownBits(V, KnownZero, KnownOne, Depth, CxtI);
    break;
  }

  // If the client is only demanding bits that we know, return the known
  // constant.
  if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
    return Constant::getIntegerValue(VTy, KnownOne);
  return nullptr;
}

/// Helper routine of SimplifyDemandedUseBits. It tries to simplify
/// "E1 = (X lsr C1) << C2", where the C1 and C2 are constant, into
/// "E2 = X << (C2 - C1)" or "E2 = X >> (C1 - C2)", depending on the sign
/// of "C2 - C1".
///
/// Suppose E1 and E2 are generally different in bits S = {bm, bm+1,
/// ..., bn}, without considering the specific value X is holding.
/// This transformation is legal iff one of the following conditions holds:
///  1) All the bits in S are 0, in which case E1 == E2.
///  2) We don't care about those bits in S, per the input DemandedMask.
///  3) Combination of 1) and 2). Some bits in S are 0, and we don't care
///     about the rest of the bits.
///
/// Currently we only test condition 2).
///
/// As with SimplifyDemandedUseBits, it returns NULL if the simplification was
/// not successful.
Value *InstCombiner::SimplifyShrShlDemandedBits(Instruction *Shr,
                                                Instruction *Shl,
                                                const APInt &DemandedMask,
                                                APInt &KnownZero,
                                                APInt &KnownOne) {

  const APInt &ShlOp1 = cast<ConstantInt>(Shl->getOperand(1))->getValue();
  const APInt &ShrOp1 = cast<ConstantInt>(Shr->getOperand(1))->getValue();
  if (!ShlOp1 || !ShrOp1)
    return nullptr; // No-op.

  Value *VarX = Shr->getOperand(0);
  Type *Ty = VarX->getType();
  unsigned BitWidth = Ty->getIntegerBitWidth();
  if (ShlOp1.uge(BitWidth) || ShrOp1.uge(BitWidth))
    return nullptr; // Undef.

  unsigned ShlAmt = ShlOp1.getZExtValue();
  unsigned ShrAmt = ShrOp1.getZExtValue();

  KnownOne.clearAllBits();
  KnownZero.setLowBits(ShlAmt - 1);
  KnownZero &= DemandedMask;

  APInt BitMask1(APInt::getAllOnesValue(BitWidth));
  APInt BitMask2(APInt::getAllOnesValue(BitWidth));

  bool isLshr = (Shr->getOpcode() == Instruction::LShr);
  BitMask1 = isLshr ? (BitMask1.lshr(ShrAmt) << ShlAmt) :
                      (BitMask1.ashr(ShrAmt) << ShlAmt);

  if (ShrAmt <= ShlAmt) {
    BitMask2 <<= (ShlAmt - ShrAmt);
  } else {
    BitMask2 = isLshr ? BitMask2.lshr(ShrAmt - ShlAmt) :
                        BitMask2.ashr(ShrAmt - ShlAmt);
  }

  // Check if condition-2 (see the comment to this function) is satisfied.
  if ((BitMask1 & DemandedMask) == (BitMask2 & DemandedMask)) {
    if (ShrAmt == ShlAmt)
      return VarX;

    if (!Shr->hasOneUse())
      return nullptr;

    BinaryOperator *New;
    if (ShrAmt < ShlAmt) {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShlAmt - ShrAmt);
      New = BinaryOperator::CreateShl(VarX, Amt);
      BinaryOperator *Orig = cast<BinaryOperator>(Shl);
      New->setHasNoSignedWrap(Orig->hasNoSignedWrap());
      New->setHasNoUnsignedWrap(Orig->hasNoUnsignedWrap());
    } else {
      Constant *Amt = ConstantInt::get(VarX->getType(), ShrAmt - ShlAmt);
      New = isLshr ? BinaryOperator::CreateLShr(VarX, Amt) :
                     BinaryOperator::CreateAShr(VarX, Amt);
      if (cast<BinaryOperator>(Shr)->isExact())
        New->setIsExact(true);
    }

    return InsertNewInstWith(New, *Shl);
  }

  return nullptr;
}
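
// Illustrative example (not part of the upstream source): for
//   %t = lshr i32 %x, 3
//   %r = shl i32 %t, 3
// E1 and E2 (= %x) differ only in the low three bits, so if DemandedMask
// does not include bits 0..2, %x is returned directly (condition 2 above).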

/// The specified value produces a vector with any number of elements.
/// DemandedElts contains the set of elements that are actually used by the
/// caller. This method analyzes which elements of the operand are undef and
/// returns that information in UndefElts.
///
/// If the information about demanded elements can be used to simplify the
/// operation, the operation is simplified and the resultant value is returned.
/// This returns null if no change was made.
Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
                                                APInt &UndefElts,
                                                unsigned Depth) {
  unsigned VWidth = V->getType()->getVectorNumElements();
  APInt EltMask(APInt::getAllOnesValue(VWidth));
  assert((DemandedElts & ~EltMask) == 0 && "Invalid DemandedElts!");

  if (isa<UndefValue>(V)) {
    // If the entire vector is undefined, just return this info.
    UndefElts = EltMask;
    return nullptr;
  }

  if (DemandedElts == 0) { // If nothing is demanded, provide undef.
    UndefElts = EltMask;
    return UndefValue::get(V->getType());
  }

  UndefElts = 0;

  // Handle ConstantAggregateZero, ConstantVector, ConstantDataSequential.
  if (Constant *C = dyn_cast<Constant>(V)) {
    // Check if this is identity. If so, return 0 since we are not simplifying
    // anything.
    if (DemandedElts.isAllOnesValue())
      return nullptr;

    Type *EltTy = cast<VectorType>(V->getType())->getElementType();
    Constant *Undef = UndefValue::get(EltTy);

    SmallVector<Constant*, 16> Elts;
    for (unsigned i = 0; i != VWidth; ++i) {
      if (!DemandedElts[i]) {   // If not demanded, set to undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
        continue;
      }

      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) return nullptr;

      if (isa<UndefValue>(Elt)) {   // Already undef.
        Elts.push_back(Undef);
        UndefElts.setBit(i);
      } else {                      // Otherwise, defined.
        Elts.push_back(Elt);
      }
    }

    // If we changed the constant, return it.
    Constant *NewCV = ConstantVector::get(Elts);
    return NewCV != C ? NewCV : nullptr;
  }

  // Limit search depth.
  if (Depth == 10)
    return nullptr;

  // If multiple users are using the root value, proceed with
  // simplification conservatively assuming that all elements
  // are needed.
  if (!V->hasOneUse()) {
    // Quit if we find multiple users of a non-root value though.
    // They'll be handled when it's their turn to be visited by
    // the main instcombine process.
    if (Depth != 0)
      // TODO: Just compute the UndefElts information recursively.
      return nullptr;

    // Conservatively assume that all elements are needed.
    DemandedElts = EltMask;
  }

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return nullptr;        // Only analyze instructions.

  bool MadeChange = false;
  APInt UndefElts2(VWidth, 0);
  APInt UndefElts3(VWidth, 0);
  Value *TmpV;
  switch (I->getOpcode()) {
  default: break;

  case Instruction::InsertElement: {
    // If this is a variable index, we don't know which element it overwrites,
    // so demand exactly the same input as we produce.
    ConstantInt *Idx = dyn_cast<ConstantInt>(I->getOperand(2));
    if (!Idx) {
      // Note that we can't propagate undef elt info, because we don't know
      // which elt is getting updated.
      TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
      break;
    }

    // If this is inserting an element that isn't demanded, remove this
    // insertelement.
    unsigned IdxNo = Idx->getZExtValue();
    if (IdxNo >= VWidth || !DemandedElts[IdxNo]) {
      Worklist.Add(I);
      return I->getOperand(0);
    }

    // Otherwise, the element inserted overwrites whatever was there, so the
    // input demanded set is simpler than the output set.
    APInt DemandedElts2 = DemandedElts;
    DemandedElts2.clearBit(IdxNo);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts2,
                                      UndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    // The inserted element is defined.
    UndefElts.clearBit(IdxNo);
    break;
  }
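  // Illustrative example (not part of the upstream source): for
  //   %v = insertelement <4 x i32> %a, i32 %x, i32 2
  // with DemandedElts = {0, 1}, lane 2 is never read, so the insertelement
  // is dropped and %a is returned.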
  case Instruction::ShuffleVector: {
    ShuffleVectorInst *Shuffle = cast<ShuffleVectorInst>(I);
    unsigned LHSVWidth =
      Shuffle->getOperand(0)->getType()->getVectorNumElements();
    APInt LeftDemanded(LHSVWidth, 0), RightDemanded(LHSVWidth, 0);
    for (unsigned i = 0; i < VWidth; i++) {
      if (DemandedElts[i]) {
        unsigned MaskVal = Shuffle->getMaskValue(i);
        if (MaskVal != -1u) {
          assert(MaskVal < LHSVWidth * 2 &&
                 "shufflevector mask index out of range!");
          if (MaskVal < LHSVWidth)
            LeftDemanded.setBit(MaskVal);
          else
            RightDemanded.setBit(MaskVal - LHSVWidth);
        }
      }
    }

    APInt LHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), LeftDemanded,
                                      LHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }

    APInt RHSUndefElts(LHSVWidth, 0);
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), RightDemanded,
                                      RHSUndefElts, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    bool NewUndefElts = false;
    unsigned LHSIdx = -1u, LHSValIdx = -1u;
    unsigned RHSIdx = -1u, RHSValIdx = -1u;
    bool LHSUniform = true;
    bool RHSUniform = true;
    for (unsigned i = 0; i < VWidth; i++) {
      unsigned MaskVal = Shuffle->getMaskValue(i);
      if (MaskVal == -1u) {
        UndefElts.setBit(i);
      } else if (!DemandedElts[i]) {
        NewUndefElts = true;
        UndefElts.setBit(i);
      } else if (MaskVal < LHSVWidth) {
        if (LHSUndefElts[MaskVal]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          LHSIdx = LHSIdx == -1u ? i : LHSVWidth;
          LHSValIdx = LHSValIdx == -1u ? MaskVal : LHSVWidth;
          LHSUniform = LHSUniform && (MaskVal == i);
        }
      } else {
        if (RHSUndefElts[MaskVal - LHSVWidth]) {
          NewUndefElts = true;
          UndefElts.setBit(i);
        } else {
          RHSIdx = RHSIdx == -1u ? i : LHSVWidth;
          RHSValIdx = RHSValIdx == -1u ? MaskVal - LHSVWidth : LHSVWidth;
          RHSUniform = RHSUniform && (MaskVal - LHSVWidth == i);
        }
      }
    }

    // Try to transform shuffle with constant vector and single element from
    // this constant vector to single insertelement instruction.
    // shufflevector V, C, <v1, v2, .., ci, .., vm> ->
    // insertelement V, C[ci], ci-n
    if (LHSVWidth == Shuffle->getType()->getNumElements()) {
      Value *Op = nullptr;
      Constant *Value = nullptr;
      unsigned Idx = -1u;

      // Find constant vector with the single element in shuffle (LHS or RHS).
      if (LHSIdx < LHSVWidth && RHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(0))) {
          Op = Shuffle->getOperand(1);
          Value = CV->getOperand(LHSValIdx);
          Idx = LHSIdx;
        }
      }
      if (RHSIdx < LHSVWidth && LHSUniform) {
        if (auto *CV = dyn_cast<ConstantVector>(Shuffle->getOperand(1))) {
          Op = Shuffle->getOperand(0);
          Value = CV->getOperand(RHSValIdx);
          Idx = RHSIdx;
        }
      }
      // Found constant vector with single element - convert to insertelement.
      if (Op && Value) {
        Instruction *New = InsertElementInst::Create(
            Op, Value, ConstantInt::get(Type::getInt32Ty(I->getContext()), Idx),
            Shuffle->getName());
        InsertNewInstWith(New, *Shuffle);
        return New;
      }
    }
    if (NewUndefElts) {
      // Add additional discovered undefs.
      SmallVector<Constant*, 16> Elts;
      for (unsigned i = 0; i < VWidth; ++i) {
        if (UndefElts[i])
          Elts.push_back(UndefValue::get(Type::getInt32Ty(I->getContext())));
        else
          Elts.push_back(ConstantInt::get(Type::getInt32Ty(I->getContext()),
                                          Shuffle->getMaskValue(i)));
      }
      I->setOperand(2, ConstantVector::get(Elts));
      MadeChange = true;
    }
    break;
  }
  case Instruction::Select: {
    APInt LeftDemanded(DemandedElts), RightDemanded(DemandedElts);
    if (ConstantVector* CV = dyn_cast<ConstantVector>(I->getOperand(0))) {
      for (unsigned i = 0; i < VWidth; i++) {
        Constant *CElt = CV->getAggregateElement(i);
        // Method isNullValue always returns false when called on a
        // ConstantExpr. If CElt is a ConstantExpr then skip it in order to
        // avoid propagating incorrect information.
        if (isa<ConstantExpr>(CElt))
          continue;
        if (CElt->isNullValue())
          LeftDemanded.clearBit(i);
        else
          RightDemanded.clearBit(i);
      }
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), LeftDemanded, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(2), RightDemanded,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(2, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined.
    UndefElts &= UndefElts2;
    break;
  }
  case Instruction::BitCast: {
    // Vector->vector casts only.
    VectorType *VTy = dyn_cast<VectorType>(I->getOperand(0)->getType());
    if (!VTy) break;
    unsigned InVWidth = VTy->getNumElements();
    APInt InputDemandedElts(InVWidth, 0);
    UndefElts2 = APInt(InVWidth, 0);
    unsigned Ratio;

    if (VWidth == InVWidth) {
      // If we are converting from <4 x i32> -> <4 x f32>, we demand the same
      // elements as are demanded of us.
      Ratio = 1;
      InputDemandedElts = DemandedElts;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number of
      // elements in the input then an input element is live if any of the
      // corresponding output elements are live.
      Ratio = VWidth / InVWidth;
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (DemandedElts[OutIdx])
          InputDemandedElts.setBit(OutIdx / Ratio);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an input element is live if the
      // corresponding output element is live.
      Ratio = InVWidth / VWidth;
      for (unsigned InIdx = 0; InIdx != InVWidth; ++InIdx)
        if (DemandedElts[InIdx / Ratio])
          InputDemandedElts.setBit(InIdx);
    } else {
      // Unsupported so far.
      break;
    }

    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), InputDemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) {
      I->setOperand(0, TmpV);
      MadeChange = true;
    }

    if (VWidth == InVWidth) {
      UndefElts = UndefElts2;
    } else if ((VWidth % InVWidth) == 0) {
      // If the number of elements in the output is a multiple of the number of
      // elements in the input then an output element is undef if the
      // corresponding input element is undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx)
        if (UndefElts2[OutIdx / Ratio])
          UndefElts.setBit(OutIdx);
    } else if ((InVWidth % VWidth) == 0) {
      // If the number of elements in the input is a multiple of the number of
      // elements in the output then an output element is undef if all of the
      // corresponding input elements are undef.
      for (unsigned OutIdx = 0; OutIdx != VWidth; ++OutIdx) {
        APInt SubUndef = UndefElts2.lshr(OutIdx * Ratio).zextOrTrunc(Ratio);
        if (SubUndef.countPopulation() == Ratio)
          UndefElts.setBit(OutIdx);
      }
    } else {
      llvm_unreachable("Unimp");
    }
    break;
  }
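  // Illustrative example (not part of the upstream source): for
  //   %c = bitcast <4 x i32> %v to <2 x i64>
  // InVWidth = 4, VWidth = 2, Ratio = 2: demanding output element 0 of %c
  // demands input elements 0 and 1 of %v, and output element 0 is undef only
  // if both of those input elements are undef.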
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These binary ops operate elementwise, so each demanded output element
    // demands the corresponding element of both inputs.
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    TmpV = SimplifyDemandedVectorElts(I->getOperand(1), DemandedElts,
                                      UndefElts2, Depth + 1);
    if (TmpV) { I->setOperand(1, TmpV); MadeChange = true; }

    // Output elements are undefined if both are undefined. Consider things
    // like undef&0. The result is known zero, not undef.
    UndefElts &= UndefElts2;
    break;
  case Instruction::FPTrunc:
  case Instruction::FPExt:
    TmpV = SimplifyDemandedVectorElts(I->getOperand(0), DemandedElts, UndefElts,
                                      Depth + 1);
    if (TmpV) { I->setOperand(0, TmpV); MadeChange = true; }
    break;

  case Instruction::Call: {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
    if (!II) break;
    switch (II->getIntrinsicID()) {
    default: break;

    case Intrinsic::x86_xop_vfrcz_ss:
    case Intrinsic::x86_xop_vfrcz_sd:
      // The instructions for these intrinsics are specified to zero the upper
      // bits, not pass them through like other scalar intrinsics. So we
      // shouldn't just use Arg0 if DemandedElts[0] is clear like we do for
      // other intrinsics. Instead we should return a zero vector.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return ConstantAggregateZero::get(II->getType());
      }

      // Only the lower element is used.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // Only the lower element is undefined. The high elements are zero.
      UndefElts = UndefElts[0];
      break;

    // Unary scalar-as-vector operations that work column-wise.
    case Intrinsic::x86_sse_rcp_ss:
    case Intrinsic::x86_sse_rsqrt_ss:
    case Intrinsic::x86_sse_sqrt_ss:
    case Intrinsic::x86_sse2_sqrt_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }
      // TODO: If only low elt lower SQRT to FSQRT (with rounding/exceptions
      // checks).
      break;

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0. The low element is a function of both
    // operands.
    case Intrinsic::x86_sse_min_ss:
    case Intrinsic::x86_sse_max_ss:
    case Intrinsic::x86_sse_cmp_ss:
    case Intrinsic::x86_sse2_min_sd:
    case Intrinsic::x86_sse2_max_sd:
    case Intrinsic::x86_sse2_cmp_sd: {
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if both lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0])
        UndefElts.clearBit(0);

      break;
    }

    // Binary scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element comes from operand 1.
    case Intrinsic::x86_sse41_round_ss:
    case Intrinsic::x86_sse41_round_sd: {
      // Don't use the low element of operand 0.
      APInt DemandedElts2 = DemandedElts;
      DemandedElts2.clearBit(0);
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts2,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Take the high undef elements from operand 0 and take the lower element
      // from operand 1.
      UndefElts.clearBit(0);
      UndefElts |= UndefElts2[0];
      break;
    }

    // Three input scalar-as-vector operations that work column-wise. The high
    // elements come from operand 0 and the low element is a function of all
    // three inputs.
    case Intrinsic::x86_avx512_mask_add_ss_round:
    case Intrinsic::x86_avx512_mask_div_ss_round:
    case Intrinsic::x86_avx512_mask_mul_ss_round:
    case Intrinsic::x86_avx512_mask_sub_ss_round:
    case Intrinsic::x86_avx512_mask_max_ss_round:
    case Intrinsic::x86_avx512_mask_min_ss_round:
    case Intrinsic::x86_avx512_mask_add_sd_round:
    case Intrinsic::x86_avx512_mask_div_sd_round:
    case Intrinsic::x86_avx512_mask_mul_sd_round:
    case Intrinsic::x86_avx512_mask_sub_sd_round:
    case Intrinsic::x86_avx512_mask_max_sd_round:
    case Intrinsic::x86_avx512_mask_min_sd_round:
    case Intrinsic::x86_fma_vfmadd_ss:
    case Intrinsic::x86_fma_vfmsub_ss:
    case Intrinsic::x86_fma_vfnmadd_ss:
    case Intrinsic::x86_fma_vfnmsub_ss:
    case Intrinsic::x86_fma_vfmadd_sd:
    case Intrinsic::x86_fma_vfmsub_sd:
    case Intrinsic::x86_fma_vfnmadd_sd:
    case Intrinsic::x86_fma_vfnmsub_sd:
    case Intrinsic::x86_avx512_mask_vfmadd_ss:
    case Intrinsic::x86_avx512_mask_vfmadd_sd:
    case Intrinsic::x86_avx512_maskz_vfmadd_ss:
    case Intrinsic::x86_avx512_maskz_vfmadd_sd:
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg0.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(0);
      }

      // Only lower element is used for operand 1 and 2.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;

    case Intrinsic::x86_avx512_mask3_vfmadd_ss:
    case Intrinsic::x86_avx512_mask3_vfmadd_sd:
    case Intrinsic::x86_avx512_mask3_vfmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfmsub_sd:
    case Intrinsic::x86_avx512_mask3_vfnmsub_ss:
    case Intrinsic::x86_avx512_mask3_vfnmsub_sd:
      // These intrinsics get the passthru bits from operand 2.
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(2), DemandedElts,
                                        UndefElts, Depth + 1);
      if (TmpV) { II->setArgOperand(2, TmpV); MadeChange = true; }

      // If lowest element of a scalar op isn't used then use Arg2.
      if (!DemandedElts[0]) {
        Worklist.Add(II);
        return II->getArgOperand(2);
      }

      // Only lower element is used for operand 0 and 1.
      DemandedElts = 1;
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0), DemandedElts,
                                        UndefElts2, Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }
      TmpV = SimplifyDemandedVectorElts(II->getArgOperand(1), DemandedElts,
                                        UndefElts3, Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      // Lower element is undefined if all three lower elements are undefined.
      // Consider things like undef&0. The result is known zero, not undef.
      if (!UndefElts2[0] || !UndefElts3[0])
        UndefElts.clearBit(0);

      break;

    case Intrinsic::x86_sse2_pmulu_dq:
    case Intrinsic::x86_sse41_pmuldq:
    case Intrinsic::x86_avx2_pmul_dq:
    case Intrinsic::x86_avx2_pmulu_dq:
    case Intrinsic::x86_avx512_pmul_dq_512:
    case Intrinsic::x86_avx512_pmulu_dq_512: {
      Value *Op0 = II->getArgOperand(0);
      Value *Op1 = II->getArgOperand(1);
      unsigned InnerVWidth = Op0->getType()->getVectorNumElements();
      assert((VWidth * 2) == InnerVWidth && "Unexpected input size");

      APInt InnerDemandedElts(InnerVWidth, 0);
      for (unsigned i = 0; i != VWidth; ++i)
        if (DemandedElts[i])
          InnerDemandedElts.setBit(i * 2);

      UndefElts2 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op0, InnerDemandedElts, UndefElts2,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(0, TmpV); MadeChange = true; }

      UndefElts3 = APInt(InnerVWidth, 0);
      TmpV = SimplifyDemandedVectorElts(Op1, InnerDemandedElts, UndefElts3,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }

      break;
    }

    case Intrinsic::x86_sse2_packssdw_128:
    case Intrinsic::x86_sse2_packsswb_128:
    case Intrinsic::x86_sse2_packuswb_128:
    case Intrinsic::x86_sse41_packusdw:
    case Intrinsic::x86_avx2_packssdw:
    case Intrinsic::x86_avx2_packsswb:
    case Intrinsic::x86_avx2_packusdw:
    case Intrinsic::x86_avx2_packuswb:
    case Intrinsic::x86_avx512_packssdw_512:
    case Intrinsic::x86_avx512_packsswb_512:
    case Intrinsic::x86_avx512_packusdw_512:
    case Intrinsic::x86_avx512_packuswb_512: {
      auto *Ty0 = II->getArgOperand(0)->getType();
      unsigned InnerVWidth = Ty0->getVectorNumElements();
      assert(VWidth == (InnerVWidth * 2) && "Unexpected input size");

      unsigned NumLanes = Ty0->getPrimitiveSizeInBits() / 128;
      unsigned VWidthPerLane = VWidth / NumLanes;
      unsigned InnerVWidthPerLane = InnerVWidth / NumLanes;

      // Per lane, pack the elements of the first input and then the second.
      // e.g.
      // v8i16 PACK(v4i32 X, v4i32 Y) - (X[0..3],Y[0..3])
      // v32i8 PACK(v16i16 X, v16i16 Y) - (X[0..7],Y[0..7]),(X[8..15],Y[8..15])
      for (int OpNum = 0; OpNum != 2; ++OpNum) {
        APInt OpDemandedElts(InnerVWidth, 0);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          unsigned LaneIdx = Lane * VWidthPerLane;
          for (unsigned Elt = 0; Elt != InnerVWidthPerLane; ++Elt) {
            unsigned Idx = LaneIdx + Elt + InnerVWidthPerLane * OpNum;
            if (DemandedElts[Idx])
              OpDemandedElts.setBit((Lane * InnerVWidthPerLane) + Elt);
          }
        }

        // Demand elements from the operand.
        auto *Op = II->getArgOperand(OpNum);
        APInt OpUndefElts(InnerVWidth, 0);
        TmpV = SimplifyDemandedVectorElts(Op, OpDemandedElts, OpUndefElts,
                                          Depth + 1);
        if (TmpV) {
          II->setArgOperand(OpNum, TmpV);
          MadeChange = true;
        }

        // Pack the operand's UNDEF elements, one lane at a time.
        OpUndefElts = OpUndefElts.zext(VWidth);
        for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
          APInt LaneElts = OpUndefElts.lshr(InnerVWidthPerLane * Lane);
          LaneElts = LaneElts.getLoBits(InnerVWidthPerLane);
          LaneElts = LaneElts.shl(InnerVWidthPerLane * (2 * Lane + OpNum));
          UndefElts |= LaneElts;
        }
      }
      break;
    }

    // PSHUFB
    case Intrinsic::x86_ssse3_pshuf_b_128:
    case Intrinsic::x86_avx2_pshuf_b:
    case Intrinsic::x86_avx512_pshuf_b_512:
    // PERMILVAR
    case Intrinsic::x86_avx_vpermilvar_ps:
    case Intrinsic::x86_avx_vpermilvar_ps_256:
    case Intrinsic::x86_avx512_vpermilvar_ps_512:
    case Intrinsic::x86_avx_vpermilvar_pd:
    case Intrinsic::x86_avx_vpermilvar_pd_256:
    case Intrinsic::x86_avx512_vpermilvar_pd_512:
    // PERMV
    case Intrinsic::x86_avx2_permd:
    case Intrinsic::x86_avx2_permps: {
      Value *Op1 = II->getArgOperand(1);
      TmpV = SimplifyDemandedVectorElts(Op1, DemandedElts, UndefElts,
                                        Depth + 1);
      if (TmpV) { II->setArgOperand(1, TmpV); MadeChange = true; }
      break;
    }

    // SSE4A instructions leave the upper 64-bits of the 128-bit result
    // in an undefined state.
    case Intrinsic::x86_sse4a_extrq:
    case Intrinsic::x86_sse4a_extrqi:
    case Intrinsic::x86_sse4a_insertq:
    case Intrinsic::x86_sse4a_insertqi:
      UndefElts.setHighBits(VWidth / 2);
      break;
    case Intrinsic::amdgcn_buffer_load:
    case Intrinsic::amdgcn_buffer_load_format: {
      if (VWidth == 1 || !DemandedElts.isMask())
        return nullptr;

      // TODO: Handle 3 vectors when supported in code gen.
      unsigned NewNumElts = PowerOf2Ceil(DemandedElts.countTrailingOnes());
      if (NewNumElts == VWidth)
        return nullptr;

      Module *M = II->getParent()->getParent()->getParent();
      Type *EltTy = V->getType()->getVectorElementType();

      Type *NewTy = (NewNumElts == 1) ? EltTy :
        VectorType::get(EltTy, NewNumElts);

      Function *NewIntrin = Intrinsic::getDeclaration(M, II->getIntrinsicID(),
                                                      NewTy);

      SmallVector<Value *, 5> Args;
      for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
        Args.push_back(II->getArgOperand(I));

      IRBuilderBase::InsertPointGuard Guard(*Builder);
      Builder->SetInsertPoint(II);

      CallInst *NewCall = Builder->CreateCall(NewIntrin, Args);
      NewCall->takeName(II);
      NewCall->copyMetadata(*II);
      if (NewNumElts == 1) {
        return Builder->CreateInsertElement(UndefValue::get(V->getType()),
                                            NewCall, static_cast<uint64_t>(0));
      }

      SmallVector<uint32_t, 8> EltMask;
      for (unsigned I = 0; I < VWidth; ++I)
        EltMask.push_back(I);

      Value *Shuffle = Builder->CreateShuffleVector(
          NewCall, UndefValue::get(NewTy), EltMask);

      MadeChange = true;
      return Shuffle;
    }
    }
    break;
  }
  }
  return MadeChange ? I : nullptr;
}