//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include <numeric>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale  = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
          decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  BuilderTy AllocaBuilder(Builder);
  AllocaBuilder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
    decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = AllocaBuilder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = AllocaBuilder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(MaybeAlign(AI.getAlignment()));
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
                                             bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
      C = FoldedC;
    return C;
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
        EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

Instruction::CastOps InstCombiner::isEliminableCastPair(const CastInst *CI1,
                                                        const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
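  // For example (illustrative): a zext i8 -> i16 followed by a zext
  // i16 -> i32 collapses to a single zext i8 -> i32, when
  // isEliminableCastPair says the pair is eliminable.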
  if (auto *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Ty = CI.getType();
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
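/// For example (illustrative):
///   trunc (add i32 %x, %y) to i8 --> add i8 (trunc %x), (trunc %y)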
///
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of a
    // constant amount, we can always perform a SHL in a smaller type.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth)
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth &&
          IC.MaskedValueIsZero(I->getOperand(0),
            APInt::getBitsSetFrom(OrigBitWidth, BitWidth), 0, CxtI)) {
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
      }
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to the sign bit of the truncate type.
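    // For example (illustrative): if i32 %x has at least 25 sign bits,
    //   trunc (ashr i32 %x, 4) to i8 --> ashr i8 (trunc %x), 4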
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth &&
          OrigBitWidth - BitWidth <
              IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = VectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Rotate left/right may occur in a wider type than necessary because of type
/// promotion rules. Try to narrow the inputs and convert to funnel shift.
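/// For example (illustrative), a rotate-right of i8 %x widened to i32:
///   %z    = zext i8 %x to i32
///   %sub  = sub i32 8, %amt
///   %shr  = lshr i32 %z, %amt
///   %shl  = shl i32 %z, %sub
///   %or   = or i32 %shr, %shl
///   %r    = trunc i32 %or to i8
/// becomes
///   %namt = trunc i32 %amt to i8
///   %r    = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %namt)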
Instruction *InstCombiner::narrowRotate(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts with the same shifted operand:
  // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
  Value *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
    return nullptr;

  Value *ShVal, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
    return nullptr;

  auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
  auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
  if (ShiftOpcode0 == ShiftOpcode1)
    return nullptr;

  // Match the shift amount operands for a rotate pattern. This always matches
  // a subtraction on the R operand.
  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal, L) | (lshr ShVal, Width - L)
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
      return L;

    // The shift amount may be masked with negation:
    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool SubIsOnLHS = false;
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    SubIsOnLHS = true;
  }
  if (!ShAmt)
    return nullptr;

  // The shifted value must have high zeros in the wide type. Typically, this
  // will be a zext, but it could also be the result of an 'and' or 'shift'.
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X = Builder.CreateTrunc(ShVal, DestTy);
  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
  Intrinsic::ID IID = IsFshl ?
      Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
/// TODO: Transforms for truncated shifts should be moved into here.
Instruction *InstCombiner::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }

  default: break;
  }

  if (Instruction *NarrowOr = narrowRotate(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      Shuf->getMask()->getSplatValue() &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths.
/// This could also be extended to handle the case of inserting a scalar
/// constant into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *DestTy = CI.getType(), *SrcTy = Src->getType();

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &CI)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(CI, Res);
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0)))
    if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  if (DestTy->getScalarSizeInBits() == 1) {
    Value *Zero = Constant::getNullValue(Src->getType());
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
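    // For example (illustrative, splat shift amount):
    //   trunc (lshr <2 x i32> %x, <i32 3, i32 3>) to <2 x i1>
    //   --> icmp ne (and <2 x i32> %x, <i32 8, i32 8>), zeroinitializer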
    Value *X;
    const APInt *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_APInt(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      APInt MaskC = APInt(SrcTy->getScalarSizeInBits(), 1).shl(*C);
      Value *And = Builder.CreateAnd(X, ConstantInt::get(SrcTy, MaskC));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_APInt(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      APInt MaskC = APInt(SrcTy->getScalarSizeInBits(), 1).shl(*C) | 1;
      Value *And = Builder.CreateAnd(X, ConstantInt::get(SrcTy, MaskC));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  // FIXME: Maybe combine the next two transforms to handle the no cast case
  // more efficiently. Support vector types. Cleanup code by using m_OneUse.

  // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
  Value *A = nullptr; ConstantInt *Cst = nullptr;
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
    // We have three types to worry about here: the type of A, the source of
    // the truncate (MidSize), and the destination of the truncate. We know that
    // ASize < MidSize and MidSize > ResultSize, but don't know the relation
    // between ASize and ResultSize.
    unsigned ASize = A->getType()->getPrimitiveSizeInBits();

    // If the shift amount is larger than the size of A, then the result is
    // known to be zero because all the input bits got shifted out.
    if (Cst->getZExtValue() >= ASize)
      return replaceInstUsesWith(CI, Constant::getNullValue(DestTy));

    // Since we're doing an lshr and a zero extend, and know that the shift
    // amount is smaller than ASize, it is always safe to do the shift in A's
    // type, then zero extend or truncate to the result.
    Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue());
    Shift->takeName(Src);
    return CastInst::CreateIntegerCast(Shift, DestTy, false);
  }

  // FIXME: We should canonicalize to zext/trunc and remove this transform.
  // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type
  // conversion.
  // It works because bits coming from sign extension have the same value as
  // the sign bit of the original value; performing ashr instead of lshr
  // generates bits of the same value as the sign bit.
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst)))) {
    Value *SExt = cast<Instruction>(Src)->getOperand(0);
    const unsigned SExtSize = SExt->getType()->getPrimitiveSizeInBits();
    const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
    const unsigned CISize = CI.getType()->getPrimitiveSizeInBits();
    const unsigned MaxAmt = SExtSize - std::max(CISize, ASize);
    unsigned ShiftAmt = Cst->getZExtValue();

    // This optimization can only be performed when zero bits generated by
    // the original lshr aren't pulled into the value after truncation, so we
    // can only shift by values no larger than the number of extension bits.
    // FIXME: Instead of bailing when the shift is too large, use and to clear
    // the extra bits.
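    // For example (illustrative, A and the trunc both i8, sext to i32):
    //   trunc (lshr (sext i8 %a to i32), 4) to i8 --> ashr i8 %a, 4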
    if (ShiftAmt <= MaxAmt) {
      if (CISize == ASize)
        return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
                                          std::min(ShiftAmt, ASize - 1)));
      if (SExt->hasOneUse()) {
        Value *Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1));
        Shift->takeName(Src);
        return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
      }
    }
  }

  if (Instruction *I = narrowBinOp(CI))
    return I;

  if (Instruction *I = shrinkSplatShuffle(CI, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(CI, Builder))
    return I;

  if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
      shouldChangeType(SrcTy, DestTy)) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      const unsigned DestSize = DestTy->getScalarSizeInBits();
      if (Cst->getValue().ult(DestSize)) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");

        return BinaryOperator::Create(
          Instruction::Shl, NewTrunc,
          ConstantInt::get(DestTy, Cst->getValue().trunc(DestSize)));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(CI, *this))
    return I;

  return nullptr;
}

Instruction *InstCombiner::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext,
                                             bool DoTransform) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
      if (!DoTransform) return Cmp;

      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return Cmp;

        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return Cmp;

          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be
/// zeroed out.
/// For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombiner &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear
        // are known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize - SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize,
                                                        SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.
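    // For example (illustrative, SrcSize == DstSize):
    //   zext (trunc i32 %a to i16) to i32 --> and i32 %a, 65535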

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // corresponding elimination.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
      Value *Or = Builder.CreateOr(LCast, RCast, CI.getName());
      if (auto *OrInst = dyn_cast<Instruction>(Or))
        Builder.SetInsertPoint(OrInst);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return replaceInstUsesWith(CI, Or);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the
/// icmp.
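/// For example (illustrative):
///   sext (icmp slt i32 %x, 0) to i32 --> ashr i32 %x, 31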
Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != CI.getType())
      In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

    if (Pred == ICmpInst::ICMP_SGT)
      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(CI, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0.  Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                  KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true /*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the
/// common low bits.
/// This is used by code that tries to promote integer operations to a wider
/// type when doing so will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  KnownBits Known = computeKnownBits(Src, 0, &CI);
  if (Known.isNonNegative())
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
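    // For example (illustrative, i8 evaluated in i32): the shl by 24 moves
    // the narrow sign bit to bit 31, and the ashr by 24 then smears it
    // across the high bits, reproducing the sign extension.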
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn
  // sext(trunc(x)) into shifts.
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of a same constant, then this is a sign
  // extension from a smaller value.  If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension.  Since we don't, look deeper and check
  // for a truncate.  If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts.  For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  ConstantInt *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
                        m_ConstantInt(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    unsigned ShAmt = CA->getZExtValue() + SrcDstSize - MidSize;
    Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
    A = Builder.CreateShl(A, ShAmtV, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtV);
  }

  return nullptr;
}


/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr; // No constant folding of this.
  // See if the value can be truncated to half and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr; // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}

// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
// TODO: Make these support undef elements.
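// For example (illustrative): <2 x double> <double 1.0, double 0.5> can
// shrink to <2 x float>, but a double like 1.0 + 2^-52 has no exact float
// representation, so a vector containing it will not shrink past double.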
static Type *shrinkFPConstantVector(Value *V) {
  auto *CV = dyn_cast<Constant>(V);
  if (!CV || !CV->getType()->isVectorTy())
    return nullptr;

  Type *MinType = nullptr;

  unsigned NumElts = CV->getType()->getVectorNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
    if (!CFP)
      return nullptr;

    Type *T = shrinkFPConstant(CFP);
    if (!T)
      return nullptr;

    // If we haven't found a type yet or this type has a larger mantissa than
    // our previous type, this is our new minimal type.
    if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
      MinType = T;
  }

  // Make a vector type from the minimal type.
  return VectorType::get(MinType, NumElts);
}

/// Find the minimum FP type we can safely truncate to.
static Type *getMinimumFPType(Value *V) {
  if (auto *FPExt = dyn_cast<FPExtInst>(V))
    return FPExt->getOperand(0)->getType();

  // If this value is a constant, return the smallest FP type that can
  // accurately represent it.  This allows us to turn
  // (float)((double)X+2.0) into X+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    if (Type *T = shrinkFPConstant(CFP))
      return T;

  // Try to shrink a vector of FP constants.
  if (Type *T = shrinkFPConstantVector(V))
    return T;

  return V->getType();
}

Instruction *InstCombiner::visitFPTrunc(FPTruncInst &FPT) {
  if (Instruction *I = commonCastTransforms(FPT))
    return I;

  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
  Type *Ty = FPT.getType();
  auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
  if (BO && BO->hasOneUse()) {
    Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
    Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
    unsigned OpWidth = BO->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
    unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = Ty->getFPMantissaWidth();
    switch (BO->getOpcode()) {
    default: break;
    case Instruction::FAdd:
    case Instruction::FSub:
      // For addition and subtraction, the infinitely precise result can
      // essentially be arbitrarily wide; proving that double rounding
      // will not occur because the result of OpI is exact (as we will for
      // FMul, for example) is hopeless.  However, we *can* nonetheless
      // frequently know that double rounding cannot occur (or that it is
      // innocuous) by taking advantage of the specific structure of
      // infinitely-precise results that admit double rounding.
      //
      // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
      // to represent both sources, we can guarantee that the double
      // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
      // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
      // for proof of this fact).
      //
      // Note: Figueroa does not consider the case where DstFormat !=
      // SrcFormat.  It's possible (likely even!) that this analysis
      // could be tightened for those cases, but they are rare (the main
      // case of interest here is (float)((double)float + float)).
      if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
        RI->copyFastMathFlags(BO);
        return RI;
      }
      break;
    case Instruction::FMul:
      // For multiplication, the infinitely precise result has at most
      // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
      // that such a value can be exactly represented, then no double
      // rounding can possibly occur; we can safely perform the operation
      // in the destination format if it can represent both sources.
      if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FDiv:
      // For division, we again use the bound from Figueroa's dissertation.
      // This bound can almost certainly be tightened in the unbalanced
      // operand case by an analysis based on the diophantine rational
      // approximation bound, but the well-known condition used here is a
      // good conservative first pass.
      // TODO: Tighten bound via rigorous analysis of the unbalanced case.
      if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FRem: {
      // Remainder is straightforward.  Remainder is always exact, so the
      // type of OpI doesn't enter into things at all.  We simply evaluate
      // in whichever source type is larger, then convert to the
      // destination type.
      if (SrcWidth == OpWidth)
        break;
      Value *LHS, *RHS;
      if (LHSWidth == SrcWidth) {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
      } else {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
      }

      Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
      return CastInst::CreateFPCast(ExactResult, Ty);
    }
    }
  }

  // (fptrunc (fneg x)) -> (fneg (fptrunc x))
  Value *X;
  Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
  if (Op && Op->hasOneUse()) {
    // FIXME: The FMF should propagate from the fptrunc, not the source op.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    if (isa<FPMathOperator>(Op))
      Builder.setFastMathFlags(Op->getFastMathFlags());

    if (match(Op, m_FNeg(m_Value(X)))) {
      Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);

      // FIXME: Once we're sure that unary FNeg optimizations are on par with
      // binary FNeg, this should always return a unary operator.
      if (isa<BinaryOperator>(Op))
        return BinaryOperator::CreateFNegFMF(InnerTrunc, Op);
      return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
    }

    // If we are truncating a select that has an extended operand, we can
    // narrow the other operand and do the select as a narrow op.
    Value *Cond, *X, *Y;
    if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
    if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
  }

  if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::ceil:
    case Intrinsic::fabs:
    case Intrinsic::floor:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the unary
      // FP operation to be itself an fpext from the type to which we're
      // truncating.
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
          break;
      }

      // Do unary FP operation on smaller type.
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
      Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
                                                     II->getIntrinsicID(), Ty);
      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);
      CallInst *NewCI =
          CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(FPT, Builder))
    return I;

  return nullptr;
}

Instruction *InstCombiner::visitFPExt(CastInst &CI) {
  return commonCastTransforms(CI);
}

// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
// This is safe if the intermediate type has enough bits in its mantissa to
// accurately represent all values of X.  For example, this won't work with
// i64 -> float -> i64.
Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;
  Instruction *OpI = cast<Instruction>(FI.getOperand(0));

  Value *SrcI = OpI->getOperand(0);
  Type *FITy = FI.getType();
  Type *OpITy = OpI->getType();
  Type *SrcTy = SrcI->getType();
  bool IsInputSigned = isa<SIToFPInst>(OpI);
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // We can safely assume the conversion won't overflow the output range,
  // because (for example) (uint8_t)18293.f is undefined behavior.

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means the transform is also safe for a signed input and unsigned
  // output, since a negative input would lead to undefined behavior.
  int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
  int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
  int ActualSize = std::min(InputSize, OutputSize);

  if (ActualSize <= OpITy->getFPMantissaWidth()) {
    if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
      if (IsInputSigned && IsOutputSigned)
        return new SExtInst(SrcI, FITy);
      return new ZExtInst(SrcI, FITy);
    }
    if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
      return new TruncInst(SrcI, FITy);
    if (SrcTy == FITy)
      return replaceInstUsesWith(FI, SrcI);
    return new BitCastInst(SrcI, FITy);
  }
  return nullptr;
}

Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  if (Instruction *I = FoldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
  Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
  if (!OpI)
    return commonCastTransforms(FI);

  if (Instruction *I = FoldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it.  This allows the
  // cast to be exposed to other transforms.
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
    if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
      Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());

    Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

/// Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset,
    // turn this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices() &&
        // If CI is an addrspacecast and GEP changes the pointer type, merging
        // GEP into CI would undo canonicalizing addrspacecast with different
        // pointer types, causing infinite loops.
        (!isa<AddrSpaceCastInst>(CI) ||
         GEP->getType() == GEP->getPointerOperandType())) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      return replaceOperand(CI, 0, GEP->getOperand(0));
    }
  }

  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext.  This allows the cast
  // to be exposed to other transforms.

  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();

  if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
    return commonPointerCastTransforms(CI);

  Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
  if (Ty->isVectorTy()) // Handle vectors of pointers.
    PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());

  Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
  return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type.  Since the zext/trunc
/// is done using an integer type, we have a (bitcast(cast(bitcast))) pattern,
/// and endianness will impact which end of the vector is extended or
/// truncated.
///
/// A vector is always stored with index 0 at the lowest address, which
/// corresponds to the most significant bits for a big endian stored integer
/// and the least significant bits for little endian.  A trunc/zext of an
/// integer impacts the big end of the integer.  Thus, we need to add/remove
/// elements at the front of the vector for big endian targets, and the back
/// of the vector for little endian targets.
///
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
static Instruction *optimizeVectorResizeWithIntegerBitCasts(Value *InVal,
                                                            VectorType *DestTy,
                                                            InstCombiner &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be the
    // same size.  There is no specific reason we couldn't handle things like
    // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
    // there yet.
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
    InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
  }

  bool IsBigEndian = IC.getDataLayout().isBigEndian();
  unsigned SrcElts = SrcTy->getNumElements();
  unsigned DestElts = DestTy->getNumElements();

  assert(SrcElts != DestElts && "Element counts should be different.");

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  SmallVector<uint32_t, 16> ShuffleMaskStorage;
  ArrayRef<uint32_t> ShuffleMask;
  Value *V2;

  // Produce an identity shuffle mask for the src vector.
  ShuffleMaskStorage.resize(SrcElts);
  std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);

  if (SrcElts > DestElts) {
    // If we're shrinking the number of elements (rewriting an integer
    // truncate), just shuffle in the elements corresponding to the least
    // significant bits from the input and use undef as the second shuffle
    // input.
    V2 = UndefValue::get(SrcTy);
    // Make sure the shuffle mask selects the "least significant bits" by
    // keeping elements from the back of the src vector for big endian, and
    // from the front for little endian.
    ShuffleMask = ShuffleMaskStorage;
    if (IsBigEndian)
      ShuffleMask = ShuffleMask.take_back(DestElts);
    else
      ShuffleMask = ShuffleMask.take_front(DestElts);
  } else {
    // If we're increasing the number of elements (rewriting an integer zext),
    // shuffle in all of the elements from InVal.  Fill the rest of the result
    // elements with zeros from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    // Use the first elt from V2 when indicating zero in the shuffle mask.
    uint32_t NullElt = SrcElts;
    // Extend with null values in the "most significant bits" by adding
    // elements in front of the src vector for big endian, and at the back
    // for little endian.
    unsigned DeltaElts = DestElts - SrcElts;
    if (IsBigEndian)
      ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
    else
      ShuffleMaskStorage.append(DeltaElts, NullElt);
    ShuffleMask = ShuffleMaskStorage;
  }

  return new ShuffleVectorInst(InVal, V2,
                               ConstantDataVector::get(V2->getContext(),
                                                       ShuffleMask));
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector.  See the example in the comment for
/// optimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
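///
/// For example (a sketch, little endian, <2 x float> target): for
///   or i64 (zext (bitcast float %a)), (shl (zext (bitcast float %b)), 32)
/// we record %a as element 0 and %b as element 1.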
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win; try inserting it
  // into the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements.  Slice it up
    // into pieces and insert each element-sized piece into the vector.
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                   C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift + i * ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
            VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
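    // E.g., with i16 elements, 'shl i64 %v, 16' moves the bits of %v up by
    // exactly one element slot, so we keep decomposing %v at the new offset.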
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  }
  }
}

/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements.  This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombiner &IC) {
  VectorType *DestVecTy = cast<VectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry.  Recast this as a
  // set of insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.

    Result = IC.Builder.CreateInsertElement(Result, Elements[i],
                                            IC.Builder.getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element.  The backend tends to handle bitcasts
/// of vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombiner &IC) {
  // TODO: Create and use a pattern matcher for ExtractElementInst.
  auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
  if (!ExtElt || !ExtElt->hasOneUse())
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
  Type *DestType = BitCast.getType();
  if (!VectorType::isValidElementType(DestType))
    return nullptr;

  unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
  auto *NewVecType = VectorType::get(DestType, NumElts);
  auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
                                         NewVecType, "bc");
  return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
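///
/// For example (a sketch):
///   bitcast (xor (bitcast <4 x i32> %X to <2 x i64>), %Y) to <4 x i32>
///     --> xor %X, (bitcast <2 x i64> %Y to <4 x i32>)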
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;
  if (!DestTy->isIntOrIntVectorTy() ||
      !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = ConstantExpr::getBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (CondTy->isVectorTy()) {
    if (!DestTy->isVectorTy())
      return nullptr;
    if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
      return nullptr;
  }

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///     A -> B    cast
///     PHI
///     B -> A    cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();   // Type B
  Type *DestTy = CI.getType();    // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions, each loaded
        // value is used as the address of a later load instruction; a bitcast
        // is necessary to change the value type, so don't optimize it. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        if (!SI->isSimple() || SI->getOperand(0) != OldPN)
          return nullptr;
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        // Verify it's a B->A cast.
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return nullptr;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        if (OldPhiNodes.count(PHI) == 0)
          return nullptr;
      } else {
        return nullptr;
      }
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        // Explicitly perform load combine to make sure no opposing transform
        // can remove the bitcast in the meantime and trigger an infinite loop.
        Builder.SetInsertPoint(LI);
        NewV = combineLoadToNewType(*LI, DestTy);
        // Remove the old load and its use in the old phi, which itself becomes
        // dead once the whole transform finishes.
        replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
        eraseInstFromFunction(*LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process their users, which are
  // Stores and BitCasts.  Without this processing, new PHI nodes could be
  // replicated and could lead to extra moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI.  These will help
  // later to get rid of a closure formed by OldPHI nodes.
  Instruction *RetVal = nullptr;
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (auto It = OldPN->user_begin(), End = OldPN->user_end(); It != End;) {
      User *V = *It;
      // We may remove this user; advance to avoid iterator invalidation.
      ++It;
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        assert(SI->isSimple() && SI->getOperand(0) == OldPN);
        Builder.SetInsertPoint(SI);
        auto *NewBC = cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
        SI->setOperand(0, NewBC);
        Worklist.push(SI);
        assert(hasStoreUsersOnly(*NewBC));
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void) TyA;
        (void) TyB;
        Instruction *I = replaceInstUsesWith(*BCI, NewPN);
        if (BCI == &CI)
          RetVal = I;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        assert(OldPhiNodes.count(PHI) > 0);
        (void) PHI;
      } else {
        llvm_unreachable("all uses should be handled");
      }
    }
  }

  return RetVal;
}

Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == SrcTy)
    return replaceInstUsesWith(CI, Src);

  if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
    PointerType *SrcPTy = cast<PointerType>(SrcTy);
    Type *DstElTy = DstPTy->getElementType();
    Type *SrcElTy = SrcPTy->getElementType();

    // Casting pointers between the same type, but with different address
    // spaces, is an addrspace cast rather than a bitcast.
    if ((DstElTy == SrcElTy) &&
        (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
      return new AddrSpaceCastInst(Src, DestTy);

    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // When the type pointed to is not sized the cast cannot be
    // turned into a gep.
    Type *PointeeType =
        cast<PointerType>(Src->getType()->getScalarType())->getElementType();
    if (!PointeeType->isSized())
      return nullptr;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    unsigned NumZeros = 0;
    while (SrcElTy != DstElTy &&
           isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
           SrcElTy->getNumContainedTypes() /* not "{}" */) {
      SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(0U);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
      GetElementPtrInst *GEP =
          GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);

      // If the source pointer is dereferenceable, then assume it points to an
      // allocated object and apply "inbounds" to the GEP.
      bool CanBeNull;
      if (Src->getPointerDereferenceableBytes(DL, CanBeNull)) {
        // In a non-default address space (not 0), a null pointer cannot be
        // assumed inbounds, so ignore that case (dereferenceable_or_null).
        // The reason is that 'null' is not treated differently in these
        // address spaces, and we consequently ignore the 'gep inbounds'
        // special case for 'null', which allows 'inbounds' on 'null' if the
        // indices are zeros.
        if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
          GEP->setIsInBounds();
      }
      return GEP;
    }
  }

  if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
    if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
      // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually.  Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
            Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)).  Check to see if this is
    // a bitcast to a vector with the same # elts.
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    unsigned NumShufElts = Shuf->getType()->getVectorNumElements();
    unsigned NumSrcVecElts = ShufOp0->getType()->getVectorNumElements();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        DestTy->getVectorNumElements() == NumShufElts &&
        NumShufElts == NumSrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector.  Use the same element ID's, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getOperand(2));
      }
    }

    // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
    // a byte-swap:
    // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
    // TODO: We should match the related pattern for bitreverse.
    if (DestTy->isIntegerTy() &&
        DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
        SrcTy->getScalarSizeInBits() == 8 && NumShufElts % 2 == 0 &&
        Shuf->hasOneUse() && Shuf->isReverse()) {
      assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
      assert(isa<UndefValue>(ShufOp1) && "Unexpected shuffle op");
      Function *Bswap =
          Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
      Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
      return IntrinsicInst::Create(Bswap, { ScalarX });
    }
  }

  // Handle the A->B->A cast when there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  Type *DestElemTy = DestTy->getElementType();
  if (SrcTy->getElementType() != DestElemTy) {
    Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
      // Handle vectors of pointers.
      MidTy = VectorType::get(MidTy, VT->getNumElements());
    }

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}