//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include <numeric>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale  = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
            decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
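/// For example (an illustrative sketch, not taken from a test case):
///   %buf = alloca i8, i32 40
///   %p = bitcast i8* %buf to i32*
/// can be rewritten so the alloca produces the cast-to type directly:
///   %buf = alloca i32, i32 10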
Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
                                                   AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type being cast to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
  unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
      decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus by using a non-1 scale, we really can
  // do the transform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(MaybeAlign(AI.getAlignment()));
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
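/// For example (an illustrative sketch), evaluating
///   %s = mul i32 (zext i8 %a to i32), 3
/// in type i8 emits %r = mul i8 %a, 3: the zext folds away because its source
/// already has the requested type, and the constant is truncated.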
Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
                                             bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

Instruction::CastOps InstCombiner::isEliminableCastPair(const CastInst *CI1,
                                                        const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
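/// For example (an illustrative sketch), a pair of extensions collapses to a
/// single cast via isEliminableCastPair:
///   %m = zext i8 %x to i16
///   %z = zext i16 %m to i32
/// -->
///   %z = zext i8 %x to i32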
Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Ty = CI.getType();
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V's type.  We should return true if
/// trunc(V) can be computed by computing V in the smaller type.  If V is an
/// instruction, then trunc(inst(x,y)) can be computed as inst(trunc(x),
/// trunc(y)), which only makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
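/// For example (an illustrative sketch), with %a and %b of type i8:
///   %x = zext i8 %a to i32
///   %y = zext i8 %b to i32
///   %s = add i32 %x, %y
///   %t = trunc i32 %s to i8
/// The add can be evaluated directly in i8 (%t = add i8 %a, %b) because the
/// low 8 bits of the sum depend only on the low 8 bits of the operands.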
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombiner &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of a
    // constant amount, we can always perform a SHL in a smaller type.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth)
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth &&
          IC.MaskedValueIsZero(I->getOperand(0),
                               APInt::getBitsSetFrom(OrigBitWidth, BitWidth),
                               0, CxtI)) {
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
      }
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to sign bit of the truncate type.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
      uint32_t BitWidth = Ty->getScalarSizeInBits();
      if (Amt->getLimitedValue(BitWidth) < BitWidth &&
          OrigBitWidth - BitWidth <
              IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
        return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = VectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Rotate left/right may occur in a wider type than necessary because of type
/// promotion rules. Try to narrow the inputs and convert to funnel shift.
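/// For example (an illustrative sketch), an i8 rotate-left that was widened to
/// i32 by promotion, with %x of type i8 and %n of type i32:
///   %z = zext i8 %x to i32
///   %shl = shl i32 %z, (and i32 %n, 7)
///   %shr = lshr i32 %z, (and i32 (sub 0, %n), 7)
///   %t = trunc i32 (or i32 %shl, %shr) to i8
/// becomes:
///   %t = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 (trunc %n))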
Instruction *InstCombiner::narrowRotate(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts with the same shifted operand:
  // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
  Value *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
    return nullptr;

  Value *ShVal, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
    return nullptr;

  auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
  auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
  if (ShiftOpcode0 == ShiftOpcode1)
    return nullptr;

  // Match the shift amount operands for a rotate pattern. This always matches
  // a subtraction on the R operand.
  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal, L) | (lshr ShVal, Width - L)
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
      return L;

    // The shift amount may be masked with negation:
    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool SubIsOnLHS = false;
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    SubIsOnLHS = true;
  }
  if (!ShAmt)
    return nullptr;

  // The shifted value must have high zeros in the wide type. Typically, this
  // will be a zext, but it could also be the result of an 'and' or 'shift'.
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X = Builder.CreateTrunc(ShVal, DestTy);
  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
/// TODO: Transforms for truncated shifts should be moved into here.
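/// For example (an illustrative sketch), with i8 legal:
///   trunc (add i32 %x, 15) to i8  -->  add (trunc i32 %x to i8), 15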
Instruction *InstCombiner::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }

  default: break;
  }

  if (Instruction *NarrowOr = narrowRotate(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      Shuf->getMask()->getSplatValue() &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths.  This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
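/// For example (an illustrative sketch):
///   %v = insertelement <2 x i32> undef, i32 %x, i32 0
///   %t = trunc <2 x i32> %v to <2 x i16>
/// becomes:
///   %t = insertelement <2 x i16> undef, i16 (trunc %x), i32 0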
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *DestTy = CI.getType(), *SrcTy = Src->getType();

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type; don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &CI)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(CI, Res);
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *SI = dyn_cast<SelectInst>(CI.getOperand(0)))
    if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(CI))
    return &CI;

  if (DestTy->getScalarSizeInBits() == 1) {
    Value *Zero = Constant::getNullValue(Src->getType());
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    const APInt *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_APInt(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      APInt MaskC = APInt(SrcTy->getScalarSizeInBits(), 1).shl(*C);
      Value *And = Builder.CreateAnd(X, ConstantInt::get(SrcTy, MaskC));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_APInt(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      APInt MaskC = APInt(SrcTy->getScalarSizeInBits(), 1).shl(*C) | 1;
      Value *And = Builder.CreateAnd(X, ConstantInt::get(SrcTy, MaskC));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  // FIXME: Maybe combine the next two transforms to handle the no cast case
  // more efficiently. Support vector types. Cleanup code by using m_OneUse.

  // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
  Value *A = nullptr; ConstantInt *Cst = nullptr;
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
    // We have three types to worry about here, the type of A, the source of
    // the truncate (MidSize), and the destination of the truncate. We know that
    // ASize < MidSize and MidSize > ResultSize, but don't know the relation
    // between ASize and ResultSize.
    unsigned ASize = A->getType()->getPrimitiveSizeInBits();

    // If the shift amount is larger than the size of A, then the result is
    // known to be zero because all the input bits got shifted out.
    if (Cst->getZExtValue() >= ASize)
      return replaceInstUsesWith(CI, Constant::getNullValue(DestTy));

    // Since we're doing an lshr and a zero extend, and know that the shift
    // amount is smaller than ASize, it is always safe to do the shift in A's
    // type, then zero extend or truncate to the result.
    Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue());
    Shift->takeName(Src);
    return CastInst::CreateIntegerCast(Shift, DestTy, false);
  }

  // FIXME: We should canonicalize to zext/trunc and remove this transform.
  // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type
  // conversion.
  // It works because bits coming from sign extension have the same value as
  // the sign bit of the original value; performing ashr instead of lshr
  // generates bits of the same value as the sign bit.
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst)))) {
    Value *SExt = cast<Instruction>(Src)->getOperand(0);
    const unsigned SExtSize = SExt->getType()->getPrimitiveSizeInBits();
    const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
    const unsigned CISize = CI.getType()->getPrimitiveSizeInBits();
    const unsigned MaxAmt = SExtSize - std::max(CISize, ASize);
    unsigned ShiftAmt = Cst->getZExtValue();

    // This optimization can only be performed when zero bits generated by
    // the original lshr aren't pulled into the value after truncation, so we
    // can only shift by values no larger than the number of extension bits.
    // FIXME: Instead of bailing when the shift is too large, use and to clear
    // the extra bits.
    if (ShiftAmt <= MaxAmt) {
      if (CISize == ASize)
        return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
                                          std::min(ShiftAmt, ASize - 1)));
      if (SExt->hasOneUse()) {
        Value *Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1));
        Shift->takeName(Src);
        return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
      }
    }
  }

  if (Instruction *I = narrowBinOp(CI))
    return I;

  if (Instruction *I = shrinkSplatShuffle(CI, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(CI, Builder))
    return I;

  if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
      shouldChangeType(SrcTy, DestTy)) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      const unsigned DestSize = DestTy->getScalarSizeInBits();
      if (Cst->getValue().ult(DestSize)) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");

        return BinaryOperator::Create(
            Instruction::Shl, NewTrunc,
            ConstantInt::get(DestTy, Cst->getValue().trunc(DestSize)));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(CI, *this))
    return I;

  return nullptr;
}

Instruction *InstCombiner::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext,
                                             bool DoTransform) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
      if (!DoTransform) return Cmp;

      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return Cmp;

        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return Cmp;

          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombiner &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites
    // the upper bits, we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the extra high bits, though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear
        // are known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize-SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize,
                                                        SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.
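    // Illustrative sketch of the SrcSize == DstSize case below:
    //   %m = trunc i32 %a to i16
    //   %z = zext i16 %m to i32
    // becomes: %z = and i32 %a, 65535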

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // corresponding elimination.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
      Value *Or = Builder.CreateOr(LCast, RCast, CI.getName());
      if (auto *OrInst = dyn_cast<Instruction>(Or))
        Builder.SetInsertPoint(OrInst);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return replaceInstUsesWith(CI, Or);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
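/// For example (an illustrative sketch):
///   %c = icmp slt i32 %x, 0
///   %s = sext i1 %c to i32
/// becomes %s = ashr i32 %x, 31: all ones when %x is negative, zero otherwise.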
Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != CI.getType())
      In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

    if (Pred == ICmpInst::ICMP_SGT)
      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(CI, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                  KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the
/// common low bits.  This is used by code that tries to promote integer
/// operations to a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
///
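/// For example (an illustrative sketch), with %a and %b of type i8:
///   %s = add i16 (sext i8 %a to i16), (sext i8 %b to i16)
///   %e = sext i16 %s to i32
/// The add can instead be performed in i32 on i32 sign extensions of %a and
/// %b; the low 16 bits come out the same either way, and visitSExt fixes up
/// the high bits afterwards if they are not already correct.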
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombiner::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  KnownBits Known = computeKnownBits(Src, 0, &CI);
  if (Known.isNonNegative())
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
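    // e.g. for i16 -> i32 this emits: %r = ashr (shl %res, 16), 16.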
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn sext(trunc(x))
  // into shifts.
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of the same constant, then this is a sign
  // extension from a smaller value.  If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension.  Since we don't, look deeper and check
  // for a truncate.  If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts.  For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  ConstantInt *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
                        m_ConstantInt(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
    Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
    A = Builder.CreateShl(A, ShAmtV, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtV);
  }

  return nullptr;
}


/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr;  // No constant folding of this.
  // See if the value can be truncated to half and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr;  // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}
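// For example (an illustrative sketch): the double constant 2.0 shrinks to
// half because it is exactly representable there, while the double constant
// 0.1 is not exactly representable in float (or half) and is not shrunk.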
1481 static Type *shrinkFPConstantVector(Value *V) {
1482   auto *CV = dyn_cast<Constant>(V);
1483   if (!CV || !CV->getType()->isVectorTy())
1484     return nullptr;
1485 
1486   Type *MinType = nullptr;
1487 
1488   unsigned NumElts = CV->getType()->getVectorNumElements();
1489   for (unsigned i = 0; i != NumElts; ++i) {
1490     auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
1491     if (!CFP)
1492       return nullptr;
1493 
1494     Type *T = shrinkFPConstant(CFP);
1495     if (!T)
1496       return nullptr;
1497 
1498     // If we haven't found a type yet or this type has a larger mantissa than
1499     // our previous type, this is our new minimal type.
1500     if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
1501       MinType = T;
1502   }
1503 
1504   // Make a vector type from the minimal type.
1505   return VectorType::get(MinType, NumElts);
1506 }
1507 
1508 /// Find the minimum FP type we can safely truncate to.
1509 static Type *getMinimumFPType(Value *V) {
1510   if (auto *FPExt = dyn_cast<FPExtInst>(V))
1511     return FPExt->getOperand(0)->getType();
1512 
1513   // If this value is a constant, return the constant in the smallest FP type
1514   // that can accurately represent it.  This allows us to turn
1515   // (float)((double)X+2.0) into x+2.0f.
1516   if (auto *CFP = dyn_cast<ConstantFP>(V))
1517     if (Type *T = shrinkFPConstant(CFP))
1518       return T;
1519 
1520   // Try to shrink a vector of FP constants.
1521   if (Type *T = shrinkFPConstantVector(V))
1522     return T;
1523 
1524   return V->getType();
1525 }
1526 
1527 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &FPT) {
1528   if (Instruction *I = commonCastTransforms(FPT))
1529     return I;
1530 
1531   // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
1532   // simplify this expression to avoid one or more of the trunc/extend
1533   // operations if we can do so without changing the numerical results.
1534   //
1535   // The exact manner in which the widths of the operands interact to limit
1536   // what we can and cannot do safely varies from operation to operation, and
1537   // is explained below in the various case statements.
1538   Type *Ty = FPT.getType();
1539   auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
1540   if (BO && BO->hasOneUse()) {
1541     Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
1542     Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
1543     unsigned OpWidth = BO->getType()->getFPMantissaWidth();
1544     unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
1545     unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
1546     unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1547     unsigned DstWidth = Ty->getFPMantissaWidth();
1548     switch (BO->getOpcode()) {
1549     default: break;
1550     case Instruction::FAdd:
1551     case Instruction::FSub:
1552       // For addition and subtraction, the infinitely precise result can
1553       // essentially be arbitrarily wide; proving that double rounding
1554       // will not occur because the result of OpI is exact (as we will for
1555       // FMul, for example) is hopeless.  However, we *can* nonetheless
1556       // frequently know that double rounding cannot occur (or that it is
1557       // innocuous) by taking advantage of the specific structure of
1558       // infinitely-precise results that admit double rounding.
1559       //
1560       // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1561       // to represent both sources, we can guarantee that the double
1562       // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1563       // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1564       // for proof of this fact).
1565       //
1566       // Note: Figueroa does not consider the case where DstFormat !=
1567       // SrcFormat.  It's possible (likely even!) that this analysis
1568       // could be tightened for those cases, but they are rare (the main
1569       // case of interest here is (float)((double)float + float)).
1570       if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1571         Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1572         Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1573         Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
1574         RI->copyFastMathFlags(BO);
1575         return RI;
1576       }
1577       break;
1578     case Instruction::FMul:
1579       // For multiplication, the infinitely precise result has at most
1580       // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1581       // that such a value can be exactly represented, then no double
1582       // rounding can possibly occur; we can safely perform the operation
1583       // in the destination format if it can represent both sources.
1584       if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1585         Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1586         Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1587         return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
1588       }
1589       break;
1590     case Instruction::FDiv:
1591       // For division, we again use the bound from Figueroa's
1592       // dissertation.  I am entirely certain that this bound can be
1593       // tightened in the unbalanced operand case by an analysis based on
1594       // the diophantine rational approximation bound, but the well-known
1595       // condition used here is a good conservative first pass.
1596       // TODO: Tighten bound via rigorous analysis of the unbalanced case.
1597       if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
1598         Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1599         Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1600         return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
1601       }
1602       break;
1603     case Instruction::FRem: {
1604       // Remainder is straightforward.  Remainder is always exact, so the
1605       // type of OpI doesn't enter into things at all.  We simply evaluate
1606       // in whichever source type is larger, then convert to the
1607       // destination type.
1608       if (SrcWidth == OpWidth)
1609         break;
1610       Value *LHS, *RHS;
1611       if (LHSWidth == SrcWidth) {
1612         LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
1613         RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
1614       } else {
1615         LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
1616         RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
1617       }
1618 
1619       Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
1620       return CastInst::CreateFPCast(ExactResult, Ty);
1621     }
1622     }
1623   }
1624 
1625   // (fptrunc (fneg x)) -> (fneg (fptrunc x))
1626   Value *X;
1627   Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
1628   if (Op && Op->hasOneUse()) {
1629     // FIXME: The FMF should propagate from the fptrunc, not the source op.
1630     IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1631     if (isa<FPMathOperator>(Op))
1632       Builder.setFastMathFlags(Op->getFastMathFlags());
1633 
1634     if (match(Op, m_FNeg(m_Value(X)))) {
1635       Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
1636 
1637       return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
1638     }
1639 
1640     // If we are truncating a select that has an extended operand, we can
1641     // narrow the other operand and do the select as a narrow op.
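    // For example:
    //   fptrunc (select i1 %c, (fpext float %x to double), double %y) to float
    //     --> select i1 %c, float %x, (fptrunc double %y to float)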
1642     Value *Cond, *X, *Y;
1643     if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
1644         X->getType() == Ty) {
1645       // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
1646       Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1647       Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
1648       return replaceInstUsesWith(FPT, Sel);
1649     }
1650     if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
1651         X->getType() == Ty) {
1652       // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
1653       Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1654       Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
1655       return replaceInstUsesWith(FPT, Sel);
1656     }
1657   }
1658 
1659   if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
1660     switch (II->getIntrinsicID()) {
1661     default: break;
1662     case Intrinsic::ceil:
1663     case Intrinsic::fabs:
1664     case Intrinsic::floor:
1665     case Intrinsic::nearbyint:
1666     case Intrinsic::rint:
1667     case Intrinsic::round:
1668     case Intrinsic::trunc: {
1669       Value *Src = II->getArgOperand(0);
1670       if (!Src->hasOneUse())
1671         break;
1672 
1673       // Except for fabs, this transformation requires the input of the unary FP
1674       // operation to be itself an fpext from the type to which we're
1675       // truncating.
1676       if (II->getIntrinsicID() != Intrinsic::fabs) {
1677         FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
1678         if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
1679           break;
1680       }
1681 
1682       // Do unary FP operation on smaller type.
1683       // (fptrunc (fabs x)) -> (fabs (fptrunc x))
1684       Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
1685       Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
1686                                                      II->getIntrinsicID(), Ty);
1687       SmallVector<OperandBundleDef, 1> OpBundles;
1688       II->getOperandBundlesAsDefs(OpBundles);
1689       CallInst *NewCI =
1690           CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
1691       NewCI->copyFastMathFlags(II);
1692       return NewCI;
1693     }
1694     }
1695   }
1696 
1697   if (Instruction *I = shrinkInsertElt(FPT, Builder))
1698     return I;
1699 
1700   return nullptr;
1701 }
1702 
1703 Instruction *InstCombiner::visitFPExt(CastInst &CI) {
1704   return commonCastTransforms(CI);
1705 }
1706 
1707 // fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1708 // This is safe if the intermediate type has enough bits in its mantissa to
1709 // accurately represent all values of X.  For example, this won't work with
1710 // i64 -> float -> i64.
1711 Instruction *InstCombiner::FoldItoFPtoI(Instruction &FI) {
1712   if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1713     return nullptr;
1714   Instruction *OpI = cast<Instruction>(FI.getOperand(0));
1715 
1716   Value *SrcI = OpI->getOperand(0);
1717   Type *FITy = FI.getType();
1718   Type *OpITy = OpI->getType();
1719   Type *SrcTy = SrcI->getType();
1720   bool IsInputSigned = isa<SIToFPInst>(OpI);
1721   bool IsOutputSigned = isa<FPToSIInst>(FI);
1722 
1723   // We can safely assume the conversion won't overflow the output range,
1724   // because (for example) (uint8_t)18293.f is undefined behavior.
1725 
1726   // Since we can assume the conversion won't overflow, our decision as to
1727   // whether the input will fit in the float should depend on the minimum
1728   // of the input range and output range.
1729 
1730   // This means this is also safe for a signed input and unsigned output, since
1731   // a negative input would lead to undefined behavior.
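  // Worked example: for fptosi (sitofp i8 %x to float) to i32, InputSize is
  // 8 - 1 = 7 and OutputSize is 32 - 1 = 31, so ActualSize is 7, which fits
  // in float's 24-bit mantissa; the cast pair folds to sext i8 %x to i32.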
1732   int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
1733   int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
1734   int ActualSize = std::min(InputSize, OutputSize);
1735 
1736   if (ActualSize <= OpITy->getFPMantissaWidth()) {
1737     if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
1738       if (IsInputSigned && IsOutputSigned)
1739         return new SExtInst(SrcI, FITy);
1740       return new ZExtInst(SrcI, FITy);
1741     }
1742     if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
1743       return new TruncInst(SrcI, FITy);
1744     if (SrcTy == FITy)
1745       return replaceInstUsesWith(FI, SrcI);
1746     return new BitCastInst(SrcI, FITy);
1747   }
1748   return nullptr;
1749 }
1750 
1751 Instruction *InstCombiner::visitFPToUI(FPToUIInst &FI) {
1752   Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
1753   if (!OpI)
1754     return commonCastTransforms(FI);
1755 
1756   if (Instruction *I = FoldItoFPtoI(FI))
1757     return I;
1758 
1759   return commonCastTransforms(FI);
1760 }
1761 
1762 Instruction *InstCombiner::visitFPToSI(FPToSIInst &FI) {
1763   Instruction *OpI = dyn_cast<Instruction>(FI.getOperand(0));
1764   if (!OpI)
1765     return commonCastTransforms(FI);
1766 
1767   if (Instruction *I = FoldItoFPtoI(FI))
1768     return I;
1769 
1770   return commonCastTransforms(FI);
1771 }
1772 
1773 Instruction *InstCombiner::visitUIToFP(CastInst &CI) {
1774   return commonCastTransforms(CI);
1775 }
1776 
1777 Instruction *InstCombiner::visitSIToFP(CastInst &CI) {
1778   return commonCastTransforms(CI);
1779 }
1780 
1781 Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
1782   // If the source integer type is not the intptr_t type for this target, do a
1783   // trunc or zext to the intptr_t type, then inttoptr of it.  This allows the
1784   // cast to be exposed to other transforms.
1785   unsigned AS = CI.getAddressSpace();
1786   if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
1787       DL.getPointerSizeInBits(AS)) {
1788     Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
1789     if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
1790       Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
1791 
1792     Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
1793     return new IntToPtrInst(P, CI.getType());
1794   }
1795 
1796   if (Instruction *I = commonCastTransforms(CI))
1797     return I;
1798 
1799   return nullptr;
1800 }
1801 
1802 /// Implement the transforms for cast of pointer (bitcast/ptrtoint)
1803 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
1804   Value *Src = CI.getOperand(0);
1805 
1806   if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
1807     // If casting the result of a getelementptr instruction with no offset, turn
1808     // this into a cast of the original pointer!
1809     if (GEP->hasAllZeroIndices() &&
1810         // If CI is an addrspacecast and GEP changes the pointer type, merging
1811         // GEP into CI would undo canonicalizing addrspacecast with different
1812         // pointer types, causing infinite loops.
1813         (!isa<AddrSpaceCastInst>(CI) ||
1814          GEP->getType() == GEP->getPointerOperandType())) {
1815       // Changing the cast operand is usually not a good idea but it is safe
1816       // here because the pointer operand is being replaced with another
1817       // pointer operand so the opcode doesn't need to change.
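      // e.g. ptrtoint (getelementptr [4 x i8], [4 x i8]* %p, i64 0, i64 0)
      // simply becomes ptrtoint of %p itself.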
1818       return replaceOperand(CI, 0, GEP->getOperand(0));
1819     }
1820   }
1821 
1822   return commonCastTransforms(CI);
1823 }
1824 
1825 Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
1826   // If the destination integer type is not the intptr_t type for this target,
1827   // do a ptrtoint to intptr_t then do a trunc or zext.  This allows the cast
1828   // to be exposed to other transforms.
1829 
1830   Type *Ty = CI.getType();
1831   unsigned AS = CI.getPointerAddressSpace();
1832 
1833   if (Ty->getScalarSizeInBits() == DL.getPointerSizeInBits(AS))
1834     return commonPointerCastTransforms(CI);
1835 
1836   Type *PtrTy = DL.getIntPtrType(CI.getContext(), AS);
1837   if (Ty->isVectorTy()) // Handle vectors of pointers.
1838     PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
1839 
1840   Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
1841   return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
1842 }
1843 
1844 /// This input value (which is known to have vector type) is being zero extended
1845 /// or truncated to the specified vector type. Since the zext/trunc is done
1846 /// using an integer type, we have a (bitcast(cast(bitcast))) pattern, and
1847 /// endianness will impact which end of the vector is extended or
1848 /// truncated.
1849 ///
1850 /// A vector is always stored with index 0 at the lowest address, which
1851 /// corresponds to the most significant bits for a big endian stored integer and
1852 /// the least significant bits for little endian.  A trunc/zext of an integer
1853 /// impacts the big end of the integer.  Thus, we need to add/remove elements at
1854 /// the front of the vector for big endian targets, and the back of the vector
1855 /// for little endian targets.
1856 ///
1857 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
1858 ///
1859 /// The source and destination vector types may have different element types.
1860 static Instruction *optimizeVectorResizeWithIntegerBitCasts(Value *InVal,
1861                                                             VectorType *DestTy,
1862                                                             InstCombiner &IC) {
1863   // We can only do this optimization if the output is a multiple of the input
1864   // element size, or the input is a multiple of the output element size.
1865   // Convert the input type to have the same element type as the output.
1866   VectorType *SrcTy = cast<VectorType>(InVal->getType());
1867 
1868   if (SrcTy->getElementType() != DestTy->getElementType()) {
1869     // The input types don't need to be identical, but for now they must be the
1870     // same size.  There is no specific reason we couldn't handle things like
1871     // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
1872     // there yet.
1873     if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
1874         DestTy->getElementType()->getPrimitiveSizeInBits())
1875       return nullptr;
1876 
1877     SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
1878     InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
1879   }
1880 
1881   bool IsBigEndian = IC.getDataLayout().isBigEndian();
1882   unsigned SrcElts = SrcTy->getNumElements();
1883   unsigned DestElts = DestTy->getNumElements();
1884 
1885   assert(SrcElts != DestElts && "Element counts should be different.");
1886 
1887   // Now that the element types match, get the shuffle mask and RHS of the
1888   // shuffle to use, which depends on whether we're increasing or decreasing the
1889   // size of the input.
1890   SmallVector<uint32_t, 16> ShuffleMaskStorage;
1891   ArrayRef<uint32_t> ShuffleMask;
1892   Value *V2;
1893 
1894   // Produce an identity shuffle mask for the src vector.
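  // For example, truncating <4 x i32> to <2 x i32> on a little endian target
  // keeps mask elements <0, 1>, while a big endian target keeps <2, 3>: the
  // lanes that hold the integer's least significant bits in each layout.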
1895   ShuffleMaskStorage.resize(SrcElts);
1896   std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);
1897 
1898   if (SrcElts > DestElts) {
1899     // If we're shrinking the number of elements (rewriting an integer
1900     // truncate), just shuffle in the elements corresponding to the least
1901     // significant bits from the input and use undef as the second shuffle
1902     // input.
1903     V2 = UndefValue::get(SrcTy);
1904     // Make sure the shuffle mask selects the "least significant bits" by
1905     // keeping elements from the back of the src vector for big endian, and
1906     // from the front for little endian.
1907     ShuffleMask = ShuffleMaskStorage;
1908     if (IsBigEndian)
1909       ShuffleMask = ShuffleMask.take_back(DestElts);
1910     else
1911       ShuffleMask = ShuffleMask.take_front(DestElts);
1912   } else {
1913     // If we're increasing the number of elements (rewriting an integer zext),
1914     // shuffle in all of the elements from InVal. Fill the rest of the result
1915     // elements with zeros from a constant zero.
1916     V2 = Constant::getNullValue(SrcTy);
1917     // Use first elt from V2 when indicating zero in the shuffle mask.
1918     uint32_t NullElt = SrcElts;
1919     // Extend with null values in the "most significant bits" by adding
1920     // elements in front of the src vector for big endian, and at the back
1921     // for little endian.
1922     unsigned DeltaElts = DestElts - SrcElts;
1923     if (IsBigEndian)
1924       ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
1925     else
1926       ShuffleMaskStorage.append(DeltaElts, NullElt);
1927     ShuffleMask = ShuffleMaskStorage;
1928   }
1929 
1930   return new ShuffleVectorInst(InVal, V2,
1931                                ConstantDataVector::get(V2->getContext(),
1932                                                        ShuffleMask));
1933 }
1934 
1935 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
1936   return Value % Ty->getPrimitiveSizeInBits() == 0;
1937 }
1938 
1939 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
1940   return Value / Ty->getPrimitiveSizeInBits();
1941 }
1942 
1943 /// V is a value which is inserted into a vector of VecEltTy.
1944 /// Look through the value to see if we can decompose it into
1945 /// insertions into the vector.  See the example in the comment for
1946 /// OptimizeIntegerToVectorInsertions for the pattern this handles.
1947 /// The type of V is always a non-zero multiple of VecEltTy's size.
1948 /// Shift is the number of bits between the lsb of V and the lsb of
1949 /// the vector.
1950 ///
1951 /// This returns false if the pattern can't be matched or true if it can,
1952 /// filling in Elements with the elements found here.
1953 static bool collectInsertionElements(Value *V, unsigned Shift,
1954                                      SmallVectorImpl<Value *> &Elements,
1955                                      Type *VecEltTy, bool isBigEndian) {
1956   assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
1957          "Shift should be a multiple of the element type size");
1958 
1959   // Undef values never contribute useful bits to the result.
1960   if (isa<UndefValue>(V)) return true;
1961 
1962   // If we got down to a value of the right type, we win: try inserting it into
1963   // the right element.
1964   if (V->getType() == VecEltTy) {
1965     // Inserting null doesn't actually insert any elements.
1966     if (Constant *C = dyn_cast<Constant>(V))
1967       if (C->isNullValue())
1968         return true;
1969 
1970     unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
1971     if (isBigEndian)
1972       ElementIndex = Elements.size() - ElementIndex - 1;
1973 
1974     // Fail if multiple elements are inserted into this slot.
1975     if (Elements[ElementIndex])
1976       return false;
1977 
1978     Elements[ElementIndex] = V;
1979     return true;
1980   }
1981 
1982   if (Constant *C = dyn_cast<Constant>(V)) {
1983     // Figure out the # elements this provides, and bitcast it or slice it up
1984     // as required.
1985     unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
1986                                         VecEltTy);
1987     // If the constant is the size of a vector element, we just need to bitcast
1988     // it to the right type so it gets properly inserted.
1989     if (NumElts == 1)
1990       return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
1991                                       Shift, Elements, VecEltTy, isBigEndian);
1992 
1993     // Okay, this is a constant that covers multiple elements.  Slice it up into
1994     // pieces and insert each element-sized piece into the vector.
1995     if (!isa<IntegerType>(C->getType()))
1996       C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
1997                                    C->getType()->getPrimitiveSizeInBits()));
1998     unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
1999     Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
2000 
2001     for (unsigned i = 0; i != NumElts; ++i) {
2002       unsigned ShiftI = Shift+i*ElementSize;
2003       Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
2004                                                                   ShiftI));
2005       Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
2006       if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
2007                                     isBigEndian))
2008         return false;
2009     }
2010     return true;
2011   }
2012 
2013   if (!V->hasOneUse()) return false;
2014 
2015   Instruction *I = dyn_cast<Instruction>(V);
2016   if (!I) return false;
2017   switch (I->getOpcode()) {
2018   default: return false; // Unhandled case.
2019   case Instruction::BitCast:
2020     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2021                                     isBigEndian);
2022   case Instruction::ZExt:
2023     if (!isMultipleOfTypeSize(
2024             I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
2025             VecEltTy))
2026       return false;
2027     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2028                                     isBigEndian);
2029   case Instruction::Or:
2030     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2031                                     isBigEndian) &&
2032            collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
2033                                     isBigEndian);
2034   case Instruction::Shl: {
2035     // Must be shifting by a constant that is a multiple of the element size.
2036     ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
2037     if (!CI) return false;
2038     Shift += CI->getZExtValue();
2039     if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
2040     return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2041                                     isBigEndian);
2042   }
2043 
2044   }
2045 }
2046 
2047 
2048 /// If the input is an 'or' instruction, we may be doing shifts and ors to
2049 /// assemble the elements of the vector manually.
2050 /// Try to rip the code out and replace it with insertelements.  This is to
2051 /// optimize code like this:
2052 ///
2053 ///    %tmp37 = bitcast float %inc to i32
2054 ///    %tmp38 = zext i32 %tmp37 to i64
2055 ///    %tmp31 = bitcast float %inc5 to i32
2056 ///    %tmp32 = zext i32 %tmp31 to i64
2057 ///    %tmp33 = shl i64 %tmp32, 32
2058 ///    %ins35 = or i64 %tmp33, %tmp38
2059 ///    %tmp43 = bitcast i64 %ins35 to <2 x float>
2060 ///
2061 /// Into two insertelements that do "buildvector{%inc, %inc5}".
2062 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
2063                                                 InstCombiner &IC) {
2064   VectorType *DestVecTy = cast<VectorType>(CI.getType());
2065   Value *IntInput = CI.getOperand(0);
2066 
2067   SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
2068   if (!collectInsertionElements(IntInput, 0, Elements,
2069                                 DestVecTy->getElementType(),
2070                                 IC.getDataLayout().isBigEndian()))
2071     return nullptr;
2072 
2073   // If we succeeded, we know that all of the elements are specified by
2074   // Elements or are zero if Elements has a null entry.  Recast this as a set
2075   // of insertions.
2076   Value *Result = Constant::getNullValue(CI.getType());
2077   for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
2078     if (!Elements[i]) continue;  // Unset element.
2079 
2080     Result = IC.Builder.CreateInsertElement(Result, Elements[i],
2081                                             IC.Builder.getInt32(i));
2082   }
2083 
2084   return Result;
2085 }
2086 
2087 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
2088 /// vector followed by extract element. The backend tends to handle bitcasts of
2089 /// vectors better than bitcasts of scalars because vector registers are
2090 /// usually not type-specific like scalar integer or scalar floating-point.
2091 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
2092                                               InstCombiner &IC) {
2093   // TODO: Create and use a pattern matcher for ExtractElementInst.
2094   auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
2095   if (!ExtElt || !ExtElt->hasOneUse())
2096     return nullptr;
2097 
2098   // The bitcast must be to a vectorizable type, otherwise we can't make a new
2099   // type to extract from.
2100   Type *DestType = BitCast.getType();
2101   if (!VectorType::isValidElementType(DestType))
2102     return nullptr;
2103 
2104   unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
2105   auto *NewVecType = VectorType::get(DestType, NumElts);
2106   auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
2107                                          NewVecType, "bc");
2108   return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
2109 }
2110 
2111 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
2112 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
2113                                             InstCombiner::BuilderTy &Builder) {
2114   Type *DestTy = BitCast.getType();
2115   BinaryOperator *BO;
2116   if (!DestTy->isIntOrIntVectorTy() ||
2117       !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
2118       !BO->isBitwiseLogicOp())
2119     return nullptr;
2120 
2121   // FIXME: This transform is restricted to vector types to avoid backend
2122   // problems caused by creating potentially illegal operations. If a fix-up is
2123   // added to handle that situation, we can remove this check.
2124   if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
2125     return nullptr;
2126 
2127   Value *X;
2128   if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
2129       X->getType() == DestTy && !isa<Constant>(X)) {
2130     // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
2131     Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
2132     return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
2133   }
2134 
2135   if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
2136       X->getType() == DestTy && !isa<Constant>(X)) {
2137     // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
2138     Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2139     return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
2140   }
2141 
2142   // Canonicalize vector bitcasts to come before vector bitwise logic with a
2143   // constant. This eases recognition of special constants for later ops.
2144   // Example:
2145   // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
2146   Constant *C;
2147   if (match(BO->getOperand(1), m_Constant(C))) {
2148     // bitcast (logic X, C) --> logic (bitcast X, C')
2149     Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2150     Value *CastedC = Builder.CreateBitCast(C, DestTy);
2151     return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
2152   }
2153 
2154   return nullptr;
2155 }
2156 
2157 /// Change the type of a select if we can eliminate a bitcast.
2158 static Instruction *foldBitCastSelect(BitCastInst &BitCast,
2159                                       InstCombiner::BuilderTy &Builder) {
2160   Value *Cond, *TVal, *FVal;
2161   if (!match(BitCast.getOperand(0),
2162              m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
2163     return nullptr;
2164 
2165   // A vector select must maintain the same number of elements in its operands.
2166   Type *CondTy = Cond->getType();
2167   Type *DestTy = BitCast.getType();
2168   if (CondTy->isVectorTy()) {
2169     if (!DestTy->isVectorTy())
2170       return nullptr;
2171     if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
2172       return nullptr;
2173   }
2174 
2175   // FIXME: This transform is restricted from changing the select between
2176   // scalars and vectors to avoid backend problems caused by creating
2177   // potentially illegal operations. If a fix-up is added to handle that
2178   // situation, we can remove this check.
2179   if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
2180     return nullptr;
2181 
2182   auto *Sel = cast<Instruction>(BitCast.getOperand(0));
2183   Value *X;
2184   if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2185       !isa<Constant>(X)) {
2186     // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
2187     Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
2188     return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
2189   }
2190 
2191   if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2192       !isa<Constant>(X)) {
2193     // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
2194     Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
2195     return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
2196   }
2197 
2198   return nullptr;
2199 }
2200 
2201 /// Check if all users of CI are StoreInsts.
2202 static bool hasStoreUsersOnly(CastInst &CI) {
2203   for (User *U : CI.users()) {
2204     if (!isa<StoreInst>(U))
2205       return false;
2206   }
2207   return true;
2208 }
2209 
2210 /// This function handles the following case:
2211 ///
2212 ///   A  ->  B    cast
2213 ///   PHI
2214 ///   B  ->  A    cast
2215 ///
2216 /// All the related PHI nodes can be replaced by new PHI nodes with type A.
2217 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
2218 Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
2219   // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
2220   if (hasStoreUsersOnly(CI))
2221     return nullptr;
2222 
2223   Value *Src = CI.getOperand(0);
2224   Type *SrcTy = Src->getType();   // Type B
2225   Type *DestTy = CI.getType();    // Type A
2226 
2227   SmallVector<PHINode *, 4> PhiWorklist;
2228   SmallSetVector<PHINode *, 4> OldPhiNodes;
2229 
2230   // Find all of the A->B casts and PHI nodes.
2231   // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
2232   // OldPhiNodes is used to track all known PHI nodes; before adding a new
2233   // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
2234   PhiWorklist.push_back(PN);
2235   OldPhiNodes.insert(PN);
2236   while (!PhiWorklist.empty()) {
2237     auto *OldPN = PhiWorklist.pop_back_val();
2238     for (Value *IncValue : OldPN->incoming_values()) {
2239       if (isa<Constant>(IncValue))
2240         continue;
2241 
2242       if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
2243         // If there is a sequence of one or more load instructions, each loaded
2244         // value is used as the address of a later load instruction; a bitcast
2245         // would be necessary to change the value type, so don't optimize it.
2246         // For simplicity we give up if the load address comes from another load.
2247         Value *Addr = LI->getOperand(0);
2248         if (Addr == &CI || isa<LoadInst>(Addr))
2249           return nullptr;
2250         if (LI->hasOneUse() && LI->isSimple())
2251           continue;
2252         // If a LoadInst has more than one use, changing the type of loaded
2253         // value may create another bitcast.
2254         return nullptr;
2255       }
2256 
2257       if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
2258         if (OldPhiNodes.insert(PNode))
2259           PhiWorklist.push_back(PNode);
2260         continue;
2261       }
2262 
2263       auto *BCI = dyn_cast<BitCastInst>(IncValue);
2264       // We can't handle other instructions.
2265       if (!BCI)
2266         return nullptr;
2267 
2268       // Verify it's an A->B cast.
2269       Type *TyA = BCI->getOperand(0)->getType();
2270       Type *TyB = BCI->getType();
2271       if (TyA != DestTy || TyB != SrcTy)
2272         return nullptr;
2273     }
2274   }
2275 
2276   // Check that each user of each old PHI node is something that we can
2277   // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
2278   for (auto *OldPN : OldPhiNodes) {
2279     for (User *V : OldPN->users()) {
2280       if (auto *SI = dyn_cast<StoreInst>(V)) {
2281         if (!SI->isSimple() || SI->getOperand(0) != OldPN)
2282           return nullptr;
2283       } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2284         // Verify it's a B->A cast.
2285         Type *TyB = BCI->getOperand(0)->getType();
2286         Type *TyA = BCI->getType();
2287         if (TyA != DestTy || TyB != SrcTy)
2288           return nullptr;
2289       } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2290         // As long as the user is another old PHI node, then even if we don't
2291         // rewrite it, the PHI web we're considering won't have any users
2292         // outside itself, so it'll be dead.
2293         if (OldPhiNodes.count(PHI) == 0)
2294           return nullptr;
2295       } else {
2296         return nullptr;
2297       }
2298     }
2299   }
2300 
2301   // For each old PHI node, create a corresponding new PHI node with a type A.
2302   SmallDenseMap<PHINode *, PHINode *> NewPNodes;
2303   for (auto *OldPN : OldPhiNodes) {
2304     Builder.SetInsertPoint(OldPN);
2305     PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2306     NewPNodes[OldPN] = NewPN;
2307   }
2308 
2309   // Fill in the operands of new PHI nodes.
2310   for (auto *OldPN : OldPhiNodes) {
2311     PHINode *NewPN = NewPNodes[OldPN];
2312     for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2313       Value *V = OldPN->getOperand(j);
2314       Value *NewV = nullptr;
2315       if (auto *C = dyn_cast<Constant>(V)) {
2316         NewV = ConstantExpr::getBitCast(C, DestTy);
2317       } else if (auto *LI = dyn_cast<LoadInst>(V)) {
2318         // Explicitly perform load combine to make sure no opposing transform
2319         // can remove the bitcast in the meantime and trigger an infinite loop.
2320         Builder.SetInsertPoint(LI);
2321         NewV = combineLoadToNewType(*LI, DestTy);
2322         // Remove the old load and its use in the old phi, which itself becomes
2323         // dead once the whole transform finishes.
2324         replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
2325         eraseInstFromFunction(*LI);
2326       } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2327         NewV = BCI->getOperand(0);
2328       } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
2329         NewV = NewPNodes[PrevPN];
2330       }
2331       assert(NewV);
2332       NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2333     }
2334   }
2335 
2336   // Traverse all accumulated PHI nodes and process their users,
2337   // which are Stores and BitCasts. Without this processing
2338   // NewPHI nodes could be replicated and could lead to extra
2339   // moves generated after DeSSA.
2340   // If there is a store with type B, change it to type A.
2341 
2342 
2343   // Replace users of BitCast B->A with NewPHI. These will help
2344   // later to get rid of the closure formed by OldPHI nodes.
2345   Instruction *RetVal = nullptr;
2346   for (auto *OldPN : OldPhiNodes) {
2347     PHINode *NewPN = NewPNodes[OldPN];
2348     for (auto It = OldPN->user_begin(), End = OldPN->user_end(); It != End; ) {
2349       User *V = *It;
2350       // We may remove this user, advance to avoid iterator invalidation.
2351       ++It;
2352       if (auto *SI = dyn_cast<StoreInst>(V)) {
2353         assert(SI->isSimple() && SI->getOperand(0) == OldPN);
2354         Builder.SetInsertPoint(SI);
2355         auto *NewBC =
2356             cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
2357         SI->setOperand(0, NewBC);
2358         Worklist.push(SI);
2359         assert(hasStoreUsersOnly(*NewBC));
2360       }
2361       else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2362         Type *TyB = BCI->getOperand(0)->getType();
2363         Type *TyA = BCI->getType();
2364         assert(TyA == DestTy && TyB == SrcTy);
2365         (void) TyA;
2366         (void) TyB;
2367         Instruction *I = replaceInstUsesWith(*BCI, NewPN);
2368         if (BCI == &CI)
2369           RetVal = I;
2370       } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2371         assert(OldPhiNodes.count(PHI) > 0);
2372         (void) PHI;
2373       } else {
2374         llvm_unreachable("all uses should be handled");
2375       }
2376     }
2377   }
2378 
2379   return RetVal;
2380 }
2381 
2382 Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
2383   // If the operands are integer typed then apply the integer transforms,
2384   // otherwise just apply the common ones.
2385   Value *Src = CI.getOperand(0);
2386   Type *SrcTy = Src->getType();
2387   Type *DestTy = CI.getType();
2388 
2389   // Get rid of casts from one type to the same type. These are useless and can
2390   // be replaced by the operand.
2391   if (DestTy == Src->getType())
2392     return replaceInstUsesWith(CI, Src);
2393 
2394   if (PointerType *DstPTy = dyn_cast<PointerType>(DestTy)) {
2395     PointerType *SrcPTy = cast<PointerType>(SrcTy);
2396     Type *DstElTy = DstPTy->getElementType();
2397     Type *SrcElTy = SrcPTy->getElementType();
2398 
2399     // Casting pointers between the same type but with different address
2400     // spaces is an addrspace cast rather than a bitcast.
2401     if ((DstElTy == SrcElTy) &&
2402         (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
2403       return new AddrSpaceCastInst(Src, DestTy);
2404 
2405     // If we are casting an alloca to a pointer to a type of the same
2406     // size, rewrite the allocation instruction to allocate the "right" type.
2407     // There is no need to modify malloc calls because it is their bitcast that
2408     // needs to be cleaned up.
2409     if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
2410       if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
2411         return V;
2412 
2413     // When the type pointed to is not sized, the cast cannot be
2414     // turned into a gep.
2415     Type *PointeeType =
2416         cast<PointerType>(Src->getType()->getScalarType())->getElementType();
2417     if (!PointeeType->isSized())
2418       return nullptr;
2419 
2420     // If the source and destination are pointers, and this cast is equivalent
2421     // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
2422     // This can enhance SROA and other transforms that want type-safe pointers.
2423     unsigned NumZeros = 0;
2424     while (SrcElTy && SrcElTy != DstElTy) {
2425       SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
2426       ++NumZeros;
2427     }
2428 
2429     // If we found a path from the src to dest, create the getelementptr now.
2430     if (SrcElTy == DstElTy) {
2431       SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
2432       GetElementPtrInst *GEP =
2433           GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
2434 
2435       // If the source pointer is dereferenceable, then assume it points to an
2436       // allocated object and apply "inbounds" to the GEP.
2437       bool CanBeNull;
2438       if (Src->getPointerDereferenceableBytes(DL, CanBeNull)) {
2439         // In a non-default address space (not 0), a null pointer can not be
2440         // assumed inbounds, so ignore that case (dereferenceable_or_null).
2441         // The reason is that 'null' is not treated differently in these address
2442         // spaces, and we consequently ignore the 'gep inbounds' special case
2443         // for 'null' which allows 'inbounds' on 'null' if the indices are
2444         // zeros.
2445         if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
2446           GEP->setIsInBounds();
2447       }
2448       return GEP;
2449     }
2450   }
2451 
2452   if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
2453     if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
2454       Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
2455       return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
2456                      Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2457       // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
2458     }
2459 
2460     if (isa<IntegerType>(SrcTy)) {
2461       // If this is a cast from an integer to vector, check to see if the input
2462       // is a trunc or zext of a bitcast from vector. If so, we can replace all
2463       // the casts with a shuffle and (potentially) a bitcast.
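      // For example:
      //   %i = bitcast <4 x i32> %x to i128
      //   %t = trunc i128 %i to i64
      //   %v = bitcast i64 %t to <2 x i32>
      // can become a single shufflevector that selects two lanes of %x.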
2464       if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
2465         CastInst *SrcCast = cast<CastInst>(Src);
2466         if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
2467           if (isa<VectorType>(BCIn->getOperand(0)->getType()))
2468             if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
2469                     BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
2470               return I;
2471       }
2472 
2473       // If the input is an 'or' instruction, we may be doing shifts and ors to
2474       // assemble the elements of the vector manually.  Try to rip the code out
2475       // and replace it with insertelements.
2476       if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
2477         return replaceInstUsesWith(CI, V);
2478     }
2479   }
2480 
2481   if (VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy)) {
2482     if (SrcVTy->getNumElements() == 1) {
2483       // If our destination is not a vector, then make this a straight
2484       // scalar-scalar cast.
2485       if (!DestTy->isVectorTy()) {
2486         Value *Elem =
2487           Builder.CreateExtractElement(Src,
2488                      Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2489         return CastInst::Create(Instruction::BitCast, Elem, DestTy);
2490       }
2491 
2492       // Otherwise, see if our source is an insert. If so, then use the scalar
2493       // component directly:
2494       // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
2495       if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
2496         return new BitCastInst(InsElt->getOperand(1), DestTy);
2497     }
2498   }
2499 
2500   if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
2501     // Okay, we have (bitcast (shuffle ..)).  Check to see if this is
2502     // a bitcast to a vector with the same # elts.
2503     Value *ShufOp0 = Shuf->getOperand(0);
2504     Value *ShufOp1 = Shuf->getOperand(1);
2505     unsigned NumShufElts = Shuf->getType()->getVectorNumElements();
2506     unsigned NumSrcVecElts = ShufOp0->getType()->getVectorNumElements();
2507     if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
2508         DestTy->getVectorNumElements() == NumShufElts &&
2509         NumShufElts == NumSrcVecElts) {
2510       BitCastInst *Tmp;
2511       // If either of the operands is a cast from CI.getType(), then
2512       // evaluating the shuffle in the casted destination's type will allow
2513       // us to eliminate at least one cast.
2514       if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
2515            Tmp->getOperand(0)->getType() == DestTy) ||
2516           ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
2517            Tmp->getOperand(0)->getType() == DestTy)) {
2518         Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
2519         Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
2520         // Return a new shuffle vector.  Use the same element IDs, as we
2521         // know the vector types match #elts.
2522         return new ShuffleVectorInst(LHS, RHS, Shuf->getOperand(2));
2523       }
2524     }
2525 
2526     // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
2527     // a byte-swap:
2528     // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
2529     // TODO: We should match the related pattern for bitreverse.
2530     if (DestTy->isIntegerTy() &&
2531         DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
2532         SrcTy->getScalarSizeInBits() == 8 && NumShufElts % 2 == 0 &&
2533         Shuf->hasOneUse() && Shuf->isReverse()) {
2534       assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
2535       assert(isa<UndefValue>(ShufOp1) && "Unexpected shuffle op");
2536       Function *Bswap =
2537           Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
2538       Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
2539       return IntrinsicInst::Create(Bswap, { ScalarX });
2540     }
2541   }
2542 
2543   // Handle the A->B->A cast sequence where there is an intervening PHI node.
2544   if (PHINode *PN = dyn_cast<PHINode>(Src))
2545     if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
2546       return I;
2547 
2548   if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
2549     return I;
2550 
2551   if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
2552     return I;
2553 
2554   if (Instruction *I = foldBitCastSelect(CI, Builder))
2555     return I;
2556 
2557   if (SrcTy->isPointerTy())
2558     return commonPointerCastTransforms(CI);
2559   return commonCastTransforms(CI);
2560 }
2561 
2562 Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
2563   // If the destination pointer element type is not the same as the source's,
2564   // first do a bitcast to the destination type, and then the addrspacecast.
2565   // This allows the cast to be exposed to other transforms.
2566   Value *Src = CI.getOperand(0);
2567   PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
2568   PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());
2569 
2570   Type *DestElemTy = DestTy->getElementType();
2571   if (SrcTy->getElementType() != DestElemTy) {
2572     Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
2573     if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
2574       // Handle vectors of pointers.
2575       MidTy = VectorType::get(MidTy, VT->getNumElements());
2576     }
2577 
2578     Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
2579     return new AddrSpaceCastInst(NewBitCast, CI.getType());
2580   }
2581 
2582   return commonPointerCastTransforms(CI);
2583 }
2584 