//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <numeric>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
            decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}
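// Illustrative examples of the decomposition above: 'mul nuw i32 %x, 4'
// yields %x with Scale = 4, Offset = 0, and 'add nuw i32 (shl nuw i32 %x, 3),
// 24' yields %x with Scale = 8, Offset = 24. Anything that may wrap (no
// nuw/nsw flags) is returned as-is with Scale = 1, Offset = 0.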
/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
      decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}
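// Illustrative example of the promotion above: given
//   %buf = alloca [2 x i32]
//   %p   = bitcast [2 x i32]* %buf to i64*
// the sizes divide evenly and i64's ABI alignment is at least that of
// [2 x i32], so the alloca can be rewritten as 'alloca i64' and the bitcast
// then folds away.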
/// Given an expression that canEvaluateTruncated or canEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}
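// Illustrative cast-pair examples: 'zext i16 (zext i8 %x to i16) to i32'
// collapses to a single 'zext i8 %x to i32', and 'trunc i16 (trunc i32 %x to
// i16) to i8' to a single 'trunc i32 %x to i8'. The final check above rejects
// pairs that would collapse to an inttoptr or ptrtoint involving an integer
// type that is not pointer-sized.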
/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Ty = CI.getType();
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we can not evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
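///
/// Illustrative example: 'trunc (add i32 %x, %y) to i8' can instead be done
/// as 'add i8 (trunc %x), (trunc %y)', because the low 8 bits of an add
/// depend only on the low 8 bits of its operands.
///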
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // inrange amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    // zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to the sign bit of the truncate type.
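    // e.g. truncating 'ashr i32 %x, 4' to i16 is fine when %x has more than
    // 16 sign bits: the 16 bits that survive the truncation never see the
    // shifted-in bits.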
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Rotate left/right may occur in a wider type than necessary because of type
/// promotion rules. Try to narrow the inputs and convert to funnel shift.
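/// Illustrative example (an i8 rotate-right performed in i32):
///   trunc (or (lshr (zext %x), %amt), (shl (zext %x), (sub 8, %amt))) to i8
///     -->
///   call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 (trunc %amt))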
Instruction *InstCombinerImpl::narrowRotate(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts with the same shifted operand:
  // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
  Value *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
    return nullptr;

  Value *ShVal, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
    return nullptr;

  auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
  auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
  if (ShiftOpcode0 == ShiftOpcode1)
    return nullptr;

  // Match the shift amount operands for a rotate pattern. This always matches
  // a subtraction on the R operand.
  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal, L) | (lshr ShVal, Width - L)
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
      return L;

    // The shift amount may be masked with negation:
    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool SubIsOnLHS = false;
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    SubIsOnLHS = true;
  }
  if (!ShAmt)
    return nullptr;

  // The shifted value must have high zeros in the wide type. Typically, this
  // will be a zext, but it could also be the result of an 'and' or 'shift'.
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X = Builder.CreateTrunc(ShVal, DestTy);
  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
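  // The rotate direction follows whichever shift carried the plain
  // (non-subtracted) amount: if that was the shl, the result is a rotate
  // left (fshl); otherwise a rotate right (fshr).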
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
/// TODO: Transforms for truncated shifts should be moved into here.
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }

  default: break;
  }

  if (Instruction *NarrowOr = narrowRotate(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      is_splat(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getShuffleMask());
  }

  return nullptr;
}
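// Illustrative example of the splat-shuffle shrink above:
//   trunc (shufflevector <4 x i32> %x, undef, zeroinitializer) to <4 x i8>
//     -->
//   shufflevector <4 x i8> (trunc %x), undef, zeroinitializer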
/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();
  ConstantInt *Cst;

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
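  // (A min/max here is a select of an icmp on the same operands; rewriting
  // the demanded bits of those operands could destroy that matchable form.)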
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  // FIXME: Maybe combine the next two transforms to handle the no cast case
  // more efficiently. Support vector types. Cleanup code by using m_OneUse.

  // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
  Value *A = nullptr;
  if (Src->hasOneUse() &&
      match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
    // We have three types to worry about here, the type of A, the source of
    // the truncate (MidSize), and the destination of the truncate. We know that
    // ASize < MidSize and MidSize > ResultSize, but don't know the relation
    // between ASize and ResultSize.
    unsigned ASize = A->getType()->getPrimitiveSizeInBits();

    // If the shift amount is larger than the size of A, then the result is
    // known to be zero because all the input bits got shifted out.
    if (Cst->getZExtValue() >= ASize)
      return replaceInstUsesWith(Trunc, Constant::getNullValue(DestTy));

    // Since we're doing an lshr and a zero extend, and know that the shift
    // amount is smaller than ASize, it is always safe to do the shift in A's
    // type, then zero extend or truncate to the result.
    Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue());
    Shift->takeName(Src);
    return CastInst::CreateIntegerCast(Shift, DestTy, false);
  }

  const APInt *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_APInt(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
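    // e.g. with A : i8, SrcWidth == 32 and DestWidth == 16, MaxShiftAmt is
    // 32 - max(16, 8) = 16: a shift of at most 16 only creates zeros in bits
    // that the truncate discards anyway.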
    if (C->getZExtValue() <= MaxShiftAmt) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        unsigned ShAmt = std::min((unsigned)C->getZExtValue(), DestWidth - 1);
        return BinaryOperator::CreateAShr(A, ConstantInt::get(DestTy, ShAmt));
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        unsigned ShAmt = std::min((unsigned)C->getZExtValue(), AWidth - 1);
        Value *Shift = Builder.CreateAShr(A, ShAmt);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
      shouldChangeType(SrcTy, DestTy)) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      if (Cst->getValue().ult(DestWidth)) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");

        return BinaryOperator::Create(
            Instruction::Shl, NewTrunc,
            ConstantInt::get(DestTy, Cst->getValue().trunc(DestWidth)));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<FixedVectorType>(VecOp->getType());
    unsigned VecNumElts = VecOpTy->getNumElements();

    // A badly fit destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecNumElts * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo = FixedVectorType::get(DestTy, BitCastNumElts);
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext,
                                                 bool DoTransform) {
  // If we are just checking for a icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
      if (!DoTransform) return Cmp;

      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1CV is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return Cmp;

        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
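  // e.g. when %a and %b have identical known bits and only bit 3 is unknown
  // in each:
  //   zext (icmp ne i32 %a, %b) to i32 --> lshr (xor %a, %b), 3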
  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return Cmp;

          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
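      // e.g. promoting '%c = and i32 (lshr i32 (trunc i64 %a to i32), 8), 255'
      // to i64: the lshr operand carries BitsToClear == 8, but the mask 255
      // already has those high bits clear, so BitsToClear can be reset to 0
      // below.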
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits, we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
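  // (canEvaluateZExtd reports, via BitsToClear, how many high bits of the
  // promoted value may fail to be zero; the 'and' emitted below clears them
  // when needed.)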
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize-SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize,
                                                        SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    //   SrcSize <  DstSize: zext(a & mask)
    //   SrcSize == DstSize: a & mask
    //   SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // corresponding elimination.
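    // e.g. zext (or (icmp slt i32 %x, 0), (icmp eq i32 %y, 0)) to i32
    //   --> or (lshr %x, 31), (zext (icmp eq i32 %y, 0))
    // where the first zext(icmp) has been lowered to a shift.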
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
      Value *Or = Builder.CreateOr(LCast, RCast, CI.getName());
      if (auto *OrInst = dyn_cast<Instruction>(Or))
        Builder.SetInsertPoint(OrInst);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return replaceInstUsesWith(CI, Or);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI,
                                                 Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != CI.getType())
      In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

    if (Pred == ICmpInst::ICMP_SGT)
      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(CI, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
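          // e.g. if %x can only have bit 2 set, 'icmp eq i32 %x, 2' can never
          // be true, so 'sext (icmp eq i32 %x, 2)' folds to 0 (and the 'ne'
          // form to all-ones).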
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                  KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the
/// common low bits. This is used by code that tries to promote integer
/// operations to a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}
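// Illustrative example: 'sext i16 (add i16 (trunc i32 %x to i16), 7) to i32'
// qualifies; the add can be evaluated directly in i32 as 'add i32 %x, 7',
// and visitSExt then re-creates the sign bits with shl/ashr only if the wide
// value does not already have them.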
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  KnownBits Known = computeKnownBits(Src, 0, &CI);
  if (Known.isNonNegative())
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this! Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn
  // sext(trunc(x)) into shifts.
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of a same constant, then this is a sign
  // extension from a smaller value. If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension. Since we don't, look deeper and check
  // for a truncate. If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts. For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
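  // Working the arithmetic for the example above: MidSize = 8 and
  // SrcDstSize = 32, so the combined shift amount computed below is
  // CA + (SrcDstSize - MidSize) = 6 + 24 = 30, matching the shl/ashr by 30.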
  Constant *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
                        m_Constant(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    Constant *SizeDiff = ConstantInt::get(CA->getType(), SrcDstSize - MidSize);
    Constant *ShAmt = ConstantExpr::getAdd(CA, SizeDiff);
    Constant *ShAmtExt = ConstantExpr::getSExt(ShAmt, CI.getType());
    A = Builder.CreateShl(A, ShAmtExt, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtExt);
  }

  return nullptr;
}

/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr; // No constant folding of this.
  // See if the value can be truncated to half and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr; // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}

// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
// TODO: Make these support undef elements.
static Type *shrinkFPConstantVector(Value *V) {
  auto *CV = dyn_cast<Constant>(V);
  auto *CVVTy = dyn_cast<VectorType>(V->getType());
  if (!CV || !CVVTy)
    return nullptr;

  Type *MinType = nullptr;

  unsigned NumElts = cast<FixedVectorType>(CVVTy)->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
    if (!CFP)
      return nullptr;

    Type *T = shrinkFPConstant(CFP);
    if (!T)
      return nullptr;

    // If we haven't found a type yet or this type has a larger mantissa than
    // our previous type, this is our new minimal type.
    if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
      MinType = T;
  }

  // Make a vector type from the minimal type.
  return FixedVectorType::get(MinType, NumElts);
}

/// Find the minimum FP type we can safely truncate to.
static Type *getMinimumFPType(Value *V) {
  if (auto *FPExt = dyn_cast<FPExtInst>(V))
    return FPExt->getOperand(0)->getType();

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it. This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    if (Type *T = shrinkFPConstant(CFP))
      return T;

  // Try to shrink a vector of FP constants.
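  // For example, the constant <2 x double> <1.0, 1.0> shrinks to <2 x half>,
  // since 1.0 is exactly representable at half precision.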
  if (Type *T = shrinkFPConstantVector(V))
    return T;

  return V->getType();
}

/// Return true if the cast from integer to FP can be proven to be exact for
/// all possible inputs (the conversion does not lose any precision).
static bool isKnownExactCastIntToFP(CastInst &I) {
  CastInst::CastOps Opcode = I.getOpcode();
  assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
         "Unexpected cast");
  Value *Src = I.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *FPTy = I.getType();
  bool IsSigned = Opcode == Instruction::SIToFP;
  int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;

  // Easy case - if the source integer type has fewer bits than the FP
  // mantissa, then the cast must be exact.
  int DestNumSigBits = FPTy->getFPMantissaWidth();
  if (SrcSize <= DestNumSigBits)
    return true;

  // Cast from FP to integer and back to FP is independent of the intermediate
  // integer width because of poison on overflow.
  Value *F;
  if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
    // If this is uitofp (fptosi F), the source needs an extra bit to avoid
    // potential rounding of negative FP input values.
    int SrcNumSigBits = F->getType()->getFPMantissaWidth();
    if (!IsSigned && match(Src, m_FPToSI(m_Value())))
      SrcNumSigBits++;

    // [su]itofp (fpto[su]i F) --> exact if the source type has less or equal
    // significant bits than the destination (and make sure neither type is
    // weird -- ppc_fp128).
    if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
        SrcNumSigBits <= DestNumSigBits)
      return true;
  }

  // TODO:
  // Try harder to find if the source integer type has less significant bits.
  // For example, compute number of sign bits or compute low bit mask.
  return false;
}

Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
  if (Instruction *I = commonCastTransforms(FPT))
    return I;

  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
  Type *Ty = FPT.getType();
  auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
  if (BO && BO->hasOneUse()) {
    Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
    Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
    unsigned OpWidth = BO->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
    unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = Ty->getFPMantissaWidth();
    switch (BO->getOpcode()) {
    default: break;
    case Instruction::FAdd:
    case Instruction::FSub:
      // For addition and subtraction, the infinitely precise result can
      // essentially be arbitrarily wide; proving that double rounding
      // will not occur because the result of OpI is exact (as we will for
      // FMul, for example) is hopeless.
      // However, we *can* nonetheless frequently know that double rounding
      // cannot occur (or that it is innocuous) by taking advantage of the
      // specific structure of infinitely-precise results that admit double
      // rounding.
      //
      // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
      // to represent both sources, we can guarantee that the double
      // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
      // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
      // for proof of this fact).
      //
      // Note: Figueroa does not consider the case where DstFormat !=
      // SrcFormat. It's possible (likely even!) that this analysis
      // could be tightened for those cases, but they are rare (the main
      // case of interest here is (float)((double)float + float)).
      if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
        RI->copyFastMathFlags(BO);
        return RI;
      }
      break;
    case Instruction::FMul:
      // For multiplication, the infinitely precise result has at most
      // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
      // that such a value can be exactly represented, then no double
      // rounding can possibly occur; we can safely perform the operation
      // in the destination format if it can represent both sources.
      if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FDiv:
      // For division, we again use the bound from Figueroa's dissertation.
      // I am entirely certain that this bound can be tightened in the
      // unbalanced operand case by an analysis based on the diophantine
      // rational approximation bound, but the well-known condition used here
      // is a good conservative first pass.
      // TODO: Tighten bound via rigorous analysis of the unbalanced case.
      if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FRem: {
      // Remainder is straightforward. Remainder is always exact, so the
      // type of OpI doesn't enter into things at all. We simply evaluate
      // in whichever source type is larger, then convert to the
      // destination type.
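      // For example, if both operands are fpexts from float to double, the
      // frem is evaluated in float and the exact result is then converted to
      // the destination type.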
      if (SrcWidth == OpWidth)
        break;
      Value *LHS, *RHS;
      if (LHSWidth == SrcWidth) {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
      } else {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
      }

      Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
      return CastInst::CreateFPCast(ExactResult, Ty);
    }
    }
  }

  // (fptrunc (fneg x)) -> (fneg (fptrunc x))
  Value *X;
  Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
  if (Op && Op->hasOneUse()) {
    // FIXME: The FMF should propagate from the fptrunc, not the source op.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    if (isa<FPMathOperator>(Op))
      Builder.setFastMathFlags(Op->getFastMathFlags());

    if (match(Op, m_FNeg(m_Value(X)))) {
      Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
      return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
    }

    // If we are truncating a select that has an extended operand, we can
    // narrow the other operand and do the select as a narrow op.
    Value *Cond, *X, *Y;
    if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
    if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
  }

  if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::ceil:
    case Intrinsic::fabs:
    case Intrinsic::floor:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the
      // unary FP operation to be itself an fpext from the type to which
      // we're truncating.
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
          break;
      }

      // Do unary FP operation on smaller type.
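      // For the guarded intrinsics this means, for example, that
      //   fptrunc (ceil (fpext X)) -> ceil (fptrunc (fpext X))
      // where the inner fptrunc(fpext X) later folds to X.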
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
      Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
                                                     II->getIntrinsicID(), Ty);
      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);
      CallInst *NewCI =
          CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(FPT, Builder))
    return I;

  Value *Src = FPT.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
  // If the source operand is a cast from integer to FP and known exact, then
  // cast the integer operand directly to the destination type.
  Type *Ty = FPExt.getType();
  Value *Src = FPExt.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return commonCastTransforms(FPExt);
}

/// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
/// This is safe if the intermediate type has enough bits in its mantissa to
/// accurately represent all values of X. For example, this won't work with
/// i64 -> float -> i64.
Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;

  auto *OpI = cast<CastInst>(FI.getOperand(0));
  Value *X = OpI->getOperand(0);
  Type *XType = X->getType();
  Type *DestType = FI.getType();
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output,
  // since a negative input would lead to undefined behavior.
  if (!isKnownExactCastIntToFP(*OpI)) {
    // The first cast may not round exactly based on the source integer width
    // and FP width, but the overflow UB rules can still allow this to fold.
    // If the destination type is narrow, that means the intermediate FP value
    // must be large enough to hold the source value exactly.
    // For example, (uint8_t)(float)(uint32_t)16777217 is undefined behavior.
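    // With a narrow enough destination, every in-range result fits in the FP
    // mantissa, so whenever the final conversion is defined, the intermediate
    // FP value must equal the original integer and the fold remains valid.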
    int OutputSize = (int)DestType->getScalarSizeInBits() - IsOutputSigned;
    if (OutputSize > OpI->getType()->getFPMantissaWidth())
      return nullptr;
  }

  if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
    bool IsInputSigned = isa<SIToFPInst>(OpI);
    if (IsInputSigned && IsOutputSigned)
      return new SExtInst(X, DestType);
    return new ZExtInst(X, DestType);
  }
  if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
    return new TruncInst(X, DestType);

  assert(XType == DestType && "Unexpected types for int to FP to int casts");
  return replaceInstUsesWith(FI, X);
}

Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
  // cast to be exposed to other transforms.
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
    // Handle vectors of pointers.
    if (auto *CIVTy = dyn_cast<VectorType>(CI.getType()))
      Ty = VectorType::get(Ty, CIVTy->getElementCount());

    Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

/// Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset,
    // turn this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices() &&
        // If CI is an addrspacecast and GEP changes the pointer type, merging
        // GEP into CI would undo canonicalizing addrspacecast with different
        // pointer types, causing infinite loops.
        (!isa<AddrSpaceCastInst>(CI) ||
         GEP->getType() == GEP->getPointerOperandType())) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      return replaceOperand(CI, 0, GEP->getOperand(0));
    }
  }

  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
  // to be exposed to other transforms.
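  // For example, on a target with 64-bit pointers:
  //   ptrtoint i8* %p to i32 --> trunc (ptrtoint i8* %p to i64) to i32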
  Value *SrcOp = CI.getPointerOperand();
  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();
  unsigned TySize = Ty->getScalarSizeInBits();
  unsigned PtrSize = DL.getPointerSizeInBits(AS);
  if (TySize != PtrSize) {
    Type *IntPtrTy = DL.getIntPtrType(CI.getContext(), AS);
    if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
      // Handle vectors of pointers.
      // FIXME: what should happen for scalable vectors?
      IntPtrTy = FixedVectorType::get(
          IntPtrTy, cast<FixedVectorType>(VecTy)->getNumElements());
    }

    Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
    return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
  }

  Value *Vec, *Scalar, *Index;
  if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
                                        m_Value(Scalar), m_Value(Index)))) &&
      Vec->getType() == Ty) {
    assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
    // Convert the scalar to int followed by insert to eliminate one cast:
    // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
    Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
    return InsertElementInst::Create(Vec, NewCast, Index);
  }

  return commonPointerCastTransforms(CI);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type. Since the zext/trunc
/// is done using an integer type, we have a (bitcast(cast(bitcast))) pattern,
/// and endianness will impact which end of the vector that is extended or
/// truncated.
///
/// A vector is always stored with index 0 at the lowest address, which
/// corresponds to the most significant bits for a big endian stored integer
/// and the least significant bits for little endian. A trunc/zext of an
/// integer impacts the big end of the integer. Thus, we need to add/remove
/// elements at the front of the vector for big endian targets, and the back
/// of the vector for little endian targets.
///
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
static Instruction *
optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
                                        InstCombinerImpl &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be
    // the same size. There is no specific reason we couldn't handle things
    // like <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't
    // gotten there yet.
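    // For example, <4 x float> -> <2 x i32>: the source is first bitcast to
    // <4 x i32> (matching 32-bit elements), and the change in element count
    // is then handled by the shuffle logic below.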
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy =
        FixedVectorType::get(DestTy->getElementType(),
                             cast<FixedVectorType>(SrcTy)->getNumElements());
    InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
  }

  bool IsBigEndian = IC.getDataLayout().isBigEndian();
  unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
  unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();

  assert(SrcElts != DestElts && "Element counts should be different.");

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  SmallVector<int, 16> ShuffleMaskStorage;
  ArrayRef<int> ShuffleMask;
  Value *V2;

  // Produce an identity shuffle mask for the src vector.
  ShuffleMaskStorage.resize(SrcElts);
  std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);

  if (SrcElts > DestElts) {
    // If we're shrinking the number of elements (rewriting an integer
    // truncate), just shuffle in the elements corresponding to the least
    // significant bits from the input and use undef as the second shuffle
    // input.
    V2 = UndefValue::get(SrcTy);
    // Make sure the shuffle mask selects the "least significant bits" by
    // keeping elements from the back of the src vector for big endian, and
    // from the front for little endian.
    ShuffleMask = ShuffleMaskStorage;
    if (IsBigEndian)
      ShuffleMask = ShuffleMask.take_back(DestElts);
    else
      ShuffleMask = ShuffleMask.take_front(DestElts);
  } else {
    // If we're increasing the number of elements (rewriting an integer zext),
    // shuffle in all of the elements from InVal. Fill the rest of the result
    // elements with zeros from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    // Use the first elt from V2 when indicating zero in the shuffle mask.
    uint32_t NullElt = SrcElts;
    // Extend with null values in the "most significant bits" by adding
    // elements in front of the src vector for big endian, and at the back
    // for little endian.
    unsigned DeltaElts = DestElts - SrcElts;
    if (IsBigEndian)
      ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts,
                                NullElt);
    else
      ShuffleMaskStorage.append(DeltaElts, NullElt);
    ShuffleMask = ShuffleMaskStorage;
  }

  return new ShuffleVectorInst(InVal, V2, ShuffleMask);
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector. See the example in the comment for
/// OptimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
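///
/// For example, on a little-endian target, decomposing
///   or (zext (bitcast float %a to i32) to i64),
///      (shl (zext (bitcast float %b to i32) to i64), 32)
/// over f32 elements fills in Elements = {%a, %b}.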
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win, try inserting into
  // the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements. Slice it up
    // into pieces and insert each element-sized piece into the vector.
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                   C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift + i * ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(), VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
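    // For example, with 32-bit elements, a shl by 32 moves the inserted value
    // from element slot 0 to slot 1, while a shl by 16 leaves the accumulated
    // shift misaligned with the element boundaries and is rejected.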
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  }

  }
}

/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements. This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombinerImpl &IC) {
  auto *DestVecTy = cast<FixedVectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry. Recast this as a set
  // of insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.

    Result = IC.Builder.CreateInsertElement(Result, Elements[i],
                                            IC.Builder.getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element. The backend tends to handle bitcasts
/// of vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombinerImpl &IC) {
  // TODO: Create and use a pattern matcher for ExtractElementInst.
  auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
  if (!ExtElt || !ExtElt->hasOneUse())
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
  Type *DestType = BitCast.getType();
  if (!VectorType::isValidElementType(DestType))
    return nullptr;

  auto *NewVecType = VectorType::get(DestType, ExtElt->getVectorOperandType());
  auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
                                         NewVecType, "bc");
  return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
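/// For example:
///   bitcast (and (bitcast <4 x i8> %X to <2 x i16>), %Y) to <4 x i8>
///     --> and %X, (bitcast %Y to <4 x i8>)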
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;
  if (!DestTy->isIntOrIntVectorTy() ||
      !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  //   icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = Builder.CreateBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
    if (!DestTy->isVectorTy())
      return nullptr;
    if (cast<FixedVectorType>(DestTy)->getNumElements() !=
        cast<FixedVectorType>(CondVTy)->getNumElements())
      return nullptr;
  }

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
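  // For example, a select of <2 x i64> values whose result is bitcast to
  // i128 is left alone here; rewriting it would create an i128 select that
  // many backends handle poorly.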
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///     A -> B cast
///     PHI
///     B -> A cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
                                                      PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();   // Type B
  Type *DestTy = CI.getType();    // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions, where each
        // loaded value is used as the address of a later load, a bitcast is
        // necessary to change the value type, so don't optimize it. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        if (!SI->isSimple() || SI->getOperand(0) != OldPN)
          return nullptr;
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        // Verify it's a B->A cast.
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return nullptr;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        if (OldPhiNodes.count(PHI) == 0)
          return nullptr;
      } else {
        return nullptr;
      }
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of the new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        // Explicitly perform load combine to make sure no opposing transform
        // can remove the bitcast in the meantime and trigger an infinite loop.
        Builder.SetInsertPoint(LI);
        NewV = combineLoadToNewType(*LI, DestTy);
        // Remove the old load and its use in the old phi, which itself
        // becomes dead once the whole transform finishes.
        replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
        eraseInstFromFunction(*LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process their users, which are
  // Stores and BitCasts. Without this processing, the new PHI nodes could be
  // replicated and lead to extra moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI. These will help
  // later to get rid of a web formed by the old PHI nodes.
  Instruction *RetVal = nullptr;
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (auto It = OldPN->user_begin(), End = OldPN->user_end(); It != End; ) {
      User *V = *It;
      // We may remove this user, advance to avoid iterator invalidation.
      ++It;
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        assert(SI->isSimple() && SI->getOperand(0) == OldPN);
        Builder.SetInsertPoint(SI);
        auto *NewBC =
            cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
        SI->setOperand(0, NewBC);
        Worklist.push(SI);
        assert(hasStoreUsersOnly(*NewBC));
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void) TyA;
        (void) TyB;
        Instruction *I = replaceInstUsesWith(*BCI, NewPN);
        if (BCI == &CI)
          RetVal = I;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        assert(OldPhiNodes.count(PHI) > 0);
        (void) PHI;
      } else {
        llvm_unreachable("all uses should be handled");
      }
    }
  }

  return RetVal;
}

Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and
  // can be replaced by the operand.
  if (DestTy == Src->getType())
    return replaceInstUsesWith(CI, Src);

  if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
    PointerType *SrcPTy = cast<PointerType>(SrcTy);
    PointerType *DstPTy = cast<PointerType>(DestTy);
    Type *DstElTy = DstPTy->getElementType();
    Type *SrcElTy = SrcPTy->getElementType();

    // Casting pointers between the same type, but with different address
    // spaces, is an addrspace cast rather than a bitcast.
    if ((DstElTy == SrcElTy) &&
        (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
      return new AddrSpaceCastInst(Src, DestTy);

    // If we are casting an alloca to a pointer to a type of the same size,
    // rewrite the allocation instruction to allocate the "right" type. There
    // is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // When the type pointed to is not sized the cast cannot be
    // turned into a gep.
    Type *PointeeType =
        cast<PointerType>(Src->getType()->getScalarType())->getElementType();
    if (!PointeeType->isSized())
      return nullptr;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    unsigned NumZeros = 0;
    while (SrcElTy && SrcElTy != DstElTy) {
      SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
      GetElementPtrInst *GEP =
          GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);

      // If the source pointer is dereferenceable, then assume it points to an
      // allocated object and apply "inbounds" to the GEP.
      bool CanBeNull;
      if (Src->getPointerDereferenceableBytes(DL, CanBeNull)) {
        // In a non-default address space (not 0), a null pointer cannot be
        // assumed inbounds, so ignore that case (dereferenceable_or_null).
        // The reason is that 'null' is not treated differently in these
        // address spaces, and we consequently ignore the 'gep inbounds'
        // special case for 'null' which allows 'inbounds' on 'null' if the
        // indices are zeros.
        if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
          GEP->setIsInBounds();
      }
      return GEP;
    }
  }

  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
    // Beware: messing with this target-specific oddity may cause trouble.
    if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the
      // input is a trunc or zext of a bitcast from vector. If so, we can
      // replace all the casts with a shuffle and (potentially) a bitcast.
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors
      // to assemble the elements of the vector manually. Try to rip the code
      // out and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
          Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
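    // For example, if an operand of the shuffle is (bitcast <4 x float> %X to
    // <4 x i32>) and the result is bitcast back to <4 x float>, the shuffle
    // can be performed directly on %X with the same mask.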
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    unsigned NumShufElts =
        cast<FixedVectorType>(Shuf->getType())->getNumElements();
    unsigned NumSrcVecElts =
        cast<FixedVectorType>(ShufOp0->getType())->getNumElements();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<FixedVectorType>(DestTy)->getNumElements() == NumShufElts &&
        NumShufElts == NumSrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A bitcasted-to-scalar and byte-reversing shuffle is better recognized
    // as a byte-swap:
    // bitcast <N x i8> (shuf X, undef, <N-1, N-2,...,0>) --> bswap (bitcast X)
    // TODO: We should match the related pattern for bitreverse.
    if (DestTy->isIntegerTy() &&
        DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
        SrcTy->getScalarSizeInBits() == 8 && NumShufElts % 2 == 0 &&
        Shuf->hasOneUse() && Shuf->isReverse()) {
      assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
      assert(isa<UndefValue>(ShufOp1) && "Unexpected shuffle op");
      Function *Bswap =
          Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
      Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
      return IntrinsicInst::Create(Bswap, { ScalarX });
    }
  }

  // Handle the A->B->A cast when there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  Type *DestElemTy = DestTy->getElementType();
  if (SrcTy->getElementType() != DestElemTy) {
    Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
      // Handle vectors of pointers.
      // FIXME: what should happen for scalable vectors?
      MidTy = FixedVectorType::get(MidTy,
                                   cast<FixedVectorType>(VT)->getNumElements());
    }

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}