//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <numeric>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
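/// For example (illustrative, with hypothetical value names), given
///   %t = mul nuw i32 %x, 4
///   %n = add nuw i32 %t, 8
/// the decomposition returns %x with Scale = 4 and Offset = 8, i.e.
/// %n == %x * 4 + 8.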
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
            decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
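/// For example (illustrative, with hypothetical value names), a bitcast of an
/// alloca of i16s whose element count is known to be even can be rewritten in
/// terms of the cast type:
///   %n = mul nuw i32 %x, 2
///   %a = alloca i16, i32 %n
///   %c = bitcast i16* %a to i32*
///   --->
///   %a = alloca i32, i32 %x
/// Here AllocElTySize = 2, CastElTySize = 4, ArraySizeScale = 2 and
/// ArrayOffset = 0, so Scale = (2*2)/4 = 1 and the new element count is %x.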
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  // This optimisation does not work for cases where the cast type
  // is scalable and the allocated type is not. This is because we need to
  // know how many times the casted type fits into the allocated type.
  // For the opposite case where the allocated type is scalable and the
  // cast type is not this leads to poor code quality due to the
  // introduction of 'vscale' into the calculations. It seems better to
  // bail out for this case too until we've done a proper cost-benefit
  // analysis.
  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
  if (AllocIsScalable != CastIsScalable) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  // The alloc and cast types should be either both fixed or both scalable.
  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize();
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize();
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
      decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // We don't currently support arrays of scalable types.
  assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0));

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
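  // For example (illustrative), the pair
  //   %m = zext i16 %x to i64
  //   %r = trunc i64 %m to i32
  // is eliminable: the result is a single zext i16 %x to i32.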
  if (auto *CSrc = dyn_cast<CastInst>(Src)) {   // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Ty = CI.getType();
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
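    // For example (illustrative), a cast of a select of constants folds into
    // the select arms:
    //   trunc (select i1 %c, i32 4, i32 8) to i8
    //   --> select i1 %c, i8 4, i8 8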
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
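/// For example (illustrative), with %z = zext i16 %x to i32, the expression
///   trunc (lshr i32 %z, 4) to i16
/// can be computed as lshr i16 %x, 4, because the shifted-in bits are known
/// zero and the shift amount is less than 16.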
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // in-range amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    // zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to the sign bit of the truncate type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Rotate left/right may occur in a wider type than necessary because of type
/// promotion rules. Try to narrow the inputs and convert to funnel shift.
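/// For example (illustrative), an 8-bit rotate-right performed in 32 bits,
/// where %z = zext i8 %x to i32 and %a is known to be less than 8:
///   trunc (or (lshr %z, %a), (shl %z, (8 - %a))) to i8
///   --->
///   call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 (trunc %a))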
Instruction *InstCombinerImpl::narrowRotate(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts with the same shifted operand:
  // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
  Value *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
    return nullptr;

  Value *ShVal, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
    return nullptr;

  auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
  auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
  if (ShiftOpcode0 == ShiftOpcode1)
    return nullptr;

  // Match the shift amount operands for a rotate pattern. This always matches
  // a subtraction on the R operand.
  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal, L) | (lshr ShVal, Width - L)
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
      return L;

    // The shift amount may be masked with negation:
    // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool SubIsOnLHS = false;
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    SubIsOnLHS = true;
  }
  if (!ShAmt)
    return nullptr;

  // The shifted value must have high zeros in the wide type. Typically, this
  // will be a zext, but it could also be the result of an 'and' or 'shift'.
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X = Builder.CreateTrunc(ShVal, DestTy);
  bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
                (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
/// TODO: Transforms for truncated shifts should be moved into here.
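/// For example (illustrative):
///   trunc (add (zext i8 %x to i32), %y) to i8
///   --> add i8 %x, (trunc i32 %y to i8)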
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }

  default: break;
  }

  if (Instruction *NarrowOr = narrowRotate(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      is_splat(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getShuffleMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
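  // For example (illustrative), this whole tree can be evaluated in i8:
  //   %z = zext i8 %x to i32
  //   %a = xor i32 %z, 15
  //   %b = add i32 %a, 1
  //   %t = trunc i32 %b to i8
  // --->
  //   %a = xor i8 %x, 15
  //   %t = add i8 %a, 1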
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

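  // For the lshr-of-sext fold below, an illustrative example:
  //   %s = sext i8 %a to i32
  //   %h = lshr i32 %s, 3
  //   %t = trunc i32 %h to i8
  // --->
  //   %t = ashr i8 %a, 3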
  Value *A;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
  if (match(Src, m_OneUse(m_Shr(m_Trunc(m_Value(A)), m_Constant(C))))) {
    unsigned MaxShiftAmt = SrcWidth - DestWidth;

    // If the shift is small enough, all zero/sign bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      auto *OldShift = cast<Instruction>(Src);
      bool IsExact = OldShift->isExact();
      auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true);
      ShAmt = Constant::mergeUndefsWith(ShAmt, C);
      Value *Shift =
          OldShift->getOpcode() == Instruction::AShr
              ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
              : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
      return CastInst::CreateTruncOrBitCast(Shift, DestTy);
    }
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the 'extend in register' pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<FixedVectorType>(VecOp->getType());
    unsigned VecNumElts = VecOpTy->getNumElements();

    // A badly fitting destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecNumElts * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo = FixedVectorType::get(DestTy, BitCastNumElts);
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext,
                                                 bool DoTransform) {
  // If we are just checking for a icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
      if (!DoTransform) return Cmp;

      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return Cmp;

        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
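  // For example (illustrative), if all bits of %a and %b are known except
  // bit 0:
  //   zext (icmp ne i32 %a, %b) to i32 --> xor i32 %a, %b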
  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return Cmp;

          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the additional high bits exposed by the
    // shift, though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize-SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
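  // For example (illustrative), with equal source and dest sizes:
  //   %t = trunc i32 %x to i16
  //   %z = zext i16 %t to i32
  // --->
  //   %z = and i32 %x, 65535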
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) {   // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // corresponding elimination.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
      Value *Or = Builder.CreateOr(LCast, RCast, CI.getName());
      if (auto *OrInst = dyn_cast<Instruction>(Or))
        Builder.SetInsertPoint(OrInst);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return replaceInstUsesWith(CI, Or);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI,
                                                 Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31       -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != CI.getType())
      In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

    if (Pred == ICmpInst::ICMP_SGT)
      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(CI, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
                                      KnownZeroMask.getBitWidth() - 1), "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true /*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the common
/// low bits.  This is used by code that tries to promote integer operations to
/// a wider type, which will allow us to eliminate the extension.
///
/// This function works on both vectors and scalars.
///
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  KnownBits Known = computeKnownBits(Src, 0, &CI);
  if (Known.isNonNegative())
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
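  // For example (illustrative), this tree can be evaluated directly in i32,
  // eliminating the sext:
  //   %e = sext i8 %x to i16
  //   %a = add i16 %e, 42
  //   %s = sext i16 %a to i32
  // --->
  //   %e = sext i8 %x to i32
  //   %s = add i32 %e, 42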
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn sext(trunc(x))
  // into shifts.
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of the same constant, then this is a sign
  // extension from a smaller value.  If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit width and
  // then use a sext for the whole extension.  Since we don't, look deeper and
  // check for a truncate.  If the source and dest are the same type, eliminate
  // the trunc and extend and just do shifts.  For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  Constant *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
                        m_Constant(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    Constant *SizeDiff = ConstantInt::get(CA->getType(), SrcDstSize - MidSize);
    Constant *ShAmt = ConstantExpr::getAdd(CA, SizeDiff);
    Constant *ShAmtExt = ConstantExpr::getSExt(ShAmt, CI.getType());
    A = Builder.CreateShl(A, ShAmtExt, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtExt);
  }

  return nullptr;
}
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair by the same constant, then this is a sign
  // extension from a smaller value. If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit width and
  // then use a sext for the whole extension. Since we don't, look deeper and
  // check for a truncate. If the source and dest are the same type, eliminate
  // the trunc and extend and just do shifts. For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  Constant *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
                        m_Constant(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    Constant *SizeDiff = ConstantInt::get(CA->getType(), SrcDstSize - MidSize);
    Constant *ShAmt = ConstantExpr::getAdd(CA, SizeDiff);
    Constant *ShAmtExt = ConstantExpr::getSExt(ShAmt, CI.getType());
    A = Builder.CreateShl(A, ShAmtExt, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtExt);
  }

  return nullptr;
}

/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr; // No constant folding of this.
  // See if the value can be truncated to half and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr; // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}

// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
// TODO: Make these support undef elements.
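// For example (illustrative): every element of
// <2 x double> <double 1.0, double 2049.0> fits in float, but 2049.0 does not
// fit in half, so the minimal vector type returned would be <2 x float>.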
static Type *shrinkFPConstantVector(Value *V) {
  auto *CV = dyn_cast<Constant>(V);
  auto *CVVTy = dyn_cast<VectorType>(V->getType());
  if (!CV || !CVVTy)
    return nullptr;

  Type *MinType = nullptr;

  unsigned NumElts = cast<FixedVectorType>(CVVTy)->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
    if (!CFP)
      return nullptr;

    Type *T = shrinkFPConstant(CFP);
    if (!T)
      return nullptr;

    // If we haven't found a type yet or this type has a larger mantissa than
    // our previous type, this is our new minimal type.
    if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
      MinType = T;
  }

  // Make a vector type from the minimal type.
  return FixedVectorType::get(MinType, NumElts);
}

/// Find the minimum FP type we can safely truncate to.
static Type *getMinimumFPType(Value *V) {
  if (auto *FPExt = dyn_cast<FPExtInst>(V))
    return FPExt->getOperand(0)->getType();

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it. This allows us to turn
  // (float)((double)X+2.0) into X+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    if (Type *T = shrinkFPConstant(CFP))
      return T;

  // Try to shrink a vector of FP constants.
  if (Type *T = shrinkFPConstantVector(V))
    return T;

  return V->getType();
}

/// Return true if the cast from integer to FP can be proven to be exact for all
/// possible inputs (the conversion does not lose any precision).
static bool isKnownExactCastIntToFP(CastInst &I) {
  CastInst::CastOps Opcode = I.getOpcode();
  assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
         "Unexpected cast");
  Value *Src = I.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *FPTy = I.getType();
  bool IsSigned = Opcode == Instruction::SIToFP;
  int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;

  // Easy case - if the source integer type has fewer bits than the FP
  // mantissa, then the cast must be exact.
  int DestNumSigBits = FPTy->getFPMantissaWidth();
  if (SrcSize <= DestNumSigBits)
    return true;

  // Cast from FP to integer and back to FP is independent of the intermediate
  // integer width because of poison on overflow.
  Value *F;
  if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
    // If this is uitofp (fptosi F), the source needs an extra bit to avoid
    // potential rounding of negative FP input values.
    int SrcNumSigBits = F->getType()->getFPMantissaWidth();
    if (!IsSigned && match(Src, m_FPToSI(m_Value())))
      SrcNumSigBits++;

    // [su]itofp (fpto[su]i F) --> exact if the source type has no more
    // significant bits than the destination (and make sure neither type is
    // weird -- ppc_fp128).
    if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
        SrcNumSigBits <= DestNumSigBits)
      return true;
  }

  // TODO:
  // Try harder to find if the source integer type has fewer significant bits.
  // For example, compute number of sign bits or compute low bit mask.
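  // An illustrative instance of that TODO (a sketch, nothing above handles
  // it): an i32 source known to have at least 9 sign bits carries at most 24
  // significant bits, so a sitofp of it to float would be exact.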
  return false;
}

Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
  if (Instruction *I = commonCastTransforms(FPT))
    return I;

  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
  Type *Ty = FPT.getType();
  auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
  if (BO && BO->hasOneUse()) {
    Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
    Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
    unsigned OpWidth = BO->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
    unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = Ty->getFPMantissaWidth();
    switch (BO->getOpcode()) {
    default: break;
    case Instruction::FAdd:
    case Instruction::FSub:
      // For addition and subtraction, the infinitely precise result can
      // essentially be arbitrarily wide; proving that double rounding
      // will not occur because the result of OpI is exact (as we will for
      // FMul, for example) is hopeless. However, we *can* nonetheless
      // frequently know that double rounding cannot occur (or that it is
      // innocuous) by taking advantage of the specific structure of
      // infinitely-precise results that admit double rounding.
      //
      // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
      // to represent both sources, we can guarantee that the double
      // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
      // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
      // for proof of this fact).
      //
      // Note: Figueroa does not consider the case where DstFormat !=
      // SrcFormat. It's possible (likely even!) that this analysis
      // could be tightened for those cases, but they are rare (the main
      // case of interest here is (float)((double)float + float)).
      if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
        RI->copyFastMathFlags(BO);
        return RI;
      }
      break;
    case Instruction::FMul:
      // For multiplication, the infinitely precise result has at most
      // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
      // that such a value can be exactly represented, then no double
      // rounding can possibly occur; we can safely perform the operation
      // in the destination format if it can represent both sources.
      if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FDiv:
      // For division, we again use the bound from Figueroa's dissertation.
      // I am entirely certain that this bound can be tightened in the
      // unbalanced operand case by an analysis based on the diophantine
      // rational approximation bound, but the well-known condition used
      // here is a good conservative first pass.
      // TODO: Tighten bound via rigorous analysis of the unbalanced case.
      if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FRem: {
      // Remainder is straightforward: it is always exact, so the type of
      // OpI doesn't enter into things at all. We simply evaluate in
      // whichever source type is larger, then convert to the destination
      // type.
      if (SrcWidth == OpWidth)
        break;
      Value *LHS, *RHS;
      if (LHSWidth == SrcWidth) {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
      } else {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
      }

      Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
      return CastInst::CreateFPCast(ExactResult, Ty);
    }
    }
  }

  // (fptrunc (fneg x)) -> (fneg (fptrunc x))
  Value *X;
  Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
  if (Op && Op->hasOneUse()) {
    // FIXME: The FMF should propagate from the fptrunc, not the source op.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    if (isa<FPMathOperator>(Op))
      Builder.setFastMathFlags(Op->getFastMathFlags());

    if (match(Op, m_FNeg(m_Value(X)))) {
      Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
      return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
    }

    // If we are truncating a select that has an extended operand, we can
    // narrow the other operand and do the select as a narrow op.
    Value *Cond, *X, *Y;
    if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
    if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
  }

  if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::ceil:
    case Intrinsic::fabs:
    case Intrinsic::floor:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the
      // unary FP operation to be itself an fpext from the type to which
      // we're truncating.
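      // Illustrative target of this fold (a sketch):
      //   %e = fpext float %x to double
      //   %c = call double @llvm.ceil.f64(double %e)
      //   %t = fptrunc double %c to float
      // becomes %c = call float @llvm.ceil.f32(float %x).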
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
          break;
      }

      // Do unary FP operation on smaller type.
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
      Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
                                                     II->getIntrinsicID(), Ty);
      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);
      CallInst *NewCI =
          CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(FPT, Builder))
    return I;

  Value *Src = FPT.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
  // If the source operand is a cast from integer to FP and known exact, then
  // cast the integer operand directly to the destination type.
  Type *Ty = FPExt.getType();
  Value *Src = FPExt.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return commonCastTransforms(FPExt);
}

/// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
/// This is safe if the intermediate type has enough bits in its mantissa to
/// accurately represent all values of X. For example, this won't work with
/// i64 -> float -> i64.
Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;

  auto *OpI = cast<CastInst>(FI.getOperand(0));
  Value *X = OpI->getOperand(0);
  Type *XType = X->getType();
  Type *DestType = FI.getType();
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output, since
  // a negative input would lead to undefined behavior.
  if (!isKnownExactCastIntToFP(*OpI)) {
    // The first cast may not round exactly based on the source integer width
    // and FP width, but the overflow UB rules can still allow this to fold.
    // If the destination type is narrow, that means the intermediate FP value
    // must be large enough to hold the source value exactly.
    // For example, (uint8_t)(float)(uint32_t)16777217 is undefined behavior.
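    // A concrete instance of the allowed narrow-destination case (a sketch):
    //   %f = uitofp i32 %x to float
    //   %i = fptoui float %f to i8
    // can fold to trunc i32 %x to i8, because any %x large enough for the
    // uitofp to round would also overflow i8, making the fptoui poison anyway.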
    int OutputSize = (int)DestType->getScalarSizeInBits() - IsOutputSigned;
    if (OutputSize > OpI->getType()->getFPMantissaWidth())
      return nullptr;
  }

  if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
    bool IsInputSigned = isa<SIToFPInst>(OpI);
    if (IsInputSigned && IsOutputSigned)
      return new SExtInst(X, DestType);
    return new ZExtInst(X, DestType);
  }
  if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
    return new TruncInst(X, DestType);

  assert(XType == DestType && "Unexpected types for int to FP to int casts");
  return replaceInstUsesWith(FI, X);
}

Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
  // cast to be exposed to other transforms.
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
    // Handle vectors of pointers.
    if (auto *CIVTy = dyn_cast<VectorType>(CI.getType()))
      Ty = VectorType::get(Ty, CIVTy->getElementCount());

    Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

/// Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset,
    // turn this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices() &&
        // If CI is an addrspacecast and GEP changes the pointer type, merging
        // GEP into CI would undo canonicalizing addrspacecast with different
        // pointer types, causing infinite loops.
        (!isa<AddrSpaceCastInst>(CI) ||
         GEP->getType() == GEP->getPointerOperandType())) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      return replaceOperand(CI, 0, GEP->getOperand(0));
    }
  }

  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
  // to be exposed to other transforms.
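  // Illustrative sketch (assuming 64-bit pointers in this address space):
  //   %i = ptrtoint i8* %p to i32
  // becomes trunc (ptrtoint i8* %p to i64) to i32.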
  Value *SrcOp = CI.getPointerOperand();
  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();
  unsigned TySize = Ty->getScalarSizeInBits();
  unsigned PtrSize = DL.getPointerSizeInBits(AS);
  if (TySize != PtrSize) {
    Type *IntPtrTy = DL.getIntPtrType(CI.getContext(), AS);
    if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
      // Handle vectors of pointers.
      // FIXME: what should happen for scalable vectors?
      IntPtrTy = FixedVectorType::get(
          IntPtrTy, cast<FixedVectorType>(VecTy)->getNumElements());
    }

    Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
    return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
  }

  Value *Vec, *Scalar, *Index;
  if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
                                        m_Value(Scalar), m_Value(Index)))) &&
      Vec->getType() == Ty) {
    assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
    // Convert the scalar to int followed by insert to eliminate one cast:
    // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
    Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
    return InsertElementInst::Create(Vec, NewCast, Index);
  }

  return commonPointerCastTransforms(CI);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type. Since the zext/trunc
/// is done using an integer type, we have a (bitcast(cast(bitcast))) pattern;
/// endianness will impact which end of the vector is extended or truncated.
///
/// A vector is always stored with index 0 at the lowest address, which
/// corresponds to the most significant bits for a big endian stored integer
/// and the least significant bits for little endian. A trunc/zext of an
/// integer impacts the big end of the integer. Thus, we need to add/remove
/// elements at the front of the vector for big endian targets, and the back
/// of the vector for little endian targets.
///
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
static Instruction *
optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
                                        InstCombinerImpl &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be the
    // same size. There is no specific reason we couldn't handle things like
    // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
    // there yet.
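    // For reference, the overall resize this function performs looks like
    // (a little endian sketch):
    //   bitcast (trunc (bitcast <4 x i32> %v to i128) to i64) to <2 x i32>
    // becomes shufflevector <4 x i32> %v, undef, <2 x i32> <i32 0, i32 1>.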
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy =
        FixedVectorType::get(DestTy->getElementType(),
                             cast<FixedVectorType>(SrcTy)->getNumElements());
    InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
  }

  bool IsBigEndian = IC.getDataLayout().isBigEndian();
  unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
  unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();

  assert(SrcElts != DestElts && "Element counts should be different.");

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  SmallVector<int, 16> ShuffleMaskStorage;
  ArrayRef<int> ShuffleMask;
  Value *V2;

  // Produce an identity shuffle mask for the src vector.
  ShuffleMaskStorage.resize(SrcElts);
  std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);

  if (SrcElts > DestElts) {
    // If we're shrinking the number of elements (rewriting an integer
    // truncate), just shuffle in the elements corresponding to the least
    // significant bits from the input and use undef as the second shuffle
    // input.
    V2 = UndefValue::get(SrcTy);
    // Make sure the shuffle mask selects the "least significant bits" by
    // keeping elements from the back of the src vector for big endian, and
    // from the front for little endian.
    ShuffleMask = ShuffleMaskStorage;
    if (IsBigEndian)
      ShuffleMask = ShuffleMask.take_back(DestElts);
    else
      ShuffleMask = ShuffleMask.take_front(DestElts);
  } else {
    // If we're increasing the number of elements (rewriting an integer zext),
    // shuffle in all of the elements from InVal. Fill the rest of the result
    // elements with zeros from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    // Use first elt from V2 when indicating zero in the shuffle mask.
    uint32_t NullElt = SrcElts;
    // Extend with null values in the "most significant bits" by adding
    // elements in front of the src vector for big endian, and at the back
    // for little endian.
    unsigned DeltaElts = DestElts - SrcElts;
    if (IsBigEndian)
      ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
    else
      ShuffleMaskStorage.append(DeltaElts, NullElt);
    ShuffleMask = ShuffleMaskStorage;
  }

  return new ShuffleVectorInst(InVal, V2, ShuffleMask);
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector. See the example in the comment for
/// optimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
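///
/// For example (illustrative, little endian, i32 elements): given
///   V = or i64 (zext i32 %a to i64), (shl (zext i32 %b to i64), 32)
/// with Shift = 0, this fills in Elements as {%a, %b}.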
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win; try inserting it
  // into the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements. Slice it up into
    // pieces and insert each element-sized piece into the vector.
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                       C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift+i*ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
            VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  }
  }
}

/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements. This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombinerImpl &IC) {
  auto *DestVecTy = cast<FixedVectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry. Recast this as a set
  // of insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.

    Result = IC.Builder.CreateInsertElement(Result, Elements[i],
                                            IC.Builder.getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element. The backend tends to handle bitcasts of
/// vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombinerImpl &IC) {
  // TODO: Create and use a pattern matcher for ExtractElementInst.
  auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
  if (!ExtElt || !ExtElt->hasOneUse())
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
  Type *DestType = BitCast.getType();
  if (!VectorType::isValidElementType(DestType))
    return nullptr;

  auto *NewVecType = VectorType::get(DestType, ExtElt->getVectorOperandType());
  auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
                                         NewVecType, "bc");
  return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
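/// Illustrative example (a sketch): with %x : <4 x i32>,
///   %bc = bitcast <4 x i32> %x to <2 x i64>
///   %and = and <2 x i64> %bc, %y
///   %r = bitcast <2 x i64> %and to <4 x i32>
/// becomes %r = and <4 x i32> %x, (bitcast <2 x i64> %y to <4 x i32>).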
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;
  if (!DestTy->isIntOrIntVectorTy() ||
      !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = Builder.CreateBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
    if (!DestTy->isVectorTy())
      return nullptr;
    if (cast<FixedVectorType>(DestTy)->getNumElements() !=
        cast<FixedVectorType>(CondVTy)->getNumElements())
      return nullptr;
  }

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
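  // Illustrative example of the folds below (a sketch): with %x : <4 x i32>,
  //   %t = bitcast <4 x i32> %x to <2 x i64>
  //   %s = select i1 %c, <2 x i64> %t, <2 x i64> %y
  //   %r = bitcast <2 x i64> %s to <4 x i32>
  // becomes %r = select i1 %c, <4 x i32> %x,
  //                            (bitcast <2 x i64> %y to <4 x i32>).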
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///     A -> B cast
///     PHI
///     B -> A cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
                                                      PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();   // Type B
  Type *DestTy = CI.getType();    // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions, each loaded
        // value used as the address of a later load, then a bitcast is
        // necessary to change the value type, so don't optimize it. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        if (!SI->isSimple() || SI->getOperand(0) != OldPN)
          return nullptr;
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        // Verify it's a B->A cast.
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return nullptr;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        if (OldPhiNodes.count(PHI) == 0)
          return nullptr;
      } else {
        return nullptr;
      }
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        // Explicitly perform load combine to make sure no opposing transform
        // can remove the bitcast in the meantime and trigger an infinite loop.
        Builder.SetInsertPoint(LI);
        NewV = combineLoadToNewType(*LI, DestTy);
        // Remove the old load and its use in the old phi, which itself becomes
        // dead once the whole transform finishes.
        replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
        eraseInstFromFunction(*LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process their users, which are
  // Stores and BitCasts. Without this processing, new PHI nodes could be
  // replicated and could lead to extra moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI. This will help later to get
  // rid of the closure formed by the old PHI nodes.
  Instruction *RetVal = nullptr;
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (auto It = OldPN->user_begin(), End = OldPN->user_end(); It != End; ) {
      User *V = *It;
      // We may remove this user; advance to avoid iterator invalidation.
      ++It;
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        assert(SI->isSimple() && SI->getOperand(0) == OldPN);
        Builder.SetInsertPoint(SI);
        auto *NewBC =
            cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
        SI->setOperand(0, NewBC);
        Worklist.push(SI);
        assert(hasStoreUsersOnly(*NewBC));
      }
      else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void) TyA;
        (void) TyB;
        Instruction *I = replaceInstUsesWith(*BCI, NewPN);
        if (BCI == &CI)
          RetVal = I;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        assert(OldPhiNodes.count(PHI) > 0);
        (void) PHI;
      } else {
        llvm_unreachable("all uses should be handled");
      }
    }
  }

  return RetVal;
}

Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return replaceInstUsesWith(CI, Src);

  if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
    PointerType *SrcPTy = cast<PointerType>(SrcTy);
    PointerType *DstPTy = cast<PointerType>(DestTy);
    Type *DstElTy = DstPTy->getElementType();
    Type *SrcElTy = SrcPTy->getElementType();

    // Casting pointers between the same type, but with different address
    // spaces is an addrspace cast rather than a bitcast.
    if ((DstElTy == SrcElTy) &&
        (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
      return new AddrSpaceCastInst(Src, DestTy);

    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // When the type pointed to is not sized the cast cannot be
    // turned into a gep.
    Type *PointeeType =
        cast<PointerType>(Src->getType()->getScalarType())->getElementType();
    if (!PointeeType->isSized())
      return nullptr;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    unsigned NumZeros = 0;
    while (SrcElTy && SrcElTy != DstElTy) {
      SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
      GetElementPtrInst *GEP =
          GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);

      // If the source pointer is dereferenceable, then assume it points to an
      // allocated object and apply "inbounds" to the GEP.
      bool CanBeNull;
      if (Src->getPointerDereferenceableBytes(DL, CanBeNull)) {
        // In a non-default address space (not 0), a null pointer cannot be
        // assumed inbounds, so ignore that case (dereferenceable_or_null).
        // The reason is that 'null' is not treated differently in these
        // address spaces, and we consequently ignore the 'gep inbounds'
        // special case for 'null' which allows 'inbounds' on 'null' if the
        // indices are zeros.
        if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
          GEP->setIsInBounds();
      }
      return GEP;
    }
  }

  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
    // Beware: messing with this target-specific oddity may cause trouble.
    if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
            Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
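    // An example of the fold checked below (a sketch): with %x : <4 x float>,
    //   %a = bitcast <4 x float> %x to <4 x i32>
    //   %s = shufflevector <4 x i32> %a, <4 x i32> %b, <4 x i32> <mask>
    //   %r = bitcast <4 x i32> %s to <4 x float>
    // becomes a shufflevector of %x and (bitcast <4 x i32> %b to <4 x float>)
    // with the same mask.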
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    unsigned NumShufElts =
        cast<FixedVectorType>(Shuf->getType())->getNumElements();
    unsigned NumSrcVecElts =
        cast<FixedVectorType>(ShufOp0->getType())->getNumElements();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<FixedVectorType>(DestTy)->getNumElements() == NumShufElts &&
        NumShufElts == NumSrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
    // a byte-swap:
    // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
    // TODO: We should match the related pattern for bitreverse.
    if (DestTy->isIntegerTy() &&
        DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
        SrcTy->getScalarSizeInBits() == 8 && NumShufElts % 2 == 0 &&
        Shuf->hasOneUse() && Shuf->isReverse()) {
      assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
      assert(isa<UndefValue>(ShufOp1) && "Unexpected shuffle op");
      Function *Bswap =
          Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
      Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
      return IntrinsicInst::Create(Bswap, { ScalarX });
    }
  }

  // Handle the A->B->A cast where there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  Type *DestElemTy = DestTy->getElementType();
  if (SrcTy->getElementType() != DestElemTy) {
    Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
      // Handle vectors of pointers.
      // FIXME: what should happen for scalable vectors?
      MidTy = FixedVectorType::get(MidTy,
                                   cast<FixedVectorType>(VT)->getNumElements());
    }

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}