//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <numeric>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
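/// For example (illustrative, names assumed): given 'shl nuw %X, 2', this
/// returns %X with Scale = 4 and Offset = 0; given
/// 'add nuw (mul nuw %X, 4), 8', it returns %X with Scale = 4 and Offset = 8.
///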
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
            decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast
/// by moving the type information into the alloc.
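///
/// For example (an illustrative sketch; names and operand shapes assumed):
///   %n = mul nuw i32 %m, 4
///   %a = alloca i8, i32 %n
///   %c = bitcast i8* %a to i32*
/// can become:
///   %a2 = alloca i32, i32 %m
/// with %c replaced by %a2 (and, if %a has other uses, a bitcast of %a2 back
/// to i8*).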
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  // This optimization does not work for cases where the cast type
  // is scalable and the allocated type is not. This is because we need to
  // know how many times the casted type fits into the allocated type.
  // For the opposite case where the allocated type is scalable and the
  // cast type is not, this leads to poor code quality due to the
  // introduction of 'vscale' into the calculations. It seems better to
  // bail out for this case too until we've done a proper cost-benefit
  // analysis.
  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
  if (AllocIsScalable != CastIsScalable) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  // The alloc and cast types should be either both fixed or both scalable.
  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize();
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize();
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
      decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus, by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // We don't currently support arrays of scalable types.
  assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0));

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

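// Determine whether a pair of casts can be folded into a single cast by
// delegating to CastInst::isEliminableCastPair. For example (illustrative),
// a 'zext i8 -> i32' followed by a 'trunc i32 -> i16' is eliminable and
// folds to a single 'zext i8 -> i16'.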
Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Ty = CI.getType();
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands of different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V. We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
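/// For example (illustrative), 'trunc (add i64 %x, %y) to i32' can be
/// computed as 'add i32 (trunc %x), (trunc %y)' when %x and %y are free to
/// truncate (e.g. they are themselves zext/sext from i32).
///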
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // in-range amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    // zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to sign bit of the truncate type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (little endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
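/// The big-endian counterpart (illustrative): the same input becomes
///   extractelement <4 x i32> %X, 2
/// because element 0 there holds the most significant 32 bits of the i128.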
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Funnel/Rotate left/right may occur in a wider type than necessary because
/// of type promotion rules. Try to narrow the inputs and convert to funnel
/// shift.
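/// For example (illustrative; assumes the high bits of %x are known zero):
///   trunc (or (shl %x, %amt), (lshr %x, (sub 8, %amt))) to i8
/// --->
///   call i8 @llvm.fshl.i8(i8 trunc(%x), i8 trunc(%x), i8 trunc(%amt))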
Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts:
  // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1))
  BinaryOperator *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
    return nullptr;

  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(ShVal0, ShVal1);
    std::swap(ShAmt0, ShAmt1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Match the shift amount operands for a funnel/rotate pattern. This always
  // matches a subtraction on the R operand.
  auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal0, L) | (lshr ShVal1, Width - L)
    if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
      return L;

    // The shift amount may be masked with negation:
    // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  // TODO: Add support for funnel shifts (ShVal0 != ShVal1).
  if (ShVal0 != ShVal1)
    return nullptr;
  Value *ShVal = ShVal0;

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool IsFshl = true; // Sub on LSHR.
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    IsFshl = false; // Sub on SHL.
  }
  if (!ShAmt)
    return nullptr;

  // The shifted value must have high zeros in the wide type. Typically, this
  // will be a zext, but it could also be the result of an 'and' or 'shift'.
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X = Builder.CreateTrunc(ShVal, DestTy);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return IntrinsicInst::Create(F, { X, X, NarrowShAmt });
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
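/// For example (illustrative):
///   trunc (add i32 (zext i8 %x), 42) to i8 --> add i8 %x, 42
/// once the resulting trunc-of-zext folds away.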
/// TODO: Transforms for truncated shifts should be moved into here.
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }

  default: break;
  }

  if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to
/// any shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
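/// For example (illustrative):
///   trunc (shuffle <4 x i32> %x, undef, zeroinitializer) to <4 x i16>
/// --->
///   shuffle <4 x i16> (trunc %x), undef, zeroinitializer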
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
      is_splat(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
    Constant *NarrowUndef = UndefValue::get(Trunc.getType());
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getShuffleMask());
  }

  return nullptr;
}
/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (isa<UndefValue>(VecOp)) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}
Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  Value *A;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
  if (match(Src, m_OneUse(m_Shr(m_Trunc(m_Value(A)), m_Constant(C))))) {
    unsigned MaxShiftAmt = SrcWidth - DestWidth;

    // If the shift is small enough, all zero/sign bits created by the shift
    // are removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      auto *OldShift = cast<Instruction>(Src);
      bool IsExact = OldShift->isExact();
      auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true);
      ShAmt = Constant::mergeUndefsWith(ShAmt, C);
      Value *Shift =
          OldShift->getOpcode() == Instruction::AShr
              ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
              : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
      return CastInst::CreateTruncOrBitCast(Shift, DestTy);
    }
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shift by constants. It undoes a combine in
      // FoldShiftByConstant and is the extend in reg pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<FixedVectorType>(VecOp->getType());
    unsigned VecNumElts = VecOpTy->getNumElements();

    // A badly fit destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecNumElts * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo = FixedVectorType::get(DestTy, BitCastNumElts);
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  return nullptr;
}
Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext,
                                                 bool DoTransform) {
  // If we are just checking for a icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s  0) to i32 --> x>>u31      true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
      if (!DoTransform) return Cmp;

      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        if (!DoTransform) return Cmp;

        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }
        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
  // It is also profitable to transform icmp eq into not(xor(A, B)) because that
  // may lead to additional simplifications.
  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          if (!DoTransform) return Cmp;

          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt: // zext(zext(x)) -> zext(x).
  case Instruction::SExt: // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize - SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    //   SrcSize <  DstSize: zext(a & mask)
    //   SrcSize == DstSize: a & mask
    //   SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  BinaryOperator *SrcI = dyn_cast<BinaryOperator>(Src);
  if (SrcI && SrcI->getOpcode() == Instruction::Or) {
    // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
    // of the (zext icmp) can be eliminated. If so, immediately perform the
    // according elimination.
    ICmpInst *LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
    ICmpInst *RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
    if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
        (transformZExtICmp(LHS, CI, false) ||
         transformZExtICmp(RHS, CI, false))) {
      // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
      Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
      Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
      Value *Or = Builder.CreateOr(LCast, RCast, CI.getName());
      if (auto *OrInst = dyn_cast<Instruction>(Or))
        Builder.SetInsertPoint(OrInst);

      // Perform the elimination.
      if (auto *LZExt = dyn_cast<ZExtInst>(LCast))
        transformZExtICmp(LHS, *LZExt);
      if (auto *RZExt = dyn_cast<ZExtInst>(RCast))
        transformZExtICmp(RHS, *RZExt);

      return replaceInstUsesWith(CI, Or);
    }
  }

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (SrcI &&
      match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI,
                                                 Instruction &CI) {
  Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
  ICmpInst::Predicate Pred = ICI->getPredicate();

  // Don't bother if Op1 isn't of vector or integer type.
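  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

  // An illustrative example of the transforms below: 'sext (icmp slt %x, 0)
  // to i32' becomes 'ashr %x, 31', which is all ones exactly when %x is
  // negative.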
  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
    // (x <s  0) ? -1 : 0 -> ashr x, 31        -> all ones if negative
    // (x >s -1) ? -1 : 0 -> not (ashr x, 31)  -> all ones if positive
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != CI.getType())
      In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);

    if (Pred == ICmpInst::ICMP_SGT)
      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(CI, In);
  }

  if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    // If we know that only one bit of the LHS of the icmp can be set and we
    // have an equality comparison with zero or a power of 2, we can transform
    // the icmp and sext into bitwise/integer operations.
    if (ICI->hasOneUse() &&
        ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())) {
      KnownBits Known = computeKnownBits(Op0, 0, &CI);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) {
        Value *In = ICI->getOperand(0);

        // If the icmp tests for a known zero bit we can constant fold it.
        if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
          Value *V = Pred == ICmpInst::ICMP_NE ?
                       ConstantInt::getAllOnesValue(CI.getType()) :
                       ConstantInt::getNullValue(CI.getType());
          return replaceInstUsesWith(CI, V);
        }

        if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
          // sext ((x & 2^n) == 0)   -> (x >> n) - 1
          // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
          unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
          // Perform a right shift to place the desired bit in the LSB.
          if (ShiftAmt)
            In = Builder.CreateLShr(In,
                                    ConstantInt::get(In->getType(), ShiftAmt));

          // At this point "In" is either 1 or 0. Subtract 1 to turn
          // {1, 0} -> {0, -1}.
          In = Builder.CreateAdd(In,
                                 ConstantInt::getAllOnesValue(In->getType()),
                                 "sext");
        } else {
          // sext ((x & 2^n) != 0)   -> (x << bitwidth-n) a>> bitwidth-1
          // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
          unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
          // Perform a left shift to place the desired bit in the MSB.
          if (ShiftAmt)
            In = Builder.CreateShl(In,
                                   ConstantInt::get(In->getType(), ShiftAmt));

          // Distribute the bit over the whole bit width.
          In = Builder.CreateAShr(In,
                                  ConstantInt::get(In->getType(),
                                                   KnownZeroMask.getBitWidth() - 1),
                                  "sext");
        }

        if (CI.getType() == In->getType())
          return replaceInstUsesWith(CI, In);
        return CastInst::CreateIntegerCast(In, CI.getType(), true /*SExt*/);
      }
    }
  }

  return nullptr;
}

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the
/// common low bits. This is used by code that tries to promote integer
/// operations to a wider type, which allows us to eliminate the extension.
///
/// This function works on both vectors and scalars.
///
///
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // If we know that the value being extended is positive, we can use a zext
  // instead.
  KnownBits Known = computeKnownBits(Src, 0, &CI);
  if (Known.isNonNegative())
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this! Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  // If the input is a trunc from the destination type, then turn
  // sext(trunc(x)) into shifts.
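  // For example (illustrative, assuming an i32 destination and i8 source):
  //   %t = trunc i32 %x to i8
  //   %s = sext i8 %t to i32
  // -->
  //   %sh = shl i32 %x, 24
  //   %s  = ashr i32 %sh, 24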
  Value *X;
  if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
    // sext(trunc(X)) --> ashr(shl(X, C), C)
    unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
    unsigned DestBitSize = DestTy->getScalarSizeInBits();
    Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of the same constant, then this is a sign
  // extension from a smaller value. If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit and then
  // use a sext for the whole extension. Since we don't, look deeper and check
  // for a truncate. If the source and dest are the same type, eliminate the
  // trunc and extend and just do shifts. For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, 6
  //   %c = ashr i8 %b, 6
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 30
  //   %d = ashr i32 %a, 30
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  Constant *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
                        m_Constant(CA))) &&
      BA == CA && A->getType() == CI.getType()) {
    unsigned MidSize = Src->getType()->getScalarSizeInBits();
    unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
    Constant *SizeDiff = ConstantInt::get(CA->getType(), SrcDstSize - MidSize);
    Constant *ShAmt = ConstantExpr::getAdd(CA, SizeDiff);
    Constant *ShAmtExt = ConstantExpr::getSExt(ShAmt, CI.getType());
    A = Builder.CreateShl(A, ShAmtExt, CI.getName());
    return BinaryOperator::CreateAShr(A, ShAmtExt);
  }

  return nullptr;
}

/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr; // No constant folding of this.
  // See if the value can be truncated to half and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr; // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}

// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
// TODO: Make these support undef elements.
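// For example (illustrative): the vector <2 x double> <double 1.0, double 0.5>
// can be truncated to <2 x half>, since both elements are exactly
// representable in half.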
static Type *shrinkFPConstantVector(Value *V) {
  auto *CV = dyn_cast<Constant>(V);
  auto *CVVTy = dyn_cast<VectorType>(V->getType());
  if (!CV || !CVVTy)
    return nullptr;

  Type *MinType = nullptr;

  unsigned NumElts = cast<FixedVectorType>(CVVTy)->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
    if (!CFP)
      return nullptr;

    Type *T = shrinkFPConstant(CFP);
    if (!T)
      return nullptr;

    // If we haven't found a type yet or this type has a larger mantissa than
    // our previous type, this is our new minimal type.
    if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
      MinType = T;
  }

  // Make a vector type from the minimal type.
  return FixedVectorType::get(MinType, NumElts);
}

/// Find the minimum FP type we can safely truncate to.
static Type *getMinimumFPType(Value *V) {
  if (auto *FPExt = dyn_cast<FPExtInst>(V))
    return FPExt->getOperand(0)->getType();

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it. This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    if (Type *T = shrinkFPConstant(CFP))
      return T;

  // Try to shrink a vector of FP constants.
  if (Type *T = shrinkFPConstantVector(V))
    return T;

  return V->getType();
}

/// Return true if the cast from integer to FP can be proven to be exact for
/// all possible inputs (the conversion does not lose any precision).
static bool isKnownExactCastIntToFP(CastInst &I) {
  CastInst::CastOps Opcode = I.getOpcode();
  assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
         "Unexpected cast");
  Value *Src = I.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *FPTy = I.getType();
  bool IsSigned = Opcode == Instruction::SIToFP;
  int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;

  // Easy case - if the source integer type has fewer bits than the FP
  // mantissa, then the cast must be exact.
  int DestNumSigBits = FPTy->getFPMantissaWidth();
  if (SrcSize <= DestNumSigBits)
    return true;

  // Cast from FP to integer and back to FP is independent of the intermediate
  // integer width because of poison on overflow.
  Value *F;
  if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
    // If this is uitofp (fptosi F), the source needs an extra bit to avoid
    // potential rounding of negative FP input values.
    int SrcNumSigBits = F->getType()->getFPMantissaWidth();
    if (!IsSigned && match(Src, m_FPToSI(m_Value())))
      SrcNumSigBits++;

    // [su]itofp (fpto[su]i F) --> exact if the source type has no more
    // significant bits than the destination (and make sure neither type is
    // weird -- ppc_fp128).
    if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
        SrcNumSigBits <= DestNumSigBits)
      return true;
  }

  // TODO:
  // Try harder to find if the source integer type has fewer significant bits.
  // For example, compute number of sign bits or compute low bit mask.
  return false;
}

Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
  if (Instruction *I = commonCastTransforms(FPT))
    return I;

  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
  Type *Ty = FPT.getType();
  auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
  if (BO && BO->hasOneUse()) {
    Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
    Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
    unsigned OpWidth = BO->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
    unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = Ty->getFPMantissaWidth();
    switch (BO->getOpcode()) {
    default: break;
    case Instruction::FAdd:
    case Instruction::FSub:
      // For addition and subtraction, the infinitely precise result can
      // essentially be arbitrarily wide; proving that double rounding
      // will not occur because the result of OpI is exact (as we will for
      // FMul, for example) is hopeless. However, we *can* nonetheless
      // frequently know that double rounding cannot occur (or that it is
      // innocuous) by taking advantage of the specific structure of
      // infinitely-precise results that admit double rounding.
      //
      // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
      // to represent both sources, we can guarantee that the double
      // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
      // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
      // for proof of this fact).
      //
      // Note: Figueroa does not consider the case where DstFormat !=
      // SrcFormat. It's possible (likely even!) that this analysis
      // could be tightened for those cases, but they are rare (the main
      // case of interest here is (float)((double)float + float)).
      if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
        RI->copyFastMathFlags(BO);
        return RI;
      }
      break;
    case Instruction::FMul:
      // For multiplication, the infinitely precise result has at most
      // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
      // that such a value can be exactly represented, then no double
      // rounding can possibly occur; we can safely perform the operation
      // in the destination format if it can represent both sources.
      if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FDiv:
      // For division, we again use the bound from Figueroa's dissertation.
      // I am entirely certain that this bound can be tightened in the
      // unbalanced operand case by an analysis based on the diophantine
      // rational approximation bound, but the well-known condition used
      // here is a good conservative first pass.
      // TODO: Tighten bound via rigorous analysis of the unbalanced case.
      if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FRem: {
      // Remainder is straightforward: it is always exact, so the type of
      // OpI doesn't enter into things at all. We simply evaluate in
      // whichever source type is larger, then convert to the destination
      // type.
      if (SrcWidth == OpWidth)
        break;
      Value *LHS, *RHS;
      if (LHSWidth == SrcWidth) {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
      } else {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
      }

      Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
      return CastInst::CreateFPCast(ExactResult, Ty);
    }
    }
  }

  // (fptrunc (fneg x)) -> (fneg (fptrunc x))
  Value *X;
  Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
  if (Op && Op->hasOneUse()) {
    // FIXME: The FMF should propagate from the fptrunc, not the source op.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    if (isa<FPMathOperator>(Op))
      Builder.setFastMathFlags(Op->getFastMathFlags());

    if (match(Op, m_FNeg(m_Value(X)))) {
      Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
      return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
    }

    // If we are truncating a select that has an extended operand, we can
    // narrow the other operand and do the select as a narrow op.
    Value *Cond, *X, *Y;
    if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
    if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
  }

  if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::ceil:
    case Intrinsic::fabs:
    case Intrinsic::floor:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the
      // unary FP operation to be itself an fpext from the type to which
      // we're truncating.
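      // For example (illustrative, truncating float back to half):
      //   %e = fpext half %x to float
      //   %c = call float @llvm.ceil.f32(float %e)
      //   %t = fptrunc float %c to half
      // -->
      //   %t = call half @llvm.ceil.f16(half %x)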
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
          break;
      }

      // Do unary FP operation on smaller type.
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
      Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
                                                     II->getIntrinsicID(), Ty);
      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);
      CallInst *NewCI =
          CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(FPT, Builder))
    return I;

  Value *Src = FPT.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
  // If the source operand is a cast from integer to FP and known exact, then
  // cast the integer operand directly to the destination type.
  Type *Ty = FPExt.getType();
  Value *Src = FPExt.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return commonCastTransforms(FPExt);
}

/// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
/// This is safe if the intermediate type has enough bits in its mantissa to
/// accurately represent all values of X. For example, this won't work with
/// i64 -> float -> i64.
Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;

  auto *OpI = cast<CastInst>(FI.getOperand(0));
  Value *X = OpI->getOperand(0);
  Type *XType = X->getType();
  Type *DestType = FI.getType();
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output,
  // since a negative input would lead to undefined behavior.
  if (!isKnownExactCastIntToFP(*OpI)) {
    // The first cast may not round exactly based on the source integer width
    // and FP width, but the overflow UB rules can still allow this to fold.
    // If the destination type is narrow, that means the intermediate FP value
    // must be large enough to hold the source value exactly.
    // For example, (uint8_t)((float)(uint32_t)16777217) is undefined behavior.
    int OutputSize = (int)DestType->getScalarSizeInBits() - IsOutputSigned;
    if (OutputSize > OpI->getType()->getFPMantissaWidth())
      return nullptr;
  }

  if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
    bool IsInputSigned = isa<SIToFPInst>(OpI);
    if (IsInputSigned && IsOutputSigned)
      return new SExtInst(X, DestType);
    return new ZExtInst(X, DestType);
  }
  if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
    return new TruncInst(X, DestType);

  assert(XType == DestType && "Unexpected types for int to FP to int casts");
  return replaceInstUsesWith(FI, X);
}

Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
  // cast to be exposed to other transforms.
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = DL.getIntPtrType(CI.getContext(), AS);
    // Handle vectors of pointers.
    if (auto *CIVTy = dyn_cast<VectorType>(CI.getType()))
      Ty = VectorType::get(Ty, CIVTy->getElementCount());

    Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

/// Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset,
    // turn this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices() &&
        // If CI is an addrspacecast and GEP changes the pointer type, merging
        // GEP into CI would undo canonicalizing addrspacecast with different
        // pointer types, causing infinite loops.
        (!isa<AddrSpaceCastInst>(CI) ||
         GEP->getType() == GEP->getPointerOperandType())) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      return replaceOperand(CI, 0, GEP->getOperand(0));
    }
  }

  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
  // to be exposed to other transforms.
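  // For example (illustrative, assuming 64-bit pointers and an i32
  // destination):
  //   %i = ptrtoint i8* %p to i32
  // -->
  //   %w = ptrtoint i8* %p to i64
  //   %i = trunc i64 %w to i32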
  Value *SrcOp = CI.getPointerOperand();
  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();
  unsigned TySize = Ty->getScalarSizeInBits();
  unsigned PtrSize = DL.getPointerSizeInBits(AS);
  if (TySize != PtrSize) {
    Type *IntPtrTy = DL.getIntPtrType(CI.getContext(), AS);
    if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
      // Handle vectors of pointers.
      // FIXME: what should happen for scalable vectors?
      IntPtrTy = FixedVectorType::get(
          IntPtrTy, cast<FixedVectorType>(VecTy)->getNumElements());
    }

    Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
    return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
  }

  Value *Vec, *Scalar, *Index;
  if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
                                        m_Value(Scalar), m_Value(Index)))) &&
      Vec->getType() == Ty) {
    assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
    // Convert the scalar to int followed by insert to eliminate one cast:
    // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
    Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
    return InsertElementInst::Create(Vec, NewCast, Index);
  }

  return commonPointerCastTransforms(CI);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type. Since the zext/trunc
/// is done using an integer type, we have a (bitcast(cast(bitcast))) pattern,
/// and endianness will impact which end of the vector is extended or
/// truncated.
///
/// A vector is always stored with index 0 at the lowest address, which
/// corresponds to the most significant bits for a big endian stored integer
/// and the least significant bits for little endian. A trunc/zext of an
/// integer impacts the big end of the integer. Thus, we need to add/remove
/// elements at the front of the vector for big endian targets, and the back
/// of the vector for little endian targets.
///
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
static Instruction *
optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
                                        InstCombinerImpl &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be
    // the same size. There is no specific reason we couldn't handle things
    // like <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't
    // gotten there yet.
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy =
        FixedVectorType::get(DestTy->getElementType(),
                             cast<FixedVectorType>(SrcTy)->getNumElements());
    InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
  }

  bool IsBigEndian = IC.getDataLayout().isBigEndian();
  unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
  unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();

  assert(SrcElts != DestElts && "Element counts should be different.");

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  SmallVector<int, 16> ShuffleMaskStorage;
  ArrayRef<int> ShuffleMask;
  Value *V2;

  // Produce an identity shuffle mask for the src vector.
  ShuffleMaskStorage.resize(SrcElts);
  std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);

  if (SrcElts > DestElts) {
    // If we're shrinking the number of elements (rewriting an integer
    // truncate), just shuffle in the elements corresponding to the least
    // significant bits from the input and use undef as the second shuffle
    // input.
    V2 = UndefValue::get(SrcTy);
    // Make sure the shuffle mask selects the "least significant bits" by
    // keeping elements from the back of the src vector for big endian, and
    // from the front for little endian.
    ShuffleMask = ShuffleMaskStorage;
    if (IsBigEndian)
      ShuffleMask = ShuffleMask.take_back(DestElts);
    else
      ShuffleMask = ShuffleMask.take_front(DestElts);
  } else {
    // If we're increasing the number of elements (rewriting an integer zext),
    // shuffle in all of the elements from InVal. Fill the rest of the result
    // elements with zeros from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    // Use first elt from V2 when indicating zero in the shuffle mask.
    uint32_t NullElt = SrcElts;
    // Extend with null values in the "most significant bits" by adding
    // elements in front of the src vector for big endian, and at the back
    // for little endian.
    unsigned DeltaElts = DestElts - SrcElts;
    if (IsBigEndian)
      ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
    else
      ShuffleMaskStorage.append(DeltaElts, NullElt);
    ShuffleMask = ShuffleMaskStorage;
  }

  return new ShuffleVectorInst(InVal, V2, ShuffleMask);
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector. See the example in the comment for
/// OptimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
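///
/// For example (illustrative): on a little-endian target with VecEltTy == i32,
/// a 64-bit value whose low half is %a and whose high half is %b decomposes
/// into Elements == {%a, %b}.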
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win: try inserting it
  // into the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements. Slice it up into
    // pieces and insert each element-sized piece into the vector.
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                   C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift + i * ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
            VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements,
                                    VecEltTy, isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
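    // For example (illustrative): inserting into a vector of i32 elements, a
    // shl by 32 moves the inserted value from element 0 to element 1 on a
    // little-endian target.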
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements,
                                    VecEltTy, isBigEndian);
  }
  }
}

/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements. This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombinerImpl &IC) {
  auto *DestVecTy = cast<FixedVectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry. Recast this as a set
  // of insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.

    Result = IC.Builder.CreateInsertElement(Result, Elements[i],
                                            IC.Builder.getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element. The backend tends to handle bitcasts
/// of vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombinerImpl &IC) {
  // TODO: Create and use a pattern matcher for ExtractElementInst.
  auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
  if (!ExtElt || !ExtElt->hasOneUse())
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
  Type *DestType = BitCast.getType();
  if (!VectorType::isValidElementType(DestType))
    return nullptr;

  auto *NewVecType = VectorType::get(DestType, ExtElt->getVectorOperandType());
  auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
                                         NewVecType, "bc");
  return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
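///
/// For example (illustrative):
///   %b = bitcast <4 x i16> %X to <2 x i32>
///   %o = or <2 x i32> %b, %Y
///   %r = bitcast <2 x i32> %o to <4 x i16>
/// -->
///   %yb = bitcast <2 x i32> %Y to <4 x i16>
///   %r  = or <4 x i16> %X, %yb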
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;
  if (!DestTy->isIntOrIntVectorTy() ||
      !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = Builder.CreateBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
    if (!DestTy->isVectorTy())
      return nullptr;
    if (cast<FixedVectorType>(DestTy)->getNumElements() !=
        cast<FixedVectorType>(CondVTy)->getNumElements())
      return nullptr;
  }

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///   A -> B    cast
///   PHI
///   B -> A    cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
                                                      PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();  // Type B
  Type *DestTy = CI.getType();   // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes; before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions, where each
        // loaded value is used as the address of a later load, a bitcast is
        // necessary to change the value type, so don't optimize it. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's an A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        if (!SI->isSimple() || SI->getOperand(0) != OldPN)
          return nullptr;
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        // Verify it's a B->A cast.
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return nullptr;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        if (OldPhiNodes.count(PHI) == 0)
          return nullptr;
      } else {
        return nullptr;
      }
    }
  }

  // For each old PHI node, create a corresponding new PHI node with type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        // Explicitly perform load combine to make sure no opposing transform
        // can remove the bitcast in the meantime and trigger an infinite loop.
        Builder.SetInsertPoint(LI);
        NewV = combineLoadToNewType(*LI, DestTy);
        // Remove the old load and its use in the old phi, which itself becomes
        // dead once the whole transform finishes.
        replaceInstUsesWith(*LI, UndefValue::get(LI->getType()));
        eraseInstFromFunction(*LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process their users, which are
  // Stores and BitCasts. Without this processing, new PHI nodes could be
  // replicated and could lead to extra moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI. This will help to get rid of
  // the closure formed by the old PHI nodes later.
  Instruction *RetVal = nullptr;
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (auto It = OldPN->user_begin(), End = OldPN->user_end(); It != End; ) {
      User *V = *It;
      // We may remove this user, advance to avoid iterator invalidation.
      ++It;
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        assert(SI->isSimple() && SI->getOperand(0) == OldPN);
        Builder.SetInsertPoint(SI);
        auto *NewBC =
            cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
        SI->setOperand(0, NewBC);
        Worklist.push(SI);
        assert(hasStoreUsersOnly(*NewBC));
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void) TyA;
        (void) TyB;
        Instruction *I = replaceInstUsesWith(*BCI, NewPN);
        if (BCI == &CI)
          RetVal = I;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        assert(OldPhiNodes.count(PHI) > 0);
        (void) PHI;
      } else {
        llvm_unreachable("all uses should be handled");
      }
    }
  }

  return RetVal;
}

Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return replaceInstUsesWith(CI, Src);

  if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
    PointerType *SrcPTy = cast<PointerType>(SrcTy);
    PointerType *DstPTy = cast<PointerType>(DestTy);
    Type *DstElTy = DstPTy->getElementType();
    Type *SrcElTy = SrcPTy->getElementType();

    // Casting pointers between the same type, but with different address
    // spaces, is an addrspace cast rather than a bitcast.
    if ((DstElTy == SrcElTy) &&
        (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
      return new AddrSpaceCastInst(Src, DestTy);

    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    // When the type pointed to is not sized the cast cannot be
    // turned into a gep.
    Type *PointeeType =
        cast<PointerType>(Src->getType()->getScalarType())->getElementType();
    if (!PointeeType->isSized())
      return nullptr;

    // If the source and destination are pointers, and this cast is equivalent
    // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
    // This can enhance SROA and other transforms that want type-safe pointers.
    unsigned NumZeros = 0;
    while (SrcElTy && SrcElTy != DstElTy) {
      SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
      ++NumZeros;
    }

    // If we found a path from the src to dest, create the getelementptr now.
    if (SrcElTy == DstElTy) {
      SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
      GetElementPtrInst *GEP =
          GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);

      // If the source pointer is dereferenceable, then assume it points to an
      // allocated object and apply "inbounds" to the GEP.
      bool CanBeNull;
      if (Src->getPointerDereferenceableBytes(DL, CanBeNull)) {
        // In a non-default address space (not 0), a null pointer cannot be
        // assumed inbounds, so ignore that case (dereferenceable_or_null).
        // The reason is that 'null' is not treated differently in these
        // address spaces, and we consequently ignore the 'gep inbounds'
        // special case for 'null', which allows 'inbounds' on 'null' if the
        // indices are zeros.
        if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
          GEP->setIsInBounds();
      }
      return GEP;
    }
  }

  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
    // Beware: messing with this target-specific oddity may cause trouble.
    if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
                  Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
      if (!DestTy->isVectorTy()) {
        Value *Elem =
            Builder.CreateExtractElement(Src,
                Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    unsigned NumShufElts =
        cast<FixedVectorType>(Shuf->getType())->getNumElements();
    unsigned NumSrcVecElts =
        cast<FixedVectorType>(ShufOp0->getType())->getNumElements();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<FixedVectorType>(DestTy)->getNumElements() == NumShufElts &&
        NumShufElts == NumSrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
    // a byte-swap:
    // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
    // TODO: We should match the related pattern for bitreverse.
    if (DestTy->isIntegerTy() &&
        DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
        SrcTy->getScalarSizeInBits() == 8 && NumShufElts % 2 == 0 &&
        Shuf->hasOneUse() && Shuf->isReverse()) {
      assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
      assert(isa<UndefValue>(ShufOp1) && "Unexpected shuffle op");
      Function *Bswap =
          Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
      Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
      return IntrinsicInst::Create(Bswap, { ScalarX });
    }
  }

  // Handle the A->B->A cast where there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  Type *DestElemTy = DestTy->getElementType();
  if (SrcTy->getElementType() != DestElemTy) {
    Type *MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType())) {
      // Handle vectors of pointers.
      // FIXME: what should happen for scalable vectors?
      MidTy = FixedVectorType::get(MidTy,
                                   cast<FixedVectorType>(VT)->getNumElements());
    }

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}