//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
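/// For example (an illustrative case that follows from the code below, with
/// placeholder value names), given
///   %t = mul nuw i32 %x, 4
///   %v = add nuw i32 %t, 8
/// decomposing %v returns %x with Scale = 4 and Offset = 8.
///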
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    // We specifically require nuw because we store the Scale in an unsigned
    // and perform an unsigned divide on it.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C.  Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
            decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
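/// For example (illustrative, assuming a data layout where the ABI alignment
/// of i64 is at least that of i32), a single-use
///   %a = alloca i32, i32 2
///   %b = bitcast i32* %a to i64*
/// can be promoted to
///   %a = alloca i64
/// so that the type information lives on the alloca itself.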
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());
  // Opaque pointers don't have an element type we could replace with.
  if (PTy->isOpaque())
    return nullptr;

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getNonOpaquePointerElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  // This optimisation does not work for cases where the cast type
  // is scalable and the allocated type is not. This is because we need to
  // know how many times the casted type fits into the allocated type.
  // For the opposite case where the allocated type is scalable and the
  // cast type is not this leads to poor code quality due to the
  // introduction of 'vscale' into the calculations. It seems better to
  // bail out for this case too until we've done a proper cost-benefit
  // analysis.
  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
  if (AllocIsScalable != CastIsScalable) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation.  If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  // The alloc and cast types should be either both fixed or both scalable.
  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize();
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize();
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
      decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
  // If we can now satisfy the modulus by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // We don't currently support arrays of scalable types.
  assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0));

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, AI.getAddressSpace(), Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast.  This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
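/// For example (illustrative), evaluating
///   %v = add i32 (zext i16 %x to i32), 42
/// in type i16 produces 'add i16 %x, 42': the zext whose source type already
/// matches the target type is dropped, and the constant is recast.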
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source.  There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

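/// Determine whether the pair of casts CI1 (A -> B) and CI2 (B -> C) folds to
/// a single cast A -> C, returning that cast opcode, or zero if no single
/// cast suffices. For example (illustrative), a 'zext i8 -> i32' followed by
/// a 'trunc i32 -> i16' is eliminable as a single 'zext i8 -> i16'.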
Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);
  Type *Ty = CI.getType();

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  // Canonicalize a unary shuffle after the cast if neither operation changes
  // the size or element size of the input vector.
  // TODO: We could allow size-changing ops if that doesn't harm codegen.
  // cast (shuffle X, Mask) --> shuffle (cast X), Mask
  Value *X;
  ArrayRef<int> Mask;
  if (match(Src, m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))) {
    // TODO: Allow scalable vectors?
    auto *SrcTy = dyn_cast<FixedVectorType>(X->getType());
    auto *DestTy = dyn_cast<FixedVectorType>(Ty);
    if (SrcTy && DestTy &&
        SrcTy->getNumElements() == DestTy->getNumElements() &&
        SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
      Value *CastX = Builder.CreateCast(CI.getOpcode(), X, DestTy);
      return new ShuffleVectorInst(CastX, Mask);
    }
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we can not evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than V.  We should return true if trunc(V)
/// can be computed by computing V in the smaller type.  If V is an instruction,
/// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
/// makes sense if x and y can be efficiently truncated.
///
/// This function works on both vectors and scalars.
///
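/// For example (illustrative), given a single-use
///   %a = and i32 (zext i16 %x to i32), 255
///   %t = trunc i32 %a to i16
/// the 'and' can be evaluated directly in i16 as 'and i16 %x, 255', so this
/// returns true for it.
///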
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // inrange amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    //       zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // original type and the sign bit of the truncate type are similar.
    // TODO: It is enough to check that the bits we would be shifting in are
    //       similar to sign bit of the truncate type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Funnel/Rotate left/right may occur in a wider type than necessary because of
/// type promotion rules. Try to narrow the inputs and convert to funnel shift.
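/// For example (a schematic i8 rotate done in i32, with placeholder values):
///   %z  = zext i8 %x to i32
///   %s  = sub i32 8, %amt
///   %or = or i32 (shl i32 %z, %amt), (lshr i32 %z, %s)
///   %r  = trunc i32 %or to i8
/// narrows to
///   %r = call i8 @llvm.fshl.i8(i8 trunc(%z), i8 trunc(%z), i8 trunc(%amt))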
Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts:
  // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1))
  BinaryOperator *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
    return nullptr;

  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(ShVal0, ShVal1);
    std::swap(ShAmt0, ShAmt1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Match the shift amount operands for a funnel/rotate pattern. This always
  // matches a subtraction on the R operand.
  auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal0, L) | (lshr ShVal1, Width - L)
    // If this is a funnel shift (different operands are shifted), then the
    // shift amount can not over-shift (create poison) in the narrow type.
    unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
    APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
    if (ShVal0 == ShVal1 || MaskedValueIsZero(L, HiBitMask))
      if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
        return L;

    // The following patterns currently only work for rotation patterns.
    // TODO: Add more general funnel-shift compatible patterns.
    if (ShVal0 != ShVal1)
      return nullptr;

    // The shift amount may be masked with negation:
    // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool IsFshl = true; // Sub on LSHR.
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    IsFshl = false; // Sub on SHL.
  }
  if (!ShAmt)
    return nullptr;

  // The right-shifted value must have high zeros in the wide type (for example
  // from 'zext', 'and' or 'shift'). High bits of the left-shifted value are
  // truncated, so those do not matter.
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal1, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (shl ShVal0, ShAmt), (lshr ShVal1, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X, *Y;
  X = Y = Builder.CreateTrunc(ShVal0, DestTy);
  if (ShVal0 != ShVal1)
    Y = Builder.CreateTrunc(ShVal1, DestTy);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return CallInst::Create(F, {X, Y, NarrowShAmt});
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
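/// For example (illustrative, assuming the narrow type is desirable for the
/// data layout), a single-use
///   %a = and i64 %x, 15
///   %t = trunc i64 %a to i32
/// becomes
///   %t = and i32 (trunc i64 %x to i32), 15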
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();
  unsigned DestWidth = DestTy->getScalarSizeInBits();

  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }
  case Instruction::LShr:
  case Instruction::AShr: {
    // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
    Value *A;
    Constant *C;
    if (match(BinOp0, m_Trunc(m_Value(A))) && match(BinOp1, m_Constant(C))) {
      unsigned MaxShiftAmt = SrcWidth - DestWidth;
      // If the shift is small enough, all zero/sign bits created by the shift
      // are removed by the trunc.
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                      APInt(SrcWidth, MaxShiftAmt)))) {
        auto *OldShift = cast<Instruction>(Trunc.getOperand(0));
        bool IsExact = OldShift->isExact();
        auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true);
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        Value *Shift =
            OldShift->getOpcode() == Instruction::AShr
                ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
                : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
        return CastInst::CreateTruncOrBitCast(Shift, DestTy);
      }
    }
    break;
  }
  default: break;
  }

  if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
      is_splat(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Poison, SplatMask
    // trunc (shuf X, Poison, SplatMask) --> shuf (trunc X), Poison, SplatMask
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, Shuf->getShuffleMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (match(VecOp, m_Undef())) {
    // trunc   (inselt undef, X, Index) --> inselt undef,   (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type.  Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
810 " to reduce the width of operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  Value *A, *B;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of a shift by a constant: transforming those would undo a
      // combine from FoldShiftByConstant, and they form the extend-in-register
      // pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<VectorType>(VecOp->getType());
    auto VecElts = VecOpTy->getElementCount();

    // A badly fit destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo =
          VectorType::get(DestTy, BitCastNumElts, VecElts.isScalable());
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  // trunc (ctlz_i32(zext(A), B)) --> add(ctlz_i16(A, B), C), where C is the
  // width difference between the source and A's types.
  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
                                                       m_Value(B))))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
      Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
      Value *NarrowCtlz =
          Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
      return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
    }
  }

  if (match(Src, m_VScale(DL))) {
    if (Trunc.getFunction() &&
        Trunc.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr =
          Trunc.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (Optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        if (Log2_32(*MaxVScale) < DestWidth) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(Trunc, VScale);
        }
      }
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp,
                                                 ZExtInst &Zext) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.

  // FIXME: This set of transforms does not check for extra uses and/or creates
  //        an extra instruction (an optional final cast is not included
  //        in the transform comments). We may also want to favor icmp over
  //        shifts in cases of equal instructions because icmp has better
  //        analysis in general (invert the transform).

  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s 0) to i32 --> x >>u 31 (true if the sign bit is set).
    if (Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isZero()) {
      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isZero() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isZero() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isZero() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    // Test if a bit is clear/set using a shifted-one mask:
    // zext (icmp eq (and X, (1 << ShAmt)), 0) --> and (lshr (not X), ShAmt), 1
    // zext (icmp ne (and X, (1 << ShAmt)), 0) --> and (lshr X, ShAmt), 1
    Value *X, *ShAmt;
    if (Cmp->hasOneUse() && match(Cmp->getOperand(1), m_ZeroInt()) &&
        match(Cmp->getOperand(0),
              m_OneUse(m_c_And(m_Shl(m_One(), m_Value(ShAmt)), m_Value(X))))) {
      if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
        X = Builder.CreateNot(X);
      Value *Lshr = Builder.CreateLShr(X, ShAmt);
      Value *And1 = Builder.CreateAnd(Lshr, ConstantInt::get(X->getType(), 1));
      return replaceInstUsesWith(Zext, And1);
    }

    // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
    // It is also profitable to transform icmp eq into not(xor(A, B)) because
    // that may lead to additional simplifications.
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS == KnownRHS) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out.  For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63.  Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x.  Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x.  This requires the
    // eventual 'and' to clear out the additional high bits that the shift
    // pulls down, though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands.  Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common cast transforms works, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this!  Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize - SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize,
                                                        SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved.  We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    //   SrcSize <  DstSize: zext(a & mask)
    //   SrcSize == DstSize: a & mask
    //   SrcSize  > DstSize: trunc(a) & mask
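    // For example (illustrative, the SrcSize > DstSize case with a 32-bit %a):
    //   zext i8 (trunc i32 %a to i8) to i16
    //     --> and i16 (trunc i32 %a to i16), 255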
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (match(Src, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (match(Src, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  if (match(Src, m_VScale(DL))) {
    if (CI.getFunction() &&
        CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr = CI.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (Optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
        if (Log2_32(*MaxVScale) < TypeWidth) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(CI, VScale);
        }
      }
    }
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
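/// For example (illustrative), 'sext (icmp slt i32 %x, 0) to i32' becomes
/// 'ashr i32 %x, 31', which is all-ones exactly when %x is negative.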
transformSExtICmp(ICmpInst * ICI,Instruction & CI)1362 Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI,
1363 Instruction &CI) {
1364 Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
1365 ICmpInst::Predicate Pred = ICI->getPredicate();
1366
1367 // Don't bother if Op1 isn't of vector or integer type.
1368 if (!Op1->getType()->isIntOrIntVectorTy())
1369 return nullptr;
1370
1371 if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
1372 (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
1373 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if negative
1374 // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
1375 Value *Sh = ConstantInt::get(Op0->getType(),
1376 Op0->getType()->getScalarSizeInBits() - 1);
1377 Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
1378 if (In->getType() != CI.getType())
1379 In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);
1380
1381 if (Pred == ICmpInst::ICMP_SGT)
1382 In = Builder.CreateNot(In, In->getName() + ".not");
1383 return replaceInstUsesWith(CI, In);
1384 }
1385
1386 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1387 // If we know that only one bit of the LHS of the icmp can be set and we
1388 // have an equality comparison with zero or a power of 2, we can transform
1389 // the icmp and sext into bitwise/integer operations.
1390 if (ICI->hasOneUse() &&
1391 ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
1392 KnownBits Known = computeKnownBits(Op0, 0, &CI);
1393
1394 APInt KnownZeroMask(~Known.Zero);
1395 if (KnownZeroMask.isPowerOf2()) {
1396 Value *In = ICI->getOperand(0);
1397
1398 // If the icmp tests for a known zero bit we can constant fold it.
1399 if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
1400 Value *V = Pred == ICmpInst::ICMP_NE ?
1401 ConstantInt::getAllOnesValue(CI.getType()) :
1402 ConstantInt::getNullValue(CI.getType());
1403 return replaceInstUsesWith(CI, V);
1404 }
1405
1406 if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
1407 // sext ((x & 2^n) == 0) -> (x >> n) - 1
1408 // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
1409 unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
1410 // Perform a right shift to place the desired bit in the LSB.
1411 if (ShiftAmt)
1412 In = Builder.CreateLShr(In,
1413 ConstantInt::get(In->getType(), ShiftAmt));
1414
1415 // At this point "In" is either 1 or 0. Subtract 1 to turn
1416 // {1, 0} -> {0, -1}.
1417 In = Builder.CreateAdd(In,
1418 ConstantInt::getAllOnesValue(In->getType()),
1419 "sext");
1420 } else {
1421 // sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1
1422 // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
1423 unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
1424 // Perform a left shift to place the desired bit in the MSB.
1425 if (ShiftAmt)
1426 In = Builder.CreateShl(In,
1427 ConstantInt::get(In->getType(), ShiftAmt));
1428
1429 // Distribute the bit over the whole bit width.
1430 In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
1431 KnownZeroMask.getBitWidth() - 1), "sext");
1432 }
1433
1434 if (CI.getType() == In->getType())
1435 return replaceInstUsesWith(CI, In);
1436 return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
1437 }
1438 }
1439 }
1440
1441 return nullptr;
1442 }

/// Return true if we can take the specified value and return it as type Ty
/// without inserting any new casts and without changing the value of the
/// common low bits. This is used by code that tries to promote integer
/// operations to a wider type when that will allow us to eliminate the
/// extension.
///
/// This function works on both vectors and scalars.
///
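/// A minimal illustrative example: given
///   %t = add i16 (trunc i32 %a to i16), 42
///   %s = sext i16 %t to i32
/// the add can instead be evaluated directly in i32 (on %a and 42),
/// eliminating the extension, provided each operand qualifies below.
///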
static bool canEvaluateSExtd(Value *V, Type *Ty) {
  assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
         "Can't sign extend type to a smaller type");
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  switch (I->getOpcode()) {
  case Instruction::SExt:  // sext(sext(x)) -> sext(x)
  case Instruction::ZExt:  // sext(zext(x)) -> zext(x)
  case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    // These operators can all arbitrarily be extended if their inputs can.
    return canEvaluateSExtd(I->getOperand(0), Ty) &&
           canEvaluateSExtd(I->getOperand(1), Ty);

  //case Instruction::Shl:   TODO
  //case Instruction::LShr:  TODO

  case Instruction::Select:
    return canEvaluateSExtd(I->getOperand(1), Ty) &&
           canEvaluateSExtd(I->getOperand(2), Ty);

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateSExtd(IncValue, Ty)) return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
  // If this sign extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this sext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();
  unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
  unsigned DestBitSize = DestTy->getScalarSizeInBits();

  // If the value being extended is zero or positive, use a zext instead.
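  // For example (illustrative), since (and i8 %x, 15) has a clear sign bit:
  //   sext (and i8 %x, 15) to i32  -->  zext (and i8 %x, 15) to i32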
  if (isKnownNonNegative(Src, DL, 0, &AC, &CI, &DT))
    return CastInst::Create(Instruction::ZExt, Src, DestTy);

  // Try to extend the entire expression tree to the wide destination type.
  if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
    // Okay, we can transform this! Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid sign extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, true);
    assert(Res->getType() == DestTy);

    // If the high bits are already filled with sign bit, just replace this
    // cast with the result.
    if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
      return replaceInstUsesWith(CI, Res);

    // We need to emit a shl + ashr to do the sign extend.
    Value *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
    return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
                                      ShAmt);
  }

  Value *X;
  if (match(Src, m_Trunc(m_Value(X)))) {
    // If the input has more sign bits than bits truncated, then convert
    // directly to final type.
    unsigned XBitSize = X->getType()->getScalarSizeInBits();
    if (ComputeNumSignBits(X, 0, &CI) > XBitSize - SrcBitSize)
      return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);

    // If input is a trunc from the destination type, then convert into shifts.
    if (Src->hasOneUse() && X->getType() == DestTy) {
      // sext (trunc X) --> ashr (shl X, C), C
      Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
      return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
    }

    // If we are replacing shifted-in high zero bits with sign bits, convert
    // the logic shift to arithmetic shift and eliminate the cast to
    // intermediate type:
    // sext (trunc (lshr Y, C)) --> sext/trunc (ashr Y, C)
    Value *Y;
    if (Src->hasOneUse() &&
        match(X, m_LShr(m_Value(Y),
                        m_SpecificIntAllowUndef(XBitSize - SrcBitSize)))) {
      Value *Ashr = Builder.CreateAShr(Y, XBitSize - SrcBitSize);
      return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
    }
  }

  if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
    return transformSExtICmp(ICI, CI);

  // If the input is a shl/ashr pair of the same constant, then this is a sign
  // extension from a smaller value. If we could trust arbitrary bitwidth
  // integers, we could turn this into a truncate to the smaller bit width and
  // then use a sext for the whole extension. Since we don't, look deeper and
  // check for a truncate. If the source and dest are the same type, eliminate
  // the trunc and extend and just do shifts. For example, turn:
  //   %a = trunc i32 %i to i8
  //   %b = shl i8 %a, C
  //   %c = ashr i8 %b, C
  //   %d = sext i8 %c to i32
  // into:
  //   %a = shl i32 %i, 32-(8-C)
  //   %d = ashr i32 %a, 32-(8-C)
  Value *A = nullptr;
  // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
  Constant *BA = nullptr, *CA = nullptr;
  if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
                        m_Constant(CA))) &&
      BA->isElementWiseEqual(CA) && A->getType() == DestTy) {
    Constant *WideCurrShAmt = ConstantExpr::getSExt(CA, DestTy);
    Constant *NumLowbitsLeft = ConstantExpr::getSub(
        ConstantInt::get(DestTy, SrcTy->getScalarSizeInBits()), WideCurrShAmt);
    Constant *NewShAmt = ConstantExpr::getSub(
        ConstantInt::get(DestTy, DestTy->getScalarSizeInBits()),
        NumLowbitsLeft);
    NewShAmt =
        Constant::mergeUndefsWith(Constant::mergeUndefsWith(NewShAmt, BA), CA);
    A = Builder.CreateShl(A, NewShAmt, CI.getName());
    return BinaryOperator::CreateAShr(A, NewShAmt);
  }

  // Splatting a bit of constant-index across a value:
  // sext (ashr (trunc iN X to iM), M-1) to iN --> ashr (shl X, N-M), N-1
  // If the dest type is different, use a cast (adjust use check).
  if (match(Src, m_OneUse(m_AShr(m_Trunc(m_Value(X)),
                                 m_SpecificInt(SrcBitSize - 1))))) {
    Type *XTy = X->getType();
    unsigned XBitSize = XTy->getScalarSizeInBits();
    Constant *ShlAmtC = ConstantInt::get(XTy, XBitSize - SrcBitSize);
    Constant *AshrAmtC = ConstantInt::get(XTy, XBitSize - 1);
    if (XTy == DestTy)
      return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShlAmtC),
                                        AshrAmtC);
    if (cast<BinaryOperator>(Src)->getOperand(0)->hasOneUse()) {
      Value *Ashr = Builder.CreateAShr(Builder.CreateShl(X, ShlAmtC), AshrAmtC);
      return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
    }
  }

  if (match(Src, m_VScale(DL))) {
    if (CI.getFunction() &&
        CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      Attribute Attr = CI.getFunction()->getFnAttribute(Attribute::VScaleRange);
      if (Optional<unsigned> MaxVScale = Attr.getVScaleRangeMax()) {
        if (Log2_32(*MaxVScale) < (SrcBitSize - 1)) {
          Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
          return replaceInstUsesWith(CI, VScale);
        }
      }
    }
  }

  return nullptr;
}

/// Return true if the specified floating-point constant fits in the specified
/// FP type without changing its value.
static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
  bool losesInfo;
  APFloat F = CFP->getValueAPF();
  (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
  return !losesInfo;
}

static Type *shrinkFPConstant(ConstantFP *CFP) {
  if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
    return nullptr; // No constant folding of this.
  // See if the value can be truncated to half and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEhalf()))
    return Type::getHalfTy(CFP->getContext());
  // See if the value can be truncated to float and then reextended.
  if (fitsInFPType(CFP, APFloat::IEEEsingle()))
    return Type::getFloatTy(CFP->getContext());
  if (CFP->getType()->isDoubleTy())
    return nullptr; // Won't shrink.
  if (fitsInFPType(CFP, APFloat::IEEEdouble()))
    return Type::getDoubleTy(CFP->getContext());
  // Don't try to shrink to various long double types.
  return nullptr;
}

// Determine if this is a vector of ConstantFPs and if so, return the minimal
// type we can safely truncate all elements to.
// TODO: Make these support undef elements.
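// For example (illustrative): <2 x double> <double 0.5, double 65536.0>
// shrinks to <2 x float> (65536 overflows half's finite range but is exact
// in float), whereas a vector containing 0.1 cannot shrink at all, since
// 0.1 is not exactly representable in any narrower type.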
static Type *shrinkFPConstantVector(Value *V) {
  auto *CV = dyn_cast<Constant>(V);
  auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
  if (!CV || !CVVTy)
    return nullptr;

  Type *MinType = nullptr;

  unsigned NumElts = CVVTy->getNumElements();

  // For fixed-width vectors we find the minimal type by looking
  // through the constant values of the vector.
  for (unsigned i = 0; i != NumElts; ++i) {
    auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
    if (!CFP)
      return nullptr;

    Type *T = shrinkFPConstant(CFP);
    if (!T)
      return nullptr;

    // If we haven't found a type yet or this type has a larger mantissa than
    // our previous type, this is our new minimal type.
    if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
      MinType = T;
  }

  // Make a vector type from the minimal type.
  return FixedVectorType::get(MinType, NumElts);
}

/// Find the minimum FP type we can safely truncate to.
static Type *getMinimumFPType(Value *V) {
  if (auto *FPExt = dyn_cast<FPExtInst>(V))
    return FPExt->getOperand(0)->getType();

  // If this value is a constant, return the constant in the smallest FP type
  // that can accurately represent it. This allows us to turn
  // (float)((double)X+2.0) into x+2.0f.
  if (auto *CFP = dyn_cast<ConstantFP>(V))
    if (Type *T = shrinkFPConstant(CFP))
      return T;

  // We can only correctly find a minimum type for a scalable vector when it is
  // a splat. For splats of constant values the fpext is wrapped up as a
  // ConstantExpr.
  if (auto *FPCExt = dyn_cast<ConstantExpr>(V))
    if (FPCExt->getOpcode() == Instruction::FPExt)
      return FPCExt->getOperand(0)->getType();

  // Try to shrink a vector of FP constants. This returns nullptr on scalable
  // vectors.
  if (Type *T = shrinkFPConstantVector(V))
    return T;

  return V->getType();
}

/// Return true if the cast from integer to FP can be proven to be exact for
/// all possible inputs (the conversion does not lose any precision).
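/// Illustrative examples: 'uitofp i24 %x to float' is always exact because
/// float carries 24 significand bits, while 'uitofp i32 %x to float' is not,
/// since e.g. 16777217 (2^24+1) rounds to 2^24.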
static bool isKnownExactCastIntToFP(CastInst &I, InstCombinerImpl &IC) {
  CastInst::CastOps Opcode = I.getOpcode();
  assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
         "Unexpected cast");
  Value *Src = I.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *FPTy = I.getType();
  bool IsSigned = Opcode == Instruction::SIToFP;
  int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;

  // Easy case - if the source integer type has less bits than the FP mantissa,
  // then the cast must be exact.
  int DestNumSigBits = FPTy->getFPMantissaWidth();
  if (SrcSize <= DestNumSigBits)
    return true;

  // Cast from FP to integer and back to FP is independent of the intermediate
  // integer width because of poison on overflow.
  Value *F;
  if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
    // If this is uitofp (fptosi F), the source needs an extra bit to avoid
    // potential rounding of negative FP input values.
    int SrcNumSigBits = F->getType()->getFPMantissaWidth();
    if (!IsSigned && match(Src, m_FPToSI(m_Value())))
      SrcNumSigBits++;

    // [su]itofp (fpto[su]i F) --> exact if the source type has less or equal
    // significant bits than the destination (and make sure neither type is
    // weird -- ppc_fp128).
    if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
        SrcNumSigBits <= DestNumSigBits)
      return true;
  }

  // TODO:
  // Try harder to find if the source integer type has less significant bits.
  // For example, compute number of sign bits.
  KnownBits SrcKnown = IC.computeKnownBits(Src, 0, &I);
  int SigBits = (int)SrcTy->getScalarSizeInBits() -
                SrcKnown.countMinLeadingZeros() -
                SrcKnown.countMinTrailingZeros();
  if (SigBits <= DestNumSigBits)
    return true;

  return false;
}

Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
  if (Instruction *I = commonCastTransforms(FPT))
    return I;

  // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
  // simplify this expression to avoid one or more of the trunc/extend
  // operations if we can do so without changing the numerical results.
  //
  // The exact manner in which the widths of the operands interact to limit
  // what we can and cannot do safely varies from operation to operation, and
  // is explained below in the various case statements.
  Type *Ty = FPT.getType();
  auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
  if (BO && BO->hasOneUse()) {
    Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
    Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
    unsigned OpWidth = BO->getType()->getFPMantissaWidth();
    unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
    unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
    unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
    unsigned DstWidth = Ty->getFPMantissaWidth();
    switch (BO->getOpcode()) {
    default: break;
    case Instruction::FAdd:
    case Instruction::FSub:
      // For addition and subtraction, the infinitely precise result can
      // essentially be arbitrarily wide; proving that double rounding
      // will not occur because the result of OpI is exact (as we will for
      // FMul, for example) is hopeless. However, we *can* nonetheless
      // frequently know that double rounding cannot occur (or that it is
      // innocuous) by taking advantage of the specific structure of
      // infinitely-precise results that admit double rounding.
      //
      // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
      // to represent both sources, we can guarantee that the double
      // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
      // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
      // for proof of this fact).
      //
      // Note: Figueroa does not consider the case where DstFormat !=
      // SrcFormat. It's possible (likely even!) that this analysis
      // could be tightened for those cases, but they are rare (the main
      // case of interest here is (float)((double)float + float)).
      if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
        RI->copyFastMathFlags(BO);
        return RI;
      }
      break;
    case Instruction::FMul:
      // For multiplication, the infinitely precise result has at most
      // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
      // that such a value can be exactly represented, then no double
      // rounding can possibly occur; we can safely perform the operation
      // in the destination format if it can represent both sources.
      if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FDiv:
      // For division, we again use the bound from Figueroa's
      // dissertation. I am entirely certain that this bound can be
      // tightened in the unbalanced operand case by an analysis based on
      // the diophantine rational approximation bound, but the well-known
      // condition used here is a good conservative first pass.
      // TODO: Tighten bound via rigorous analysis of the unbalanced case.
      if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
        Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
        Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
        return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
      }
      break;
    case Instruction::FRem: {
      // Remainder is straightforward. Remainder is always exact, so the
      // type of OpI doesn't enter into things at all. We simply evaluate
      // in whichever source type is larger, then convert to the
      // destination type.
      if (SrcWidth == OpWidth)
        break;
      Value *LHS, *RHS;
      if (LHSWidth == SrcWidth) {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
      } else {
        LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
        RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
      }

      Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
      return CastInst::CreateFPCast(ExactResult, Ty);
    }
    }
  }

  // (fptrunc (fneg x)) -> (fneg (fptrunc x))
  Value *X;
  Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
  if (Op && Op->hasOneUse()) {
    // FIXME: The FMF should propagate from the fptrunc, not the source op.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    if (isa<FPMathOperator>(Op))
      Builder.setFastMathFlags(Op->getFastMathFlags());

    if (match(Op, m_FNeg(m_Value(X)))) {
      Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);

      return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
    }

    // If we are truncating a select that has an extended operand, we can
    // narrow the other operand and do the select as a narrow op.
    Value *Cond, *X, *Y;
    if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
    if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
        X->getType() == Ty) {
      // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
      Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
      Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
      return replaceInstUsesWith(FPT, Sel);
    }
  }

  if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::ceil:
    case Intrinsic::fabs:
    case Intrinsic::floor:
    case Intrinsic::nearbyint:
    case Intrinsic::rint:
    case Intrinsic::round:
    case Intrinsic::roundeven:
    case Intrinsic::trunc: {
      Value *Src = II->getArgOperand(0);
      if (!Src->hasOneUse())
        break;

      // Except for fabs, this transformation requires the input of the unary
      // FP operation to be itself an fpext from the type to which we're
      // truncating.
      if (II->getIntrinsicID() != Intrinsic::fabs) {
        FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
        if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
          break;
      }

      // Do unary FP operation on smaller type.
      // (fptrunc (fabs x)) -> (fabs (fptrunc x))
      Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
      Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
                                                     II->getIntrinsicID(), Ty);
      SmallVector<OperandBundleDef, 1> OpBundles;
      II->getOperandBundlesAsDefs(OpBundles);
      CallInst *NewCI =
          CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
      NewCI->copyFastMathFlags(II);
      return NewCI;
    }
    }
  }

  if (Instruction *I = shrinkInsertElt(FPT, Builder))
    return I;

  Value *Src = FPT.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast, *this))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
  // If the source operand is a cast from integer to FP and known exact, then
  // cast the integer operand directly to the destination type.
  Type *Ty = FPExt.getType();
  Value *Src = FPExt.getOperand(0);
  if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
    auto *FPCast = cast<CastInst>(Src);
    if (isKnownExactCastIntToFP(*FPCast, *this))
      return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
  }

  return commonCastTransforms(FPExt);
}

/// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
/// This is safe if the intermediate type has enough bits in its mantissa to
/// accurately represent all values of X. For example, this won't work with
/// i64 -> float -> i64.
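/// A concrete illustrative case that does fold: double carries a 53-bit
/// significand, so
///   %f = sitofp i16 %x to double
///   %i = fptosi double %f to i32
/// becomes 'sext i16 %x to i32'.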
Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
  if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
    return nullptr;

  auto *OpI = cast<CastInst>(FI.getOperand(0));
  Value *X = OpI->getOperand(0);
  Type *XType = X->getType();
  Type *DestType = FI.getType();
  bool IsOutputSigned = isa<FPToSIInst>(FI);

  // Since we can assume the conversion won't overflow, our decision as to
  // whether the input will fit in the float should depend on the minimum
  // of the input range and output range.

  // This means this is also safe for a signed input and unsigned output, since
  // a negative input would lead to undefined behavior.
  if (!isKnownExactCastIntToFP(*OpI, *this)) {
    // The first cast may not round exactly based on the source integer width
    // and FP width, but the overflow UB rules can still allow this to fold.
    // If the destination type is narrow, that means the intermediate FP value
    // must be large enough to hold the source value exactly.
    // For example, (uint8_t)(float)(uint32_t)16777217 is undefined behavior.
    int OutputSize = (int)DestType->getScalarSizeInBits();
    if (OutputSize > OpI->getType()->getFPMantissaWidth())
      return nullptr;
  }

  if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
    bool IsInputSigned = isa<SIToFPInst>(OpI);
    if (IsInputSigned && IsOutputSigned)
      return new SExtInst(X, DestType);
    return new ZExtInst(X, DestType);
  }
  if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
    return new TruncInst(X, DestType);

  assert(XType == DestType && "Unexpected types for int to FP to int casts");
  return replaceInstUsesWith(FI, X);
}

Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
  if (Instruction *I = foldItoFPtoI(FI))
    return I;

  return commonCastTransforms(FI);
}

Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
  // If the source integer type is not the intptr_t type for this target, do a
  // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
  // cast to be exposed to other transforms.
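  // Illustrative example, assuming 64-bit pointers in this address space:
  //   inttoptr i32 %x to i8*
  // becomes:
  //   %w = zext i32 %x to i64
  //   inttoptr i64 %w to i8*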
  unsigned AS = CI.getAddressSpace();
  if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
      DL.getPointerSizeInBits(AS)) {
    Type *Ty = CI.getOperand(0)->getType()->getWithNewType(
        DL.getIntPtrType(CI.getContext(), AS));
    Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
    return new IntToPtrInst(P, CI.getType());
  }

  if (Instruction *I = commonCastTransforms(CI))
    return I;

  return nullptr;
}

/// Implement the transforms for cast of pointer (bitcast/ptrtoint)
Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);

  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
    // If casting the result of a getelementptr instruction with no offset,
    // turn this into a cast of the original pointer!
    if (GEP->hasAllZeroIndices() &&
        // If CI is an addrspacecast and GEP changes the pointer type, merging
        // GEP into CI would undo canonicalizing addrspacecast with different
        // pointer types, causing infinite loops.
        (!isa<AddrSpaceCastInst>(CI) ||
         GEP->getType() == GEP->getPointerOperandType())) {
      // Changing the cast operand is usually not a good idea but it is safe
      // here because the pointer operand is being replaced with another
      // pointer operand so the opcode doesn't need to change.
      return replaceOperand(CI, 0, GEP->getOperand(0));
    }
  }

  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
  // If the destination integer type is not the intptr_t type for this target,
  // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
  // to be exposed to other transforms.
  Value *SrcOp = CI.getPointerOperand();
  Type *SrcTy = SrcOp->getType();
  Type *Ty = CI.getType();
  unsigned AS = CI.getPointerAddressSpace();
  unsigned TySize = Ty->getScalarSizeInBits();
  unsigned PtrSize = DL.getPointerSizeInBits(AS);
  if (TySize != PtrSize) {
    Type *IntPtrTy =
        SrcTy->getWithNewType(DL.getIntPtrType(CI.getContext(), AS));
    Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
    return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
  }

  if (auto *GEP = dyn_cast<GetElementPtrInst>(SrcOp)) {
    // Fold ptrtoint(gep null, x) to multiply + constant if the GEP has one
    // use. While this can increase the number of instructions it doesn't
    // actually increase the overall complexity since the arithmetic is just
    // part of the GEP otherwise.
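    // Illustrative example (64-bit pointers assumed):
    //   ptrtoint (gep i32, i32* null, i64 %x) to i64  -->  mul i64 %x, 4
    // i.e. EmitGEPOffset materializes the byte offset directly (the multiply
    // may also be emitted as a shift).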
    if (GEP->hasOneUse() &&
        isa<ConstantPointerNull>(GEP->getPointerOperand())) {
      return replaceInstUsesWith(CI,
                                 Builder.CreateIntCast(EmitGEPOffset(GEP), Ty,
                                                       /*isSigned=*/false));
    }
  }

  Value *Vec, *Scalar, *Index;
  if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
                                        m_Value(Scalar), m_Value(Index)))) &&
      Vec->getType() == Ty) {
    assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
    // Convert the scalar to int followed by insert to eliminate one cast:
    // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
    Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
    return InsertElementInst::Create(Vec, NewCast, Index);
  }

  return commonPointerCastTransforms(CI);
}

/// This input value (which is known to have vector type) is being zero
/// extended or truncated to the specified vector type. Since the zext/trunc
/// is done using an integer type, we have a (bitcast(cast(bitcast))) pattern,
/// endianness will impact which end of the vector that is extended or
/// truncated.
///
/// A vector is always stored with index 0 at the lowest address, which
/// corresponds to the most significant bits for a big endian stored integer
/// and the least significant bits for little endian. A trunc/zext of an
/// integer impacts the big end of the integer. Thus, we need to add/remove
/// elements at the front of the vector for big endian targets, and the back
/// of the vector for little endian targets.
///
/// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
///
/// The source and destination vector types may have different element types.
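///
/// Illustrative example of the truncating case on a little endian target:
///   bitcast (trunc (bitcast <4 x i32> %v to i128) to i64) to <2 x i32>
/// keeps the low half, i.e. elements 0 and 1 of %v:
///   shufflevector <4 x i32> %v, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
/// (a big endian target would keep elements 2 and 3 instead).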
static Instruction *
optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
                                        InstCombinerImpl &IC) {
  // We can only do this optimization if the output is a multiple of the input
  // element size, or the input is a multiple of the output element size.
  // Convert the input type to have the same element type as the output.
  VectorType *SrcTy = cast<VectorType>(InVal->getType());

  if (SrcTy->getElementType() != DestTy->getElementType()) {
    // The input types don't need to be identical, but for now they must be the
    // same size. There is no specific reason we couldn't handle things like
    // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
    // there yet.
    if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
        DestTy->getElementType()->getPrimitiveSizeInBits())
      return nullptr;

    SrcTy =
        FixedVectorType::get(DestTy->getElementType(),
                             cast<FixedVectorType>(SrcTy)->getNumElements());
    InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
  }

  bool IsBigEndian = IC.getDataLayout().isBigEndian();
  unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
  unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();

  assert(SrcElts != DestElts && "Element counts should be different.");

  // Now that the element types match, get the shuffle mask and RHS of the
  // shuffle to use, which depends on whether we're increasing or decreasing
  // the size of the input.
  auto ShuffleMaskStorage = llvm::to_vector<16>(llvm::seq<int>(0, SrcElts));
  ArrayRef<int> ShuffleMask;
  Value *V2;

  if (SrcElts > DestElts) {
    // If we're shrinking the number of elements (rewriting an integer
    // truncate), just shuffle in the elements corresponding to the least
    // significant bits from the input and use poison as the second shuffle
    // input.
    V2 = PoisonValue::get(SrcTy);
    // Make sure the shuffle mask selects the "least significant bits" by
    // keeping elements from the back of the src vector for big endian, and
    // from the front for little endian.
    ShuffleMask = ShuffleMaskStorage;
    if (IsBigEndian)
      ShuffleMask = ShuffleMask.take_back(DestElts);
    else
      ShuffleMask = ShuffleMask.take_front(DestElts);
  } else {
    // If we're increasing the number of elements (rewriting an integer zext),
    // shuffle in all of the elements from InVal. Fill the rest of the result
    // elements with zeros from a constant zero.
    V2 = Constant::getNullValue(SrcTy);
    // Use first elt from V2 when indicating zero in the shuffle mask.
    uint32_t NullElt = SrcElts;
    // Extend with null values in the "most significant bits" by adding
    // elements in front of the src vector for big endian, and at the back
    // for little endian.
    unsigned DeltaElts = DestElts - SrcElts;
    if (IsBigEndian)
      ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
    else
      ShuffleMaskStorage.append(DeltaElts, NullElt);
    ShuffleMask = ShuffleMaskStorage;
  }

  return new ShuffleVectorInst(InVal, V2, ShuffleMask);
}

static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
  return Value % Ty->getPrimitiveSizeInBits() == 0;
}

static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
  return Value / Ty->getPrimitiveSizeInBits();
}

/// V is a value which is inserted into a vector of VecEltTy.
/// Look through the value to see if we can decompose it into
/// insertions into the vector. See the example in the comment for
/// OptimizeIntegerToVectorInsertions for the pattern this handles.
/// The type of V is always a non-zero multiple of VecEltTy's size.
/// Shift is the number of bits between the lsb of V and the lsb of
/// the vector.
///
/// This returns false if the pattern can't be matched or true if it can,
/// filling in Elements with the elements found here.
static bool collectInsertionElements(Value *V, unsigned Shift,
                                     SmallVectorImpl<Value *> &Elements,
                                     Type *VecEltTy, bool isBigEndian) {
  assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
         "Shift should be a multiple of the element type size");

  // Undef values never contribute useful bits to the result.
  if (isa<UndefValue>(V)) return true;

  // If we got down to a value of the right type, we win, try inserting into
  // the right element.
  if (V->getType() == VecEltTy) {
    // Inserting null doesn't actually insert any elements.
    if (Constant *C = dyn_cast<Constant>(V))
      if (C->isNullValue())
        return true;

    unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
    if (isBigEndian)
      ElementIndex = Elements.size() - ElementIndex - 1;

    // Fail if multiple elements are inserted into this slot.
    if (Elements[ElementIndex])
      return false;

    Elements[ElementIndex] = V;
    return true;
  }

  if (Constant *C = dyn_cast<Constant>(V)) {
    // Figure out the # elements this provides, and bitcast it or slice it up
    // as required.
    unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
                                        VecEltTy);
    // If the constant is the size of a vector element, we just need to bitcast
    // it to the right type so it gets properly inserted.
    if (NumElts == 1)
      return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
                                      Shift, Elements, VecEltTy, isBigEndian);

    // Okay, this is a constant that covers multiple elements. Slice it up into
    // pieces and insert each element-sized piece into the vector.
    if (!isa<IntegerType>(C->getType()))
      C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
                                   C->getType()->getPrimitiveSizeInBits()));
    unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
    Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned ShiftI = Shift + i * ElementSize;
      Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
                                                                  ShiftI));
      Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
      if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
                                    isBigEndian))
        return false;
    }
    return true;
  }

  if (!V->hasOneUse()) return false;

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;
  switch (I->getOpcode()) {
  default: return false; // Unhandled case.
  case Instruction::BitCast:
    if (I->getOperand(0)->getType()->isVectorTy())
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::ZExt:
    if (!isMultipleOfTypeSize(
            I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
            VecEltTy))
      return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Or:
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian) &&
           collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
                                    isBigEndian);
  case Instruction::Shl: {
    // Must be shifting by a constant that is a multiple of the element size.
    ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
    if (!CI) return false;
    Shift += CI->getZExtValue();
    if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
    return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
                                    isBigEndian);
  }
  }
}


/// If the input is an 'or' instruction, we may be doing shifts and ors to
/// assemble the elements of the vector manually.
/// Try to rip the code out and replace it with insertelements. This is to
/// optimize code like this:
///
///    %tmp37 = bitcast float %inc to i32
///    %tmp38 = zext i32 %tmp37 to i64
///    %tmp31 = bitcast float %inc5 to i32
///    %tmp32 = zext i32 %tmp31 to i64
///    %tmp33 = shl i64 %tmp32, 32
///    %ins35 = or i64 %tmp33, %tmp38
///    %tmp43 = bitcast i64 %ins35 to <2 x float>
///
/// Into two insertelements that do "buildvector{%inc, %inc5}".
static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
                                                InstCombinerImpl &IC) {
  auto *DestVecTy = cast<FixedVectorType>(CI.getType());
  Value *IntInput = CI.getOperand(0);

  SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
  if (!collectInsertionElements(IntInput, 0, Elements,
                                DestVecTy->getElementType(),
                                IC.getDataLayout().isBigEndian()))
    return nullptr;

  // If we succeeded, we know that all of the elements are specified by
  // Elements or are zero if Elements has a null entry. Recast this as a set of
  // insertions.
  Value *Result = Constant::getNullValue(CI.getType());
  for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
    if (!Elements[i]) continue; // Unset element.

    Result = IC.Builder.CreateInsertElement(Result, Elements[i],
                                            IC.Builder.getInt32(i));
  }

  return Result;
}

/// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
/// vector followed by extract element. The backend tends to handle bitcasts of
/// vectors better than bitcasts of scalars because vector registers are
/// usually not type-specific like scalar integer or scalar floating-point.
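/// Illustrative example:
///   bitcast (extractelement <4 x float> %v, i32 1) to i32
/// becomes:
///   extractelement (bitcast <4 x float> %v to <4 x i32>), i32 1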
static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
                                              InstCombinerImpl &IC) {
  Value *VecOp, *Index;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_ExtractElt(m_Value(VecOp), m_Value(Index)))))
    return nullptr;

  // The bitcast must be to a vectorizable type, otherwise we can't make a new
  // type to extract from.
  Type *DestType = BitCast.getType();
  VectorType *VecType = cast<VectorType>(VecOp->getType());
  if (VectorType::isValidElementType(DestType)) {
    auto *NewVecType = VectorType::get(DestType, VecType);
    auto *NewBC = IC.Builder.CreateBitCast(VecOp, NewVecType, "bc");
    return ExtractElementInst::Create(NewBC, Index);
  }

  // Only handle the case where DestType is a vector, to avoid the inverse
  // transform in visitBitCast.
  // bitcast (extractelement <1 x elt>, dest) -> bitcast(<1 x elt>, dest)
  auto *FixedVType = dyn_cast<FixedVectorType>(VecType);
  if (DestType->isVectorTy() && FixedVType && FixedVType->getNumElements() == 1)
    return CastInst::Create(Instruction::BitCast, VecOp, DestType);

  return nullptr;
}

/// Change the type of a bitwise logic operation if we can eliminate a bitcast.
static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
                                            InstCombiner::BuilderTy &Builder) {
  Type *DestTy = BitCast.getType();
  BinaryOperator *BO;

  if (!match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
      !BO->isBitwiseLogicOp())
    return nullptr;

  // FIXME: This transform is restricted to vector types to avoid backend
  // problems caused by creating potentially illegal operations. If a fix-up is
  // added to handle that situation, we can remove this check.
  if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
    return nullptr;

  if (DestTy->isFPOrFPVectorTy()) {
    Value *X, *Y;
    // bitcast(logic(bitcast(X), bitcast(Y))) -> bitcast'(logic(bitcast'(X), Y))
    if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
        match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(Y))))) {
      if (X->getType()->isFPOrFPVectorTy() &&
          Y->getType()->isIntOrIntVectorTy()) {
        Value *CastedOp =
            Builder.CreateBitCast(BO->getOperand(0), Y->getType());
        Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, Y);
        return CastInst::CreateBitOrPointerCast(NewBO, DestTy);
      }
      if (X->getType()->isIntOrIntVectorTy() &&
          Y->getType()->isFPOrFPVectorTy()) {
        Value *CastedOp =
            Builder.CreateBitCast(BO->getOperand(1), X->getType());
        Value *NewBO = Builder.CreateBinOp(BO->getOpcode(), CastedOp, X);
        return CastInst::CreateBitOrPointerCast(NewBO, DestTy);
      }
    }
    return nullptr;
  }

  if (!DestTy->isIntOrIntVectorTy())
    return nullptr;

  Value *X;
  if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
    Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
  }

  if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
      X->getType() == DestTy && !isa<Constant>(X)) {
    // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
  }

  // Canonicalize vector bitcasts to come before vector bitwise logic with a
  // constant. This eases recognition of special constants for later ops.
  // Example:
  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
  Constant *C;
  if (match(BO->getOperand(1), m_Constant(C))) {
    // bitcast (logic X, C) --> logic (bitcast X, C')
    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
    Value *CastedC = Builder.CreateBitCast(C, DestTy);
    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
  }

  return nullptr;
}

/// Change the type of a select if we can eliminate a bitcast.
static Instruction *foldBitCastSelect(BitCastInst &BitCast,
                                      InstCombiner::BuilderTy &Builder) {
  Value *Cond, *TVal, *FVal;
  if (!match(BitCast.getOperand(0),
             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
    return nullptr;

  // A vector select must maintain the same number of elements in its operands.
  Type *CondTy = Cond->getType();
  Type *DestTy = BitCast.getType();
  if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
    if (!DestTy->isVectorTy() ||
        CondVTy->getElementCount() !=
            cast<VectorType>(DestTy)->getElementCount())
      return nullptr;

  // FIXME: This transform is restricted from changing the select between
  // scalars and vectors to avoid backend problems caused by creating
  // potentially illegal operations. If a fix-up is added to handle that
  // situation, we can remove this check.
  if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
    return nullptr;

  auto *Sel = cast<Instruction>(BitCast.getOperand(0));
  Value *X;
  if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
    Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
    return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
  }

  if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
      !isa<Constant>(X)) {
    // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
    Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
    return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
  }

  return nullptr;
}

/// Check if all users of CI are StoreInsts.
static bool hasStoreUsersOnly(CastInst &CI) {
  for (User *U : CI.users()) {
    if (!isa<StoreInst>(U))
      return false;
  }
  return true;
}

/// This function handles the following case:
///
///     A -> B cast
///     PHI
///     B -> A cast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
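///
/// A minimal illustrative IR shape of the pattern (types chosen arbitrarily):
///   %b   = bitcast <2 x i32> %a to i64        ; A -> B cast
///   %phi = phi i64 [ %b, ... ], [ %phi2, ... ]
///   %a2  = bitcast i64 %phi to <2 x i32>      ; B -> A cast
/// which can be rewritten as a phi of <2 x i32> values, removing both casts.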
Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
                                                      PHINode *PN) {
  // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
  if (hasStoreUsersOnly(CI))
    return nullptr;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();   // Type B
  Type *DestTy = CI.getType();    // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes, before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (Value *IncValue : OldPN->incoming_values()) {
      if (isa<Constant>(IncValue))
        continue;

      if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
        // If there is a sequence of one or more load instructions where each
        // loaded value is used as the address of a later load, the bitcast is
        // necessary to change the value type; don't optimize it. For
        // simplicity we give up if the load address comes from another load.
        Value *Addr = LI->getOperand(0);
        if (Addr == &CI || isa<LoadInst>(Addr))
          return nullptr;
        // Don't transform "load <256 x i32>, <256 x i32>*" to
        // "load x86_amx, x86_amx*", because x86_amx* is invalid.
        // TODO: Remove this check when bitcast between vector and x86_amx
        // is replaced with a specific intrinsic.
        if (DestTy->isX86_AMXTy())
          return nullptr;
        if (LI->hasOneUse() && LI->isSimple())
          continue;
        // If a LoadInst has more than one use, changing the type of loaded
        // value may create another bitcast.
        return nullptr;
      }

      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }

      auto *BCI = dyn_cast<BitCastInst>(IncValue);
      // We can't handle other instructions.
      if (!BCI)
        return nullptr;

      // Verify it's a A->B cast.
      Type *TyA = BCI->getOperand(0)->getType();
      Type *TyB = BCI->getType();
      if (TyA != DestTy || TyB != SrcTy)
        return nullptr;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        if (!SI->isSimple() || SI->getOperand(0) != OldPN)
          return nullptr;
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        // Verify it's a B->A cast.
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return nullptr;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        if (!OldPhiNodes.contains(PHI))
          return nullptr;
      } else {
        return nullptr;
      }
    }
  }

  // For each old PHI node, create a corresponding new PHI node with a type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      if (auto *C = dyn_cast<Constant>(V)) {
        NewV = ConstantExpr::getBitCast(C, DestTy);
      } else if (auto *LI = dyn_cast<LoadInst>(V)) {
        // Explicitly perform load combine to make sure no opposing transform
        // can remove the bitcast in the meantime and trigger an infinite loop.
        Builder.SetInsertPoint(LI);
        NewV = combineLoadToNewType(*LI, DestTy);
        // Remove the old load and its use in the old phi, which itself becomes
        // dead once the whole transform finishes.
        replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
        eraseInstFromFunction(*LI);
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        NewV = BCI->getOperand(0);
      } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
        NewV = NewPNodes[PrevPN];
      }
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process their users,
  // which are Stores and BitCasts. Without this processing
  // NewPHI nodes could be replicated and could lead to extra
  // moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI. These will help
  // later to get rid of a closure formed by OldPHI nodes.
  Instruction *RetVal = nullptr;
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (User *V : make_early_inc_range(OldPN->users())) {
      if (auto *SI = dyn_cast<StoreInst>(V)) {
        assert(SI->isSimple() && SI->getOperand(0) == OldPN);
        Builder.SetInsertPoint(SI);
        auto *NewBC =
            cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
        SI->setOperand(0, NewBC);
        Worklist.push(SI);
        assert(hasStoreUsersOnly(*NewBC));
      } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
        Type *TyB = BCI->getOperand(0)->getType();
        Type *TyA = BCI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void) TyA;
        (void) TyB;
        Instruction *I = replaceInstUsesWith(*BCI, NewPN);
        if (BCI == &CI)
          RetVal = I;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        assert(OldPhiNodes.contains(PHI));
        (void) PHI;
      } else {
        llvm_unreachable("all uses should be handled");
      }
    }
  }

  return RetVal;
}
2690
static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder,
                                        const DataLayout &DL) {
  Value *Src = CI.getOperand(0);
  PointerType *SrcPTy = cast<PointerType>(Src->getType());
  PointerType *DstPTy = cast<PointerType>(CI.getType());

  // Bitcasts involving opaque pointers cannot be converted into a GEP.
  if (SrcPTy->isOpaque() || DstPTy->isOpaque())
    return nullptr;

  Type *DstElTy = DstPTy->getNonOpaquePointerElementType();
  Type *SrcElTy = SrcPTy->getNonOpaquePointerElementType();

  // When the type pointed to is not sized, the cast cannot be
  // turned into a GEP.
  if (!SrcElTy->isSized())
    return nullptr;

  // If the source and destination are pointers, and this cast is equivalent
  // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
  // This can enhance SROA and other transforms that want type-safe pointers.
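  //
  // For example (hypothetical IR), descending into nested aggregates with
  // zero indices:
  //   %q = bitcast [2 x [4 x i32]]* %p to i32*
  // is equivalent to:
  //   %q = getelementptr [2 x [4 x i32]], [2 x [4 x i32]]* %p,
  //                      i32 0, i32 0, i32 0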
  unsigned NumZeros = 0;
  while (SrcElTy && SrcElTy != DstElTy) {
    SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
    ++NumZeros;
  }

  // If we found a path from the src to dest, create the getelementptr now.
  if (SrcElTy == DstElTy) {
    SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
    GetElementPtrInst *GEP = GetElementPtrInst::Create(
        SrcPTy->getNonOpaquePointerElementType(), Src, Idxs);

    // If the source pointer is dereferenceable, then assume it points to an
    // allocated object and apply "inbounds" to the GEP.
    bool CanBeNull, CanBeFreed;
    if (Src->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed)) {
      // In a non-default address space (not 0), a null pointer cannot be
      // assumed inbounds, so ignore that case (dereferenceable_or_null).
      // The reason is that 'null' is not treated differently in these address
      // spaces, and we consequently ignore the 'gep inbounds' special case
      // for 'null', which allows 'inbounds' on 'null' if the indices are
      // zeros.
      if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
        GEP->setIsInBounds();
    }
    return GEP;
  }
  return nullptr;
}

Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
  // If the operands are integer typed then apply the integer transforms,
  // otherwise just apply the common ones.
  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType();
  Type *DestTy = CI.getType();

  // Get rid of casts from one type to the same type. These are useless and can
  // be replaced by the operand.
  if (DestTy == Src->getType())
    return replaceInstUsesWith(CI, Src);

  if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
    // If we are casting an alloca to a pointer to a type of the same
    // size, rewrite the allocation instruction to allocate the "right" type.
    // There is no need to modify malloc calls because it is their bitcast that
    // needs to be cleaned up.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
      if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
        return V;

    if (Instruction *I = convertBitCastToGEP(CI, Builder, DL))
      return I;
  }

  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
    // Beware: messing with this target-specific oddity may cause trouble.
    if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(PoisonValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
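      //
      // For example (hypothetical IR, assuming a little-endian target):
      //   %i = bitcast <4 x i32> %v to i128
      //   %t = trunc i128 %i to i64
      //   %r = bitcast i64 %t to <2 x i32>
      // may be rewritten as a shufflevector that extracts the low elements
      // of %v (plus a bitcast when the element types differ).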
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
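      //
      // For example (hypothetical IR, assuming a little-endian target):
      //   %z0 = zext i8 %a to i32
      //   %z1 = zext i8 %b to i32
      //   %s1 = shl i32 %z1, 8
      //   %o  = or i32 %z0, %s1
      //   %r  = bitcast i32 %o to <4 x i8>
      // may become insertelement instructions placing %a and %b into a
      // <4 x i8> value directly.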
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
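      //
      // e.g. (hypothetical IR):
      //   bitcast <1 x i64> %v to double
      // -->
      //   %e = extractelement <1 x i64> %v, i32 0
      //   bitcast i64 %e to double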
      if (!DestTy->isVectorTy()) {
        Value *Elem =
          Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }

    // Convert an artificial vector insert into more analyzable bitwise logic.
    unsigned BitWidth = DestTy->getScalarSizeInBits();
    Value *X, *Y;
    uint64_t IndexC;
    if (match(Src, m_OneUse(m_InsertElt(m_OneUse(m_BitCast(m_Value(X))),
                                        m_Value(Y), m_ConstantInt(IndexC)))) &&
        DestTy->isIntegerTy() && X->getType() == DestTy &&
        Y->getType()->isIntegerTy() && isDesirableIntType(BitWidth)) {
      // Adjust for big endian - the LSBs are at the high index.
      if (DL.isBigEndian())
        IndexC = SrcVTy->getNumElements() - 1 - IndexC;

      // We only handle (endian-normalized) insert to index 0. Any other insert
      // would require a left-shift, so that is an extra instruction.
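      //
      // For example (hypothetical IR, little endian, BitWidth = 64 and
      // EltWidth = 32, so MaskC = 0xFFFFFFFF00000000):
      //   %v = bitcast i64 %x to <2 x i32>
      //   %i = insertelement <2 x i32> %v, i32 %y, i64 0
      //   %r = bitcast <2 x i32> %i to i64
      // -->
      //   %a = and i64 %x, -4294967296 ; clear the low element
      //   %z = zext i32 %y to i64
      //   %r = or i64 %a, %z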
      if (IndexC == 0) {
        // bitcast (inselt (bitcast X), Y, 0) --> or (and X, MaskC), (zext Y)
        unsigned EltWidth = Y->getType()->getScalarSizeInBits();
        APInt MaskC = APInt::getHighBitsSet(BitWidth, BitWidth - EltWidth);
        Value *AndX = Builder.CreateAnd(X, MaskC);
        Value *ZextY = Builder.CreateZExt(Y, DestTy);
        return BinaryOperator::CreateOr(AndX, ZextY);
      }
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same # elts.
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount();
    auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<VectorType>(DestTy)->getElementCount() == ShufElts &&
        ShufElts == SrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
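      //
      // For example (hypothetical IR):
      //   %c = bitcast <4 x float> %x to <4 x i32>
      //   %s = shufflevector <4 x i32> %c, <4 x i32> %y, <4 x i32> <mask>
      //   %r = bitcast <4 x i32> %s to <4 x float>
      // can instead shuffle %x with (bitcast %y to <4 x float>), reusing the
      // same mask and eliminating both surrounding casts.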
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match #elts.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
    // a byte-swap:
    // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
    // TODO: We should match the related pattern for bitreverse.
    if (DestTy->isIntegerTy() &&
        DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
        SrcTy->getScalarSizeInBits() == 8 &&
        ShufElts.getKnownMinValue() % 2 == 0 && Shuf->hasOneUse() &&
        Shuf->isReverse()) {
      assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
      assert(match(ShufOp1, m_Undef()) && "Unexpected shuffle op");
      Function *Bswap =
          Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
      Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
      return CallInst::Create(Bswap, { ScalarX });
    }
  }

  // Handle the A->B->A cast when there is an intervening PHI node.
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
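  //
  // For example (hypothetical IR):
  //   addrspacecast i32* %p to i8 addrspace(1)*
  // becomes:
  //   %b = bitcast i32* %p to i8*
  //   addrspacecast i8* %b to i8 addrspace(1)*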
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  if (!SrcTy->hasSameElementTypeAs(DestTy)) {
    Type *MidTy =
        PointerType::getWithSamePointeeType(DestTy, SrcTy->getAddressSpace());
    // Handle vectors of pointers.
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType()))
      MidTy = VectorType::get(MidTy, VT->getElementCount());

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}