//===- AMDGPUInstCombineIntrinsic.cpp - AMDGPU specific InstCombine pass -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// \file
// This file implements the AMDGPU-specific parts of InstCombine. It uses the
// target's detailed information to fold and simplify calls to AMDGPU
// intrinsics, while letting the target-independent combines handle the rest.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNSubtarget.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"

using namespace llvm;

#define DEBUG_TYPE "AMDGPUtti"

namespace {

struct AMDGPUImageDMaskIntrinsic {
  unsigned Intr;
};

#define GET_AMDGPUImageDMaskIntrinsicTable_IMPL
#include "InstCombineTables.inc"

} // end anonymous namespace

// Constant fold llvm.amdgcn.fmed3 intrinsics for standard inputs.
//
// A single NaN input is folded to minnum, so we rely on that folding for
// handling NaNs.
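//
// Illustrative example (a sketch, not from a specific test): with no NaN
// inputs, fmed3(1.0, 4.0, 2.0) constant-folds to 2.0, the median of the three
// values.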
static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,
                           const APFloat &Src2) {
  APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);

  APFloat::cmpResult Cmp0 = Max3.compare(Src0);
  assert(Cmp0 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp0 == APFloat::cmpEqual)
    return maxnum(Src1, Src2);

  APFloat::cmpResult Cmp1 = Max3.compare(Src1);
  assert(Cmp1 != APFloat::cmpUnordered && "nans handled separately");
  if (Cmp1 == APFloat::cmpEqual)
    return maxnum(Src0, Src2);

  return maxnum(Src0, Src1);
}

// Check if a value can be converted to a 16-bit value without losing
// precision.
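//
// For example (illustrative): an fpext from half, a zext/sext from i16, or a
// float constant such as 2.0 that fits exactly in half all qualify; a constant
// like 1.0e10 does not, and a value that is already 16-bit is rejected because
// no conversion is needed.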
static bool canSafelyConvertTo16Bit(Value &V) {
  Type *VTy = V.getType();
  if (VTy->isHalfTy() || VTy->isIntegerTy(16)) {
    // The value is already 16-bit, so we don't want to convert to 16-bit again!
    return false;
  }
  if (ConstantFP *ConstFloat = dyn_cast<ConstantFP>(&V)) {
    // We need to check that if we cast the constant down to a half, we do not
    // lose precision.
    APFloat FloatValue(ConstFloat->getValueAPF());
    bool LosesInfo = true;
    FloatValue.convert(APFloat::IEEEhalf(), APFloat::rmTowardZero, &LosesInfo);
    return !LosesInfo;
  }
  Value *CastSrc;
  if (match(&V, m_FPExt(PatternMatch::m_Value(CastSrc))) ||
      match(&V, m_SExt(PatternMatch::m_Value(CastSrc))) ||
      match(&V, m_ZExt(PatternMatch::m_Value(CastSrc)))) {
    Type *CastSrcTy = CastSrc->getType();
    if (CastSrcTy->isHalfTy() || CastSrcTy->isIntegerTy(16))
      return true;
  }

  return false;
}

// Convert a value to 16-bit.
static Value *convertTo16Bit(Value &V, InstCombiner::BuilderTy &Builder) {
  Type *VTy = V.getType();
  if (isa<FPExtInst>(&V) || isa<SExtInst>(&V) || isa<ZExtInst>(&V))
    return cast<Instruction>(&V)->getOperand(0);
  if (VTy->isIntegerTy())
    return Builder.CreateIntCast(&V, Type::getInt16Ty(V.getContext()), false);
  if (VTy->isFloatingPointTy())
    return Builder.CreateFPCast(&V, Type::getHalfTy(V.getContext()));

  llvm_unreachable("Should never be called!");
}

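// Try to rewrite an image intrinsic so its addressing operands use 16-bit
// types when the subtarget supports A16/G16. Illustrative sketch of the
// effect (not from a specific test): an image.sample whose coordinates are all
// fpext from half can be rewritten to take the half values directly, with the
// coordinate overload type changed from f32 to f16.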
static Optional<Instruction *>
simplifyAMDGCNImageIntrinsic(const GCNSubtarget *ST,
                             const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr,
                             IntrinsicInst &II, InstCombiner &IC) {
  if (!ST->hasA16() && !ST->hasG16())
    return None;

  bool FloatCoord = false;
  // If true, only the derivatives can be converted to 16-bit; the coordinates
  // cannot.
  bool OnlyDerivatives = false;

  for (unsigned OperandIndex = ImageDimIntr->GradientStart;
       OperandIndex < ImageDimIntr->VAddrEnd; OperandIndex++) {
    Value *Coord = II.getOperand(OperandIndex);
    // If the values are not derived from 16-bit values, we cannot optimize.
    if (!canSafelyConvertTo16Bit(*Coord)) {
      if (OperandIndex < ImageDimIntr->CoordStart ||
          ImageDimIntr->GradientStart == ImageDimIntr->CoordStart) {
        return None;
      }
      // All gradients can be converted, so convert only them
      OnlyDerivatives = true;
      break;
    }

    assert(OperandIndex == ImageDimIntr->GradientStart ||
           FloatCoord == Coord->getType()->isFloatingPointTy());
    FloatCoord = Coord->getType()->isFloatingPointTy();
  }

  if (OnlyDerivatives) {
    if (!ST->hasG16())
      return None;
  } else {
    if (!ST->hasA16())
      OnlyDerivatives = true; // Only supports G16
  }

  Type *CoordType = FloatCoord ? Type::getHalfTy(II.getContext())
                               : Type::getInt16Ty(II.getContext());

  SmallVector<Type *, 4> ArgTys;
  if (!Intrinsic::getIntrinsicSignature(II.getCalledFunction(), ArgTys))
    return None;

  ArgTys[ImageDimIntr->GradientTyArg] = CoordType;
  if (!OnlyDerivatives)
    ArgTys[ImageDimIntr->CoordTyArg] = CoordType;
  Function *I =
      Intrinsic::getDeclaration(II.getModule(), II.getIntrinsicID(), ArgTys);

  SmallVector<Value *, 8> Args(II.arg_operands());

  unsigned EndIndex =
      OnlyDerivatives ? ImageDimIntr->CoordStart : ImageDimIntr->VAddrEnd;
  for (unsigned OperandIndex = ImageDimIntr->GradientStart;
       OperandIndex < EndIndex; OperandIndex++) {
    Args[OperandIndex] =
        convertTo16Bit(*II.getOperand(OperandIndex), IC.Builder);
  }

  CallInst *NewCall = IC.Builder.CreateCall(I, Args);
  NewCall->takeName(&II);
  NewCall->copyMetadata(II);
  if (isa<FPMathOperator>(NewCall))
    NewCall->copyFastMathFlags(&II);
  return IC.replaceInstUsesWith(II, NewCall);
}

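// Return true if a "legacy" multiply of Op0 and Op1 can be replaced by an
// ordinary fmul/fma. Illustrative example (a sketch, not from a test): in
// llvm.amdgcn.fmul.legacy(float 3.0, float %x) the constant 3.0 is finite and
// non-zero, so the call can become a plain fmul.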
bool GCNTTIImpl::canSimplifyLegacyMulToMul(const Value *Op0, const Value *Op1,
                                           InstCombiner &IC) const {
  // The legacy behaviour is that multiplying +/-0.0 by anything, even NaN or
  // infinity, gives +0.0. If we can prove we don't have one of the special
  // cases then we can use a normal multiply instead.
  // TODO: Create and use isKnownFiniteNonZero instead of just matching
  // constants here.
  if (match(Op0, PatternMatch::m_FiniteNonZero()) ||
      match(Op1, PatternMatch::m_FiniteNonZero())) {
    // One operand is not zero or infinity or NaN.
    return true;
  }
  auto *TLI = &IC.getTargetLibraryInfo();
  if (isKnownNeverInfinity(Op0, TLI) && isKnownNeverNaN(Op0, TLI) &&
      isKnownNeverInfinity(Op1, TLI) && isKnownNeverNaN(Op1, TLI)) {
    // Neither operand is infinity or NaN.
    return true;
  }
  return false;
}

Optional<Instruction *>
GCNTTIImpl::instCombineIntrinsic(InstCombiner &IC, IntrinsicInst &II) const {
  Intrinsic::ID IID = II.getIntrinsicID();
  switch (IID) {
  case Intrinsic::amdgcn_rcp: {
    Value *Src = II.getArgOperand(0);

    // TODO: Move to ConstantFolding/InstSimplify?
    if (isa<UndefValue>(Src)) {
      Type *Ty = II.getType();
      auto *QNaN = ConstantFP::get(Ty, APFloat::getQNaN(Ty->getFltSemantics()));
      return IC.replaceInstUsesWith(II, QNaN);
    }

    if (II.isStrictFP())
      break;

    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1);
      Val.divide(ArgVal, APFloat::rmNearestTiesToEven);

      // This is more precise than the instruction may give.
      //
      // TODO: The instruction always flushes denormal results (except for f16),
      // should this also?
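      //
      // Illustrative example (not from a specific test):
      // llvm.amdgcn.rcp(float 2.0) folds to 0.5 here.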
      return IC.replaceInstUsesWith(II, ConstantFP::get(II.getContext(), Val));
    }

    break;
  }
  case Intrinsic::amdgcn_rsq: {
    Value *Src = II.getArgOperand(0);

    // TODO: Move to ConstantFolding/InstSimplify?
    if (isa<UndefValue>(Src)) {
      Type *Ty = II.getType();
      auto *QNaN = ConstantFP::get(Ty, APFloat::getQNaN(Ty->getFltSemantics()));
      return IC.replaceInstUsesWith(II, QNaN);
    }

    break;
  }
  case Intrinsic::amdgcn_frexp_mant:
  case Intrinsic::amdgcn_frexp_exp: {
    Value *Src = II.getArgOperand(0);
    if (const ConstantFP *C = dyn_cast<ConstantFP>(Src)) {
      int Exp;
      APFloat Significand =
          frexp(C->getValueAPF(), Exp, APFloat::rmNearestTiesToEven);

      if (IID == Intrinsic::amdgcn_frexp_mant) {
        return IC.replaceInstUsesWith(
            II, ConstantFP::get(II.getContext(), Significand));
      }

      // Match instruction special case behavior.
      if (Exp == APFloat::IEK_NaN || Exp == APFloat::IEK_Inf)
        Exp = 0;

      return IC.replaceInstUsesWith(II, ConstantInt::get(II.getType(), Exp));
    }

    if (isa<UndefValue>(Src)) {
      return IC.replaceInstUsesWith(II, UndefValue::get(II.getType()));
    }

    break;
  }
  case Intrinsic::amdgcn_class: {
    enum {
      S_NAN = 1 << 0,       // Signaling NaN
      Q_NAN = 1 << 1,       // Quiet NaN
      N_INFINITY = 1 << 2,  // Negative infinity
      N_NORMAL = 1 << 3,    // Negative normal
      N_SUBNORMAL = 1 << 4, // Negative subnormal
      N_ZERO = 1 << 5,      // Negative zero
      P_ZERO = 1 << 6,      // Positive zero
      P_SUBNORMAL = 1 << 7, // Positive subnormal
      P_NORMAL = 1 << 8,    // Positive normal
      P_INFINITY = 1 << 9   // Positive infinity
    };
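
    // For reference (illustrative): a mask of S_NAN | Q_NAN asks whether the
    // value is any kind of NaN and is folded to an fcmp uno below, while
    // N_ZERO | P_ZERO is the "is zero" test folded to an fcmp oeq with 0.0.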

    const uint32_t FullMask = S_NAN | Q_NAN | N_INFINITY | N_NORMAL |
                              N_SUBNORMAL | N_ZERO | P_ZERO | P_SUBNORMAL |
                              P_NORMAL | P_INFINITY;

    Value *Src0 = II.getArgOperand(0);
    Value *Src1 = II.getArgOperand(1);
    const ConstantInt *CMask = dyn_cast<ConstantInt>(Src1);
    if (!CMask) {
      if (isa<UndefValue>(Src0)) {
        return IC.replaceInstUsesWith(II, UndefValue::get(II.getType()));
      }

      if (isa<UndefValue>(Src1)) {
        return IC.replaceInstUsesWith(II,
                                      ConstantInt::get(II.getType(), false));
      }
      break;
    }

    uint32_t Mask = CMask->getZExtValue();

    // If all tests are made, it doesn't matter what the value is.
    if ((Mask & FullMask) == FullMask) {
      return IC.replaceInstUsesWith(II, ConstantInt::get(II.getType(), true));
    }

    if ((Mask & FullMask) == 0) {
      return IC.replaceInstUsesWith(II, ConstantInt::get(II.getType(), false));
    }

    if (Mask == (S_NAN | Q_NAN)) {
      // Equivalent of isnan. Replace with standard fcmp.
      Value *FCmp = IC.Builder.CreateFCmpUNO(Src0, Src0);
      FCmp->takeName(&II);
      return IC.replaceInstUsesWith(II, FCmp);
    }

    if (Mask == (N_ZERO | P_ZERO)) {
      // Equivalent of == 0.
      Value *FCmp =
          IC.Builder.CreateFCmpOEQ(Src0, ConstantFP::get(Src0->getType(), 0.0));

      FCmp->takeName(&II);
      return IC.replaceInstUsesWith(II, FCmp);
    }

    // fp_class (nnan x), qnan|snan|other -> fp_class (nnan x), other
    if (((Mask & S_NAN) || (Mask & Q_NAN)) &&
        isKnownNeverNaN(Src0, &IC.getTargetLibraryInfo())) {
      return IC.replaceOperand(
          II, 1, ConstantInt::get(Src1->getType(), Mask & ~(S_NAN | Q_NAN)));
    }

    const ConstantFP *CVal = dyn_cast<ConstantFP>(Src0);
    if (!CVal) {
      if (isa<UndefValue>(Src0)) {
        return IC.replaceInstUsesWith(II, UndefValue::get(II.getType()));
      }

      // Clamp mask to used bits
      if ((Mask & FullMask) != Mask) {
        CallInst *NewCall = IC.Builder.CreateCall(
            II.getCalledFunction(),
            {Src0, ConstantInt::get(Src1->getType(), Mask & FullMask)});

        NewCall->takeName(&II);
        return IC.replaceInstUsesWith(II, NewCall);
      }

      break;
    }

    const APFloat &Val = CVal->getValueAPF();

    bool Result =
        ((Mask & S_NAN) && Val.isNaN() && Val.isSignaling()) ||
        ((Mask & Q_NAN) && Val.isNaN() && !Val.isSignaling()) ||
        ((Mask & N_INFINITY) && Val.isInfinity() && Val.isNegative()) ||
        ((Mask & N_NORMAL) && Val.isNormal() && Val.isNegative()) ||
        ((Mask & N_SUBNORMAL) && Val.isDenormal() && Val.isNegative()) ||
        ((Mask & N_ZERO) && Val.isZero() && Val.isNegative()) ||
        ((Mask & P_ZERO) && Val.isZero() && !Val.isNegative()) ||
        ((Mask & P_SUBNORMAL) && Val.isDenormal() && !Val.isNegative()) ||
        ((Mask & P_NORMAL) && Val.isNormal() && !Val.isNegative()) ||
        ((Mask & P_INFINITY) && Val.isInfinity() && !Val.isNegative());

    return IC.replaceInstUsesWith(II, ConstantInt::get(II.getType(), Result));
  }
  case Intrinsic::amdgcn_cvt_pkrtz: {
    Value *Src0 = II.getArgOperand(0);
    Value *Src1 = II.getArgOperand(1);
    if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
      if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
        const fltSemantics &HalfSem =
            II.getType()->getScalarType()->getFltSemantics();
        bool LosesInfo;
        APFloat Val0 = C0->getValueAPF();
        APFloat Val1 = C1->getValueAPF();
        Val0.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);
        Val1.convert(HalfSem, APFloat::rmTowardZero, &LosesInfo);

        Constant *Folded =
            ConstantVector::get({ConstantFP::get(II.getContext(), Val0),
                                 ConstantFP::get(II.getContext(), Val1)});
        return IC.replaceInstUsesWith(II, Folded);
      }
    }

    if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1)) {
      return IC.replaceInstUsesWith(II, UndefValue::get(II.getType()));
    }

    break;
  }
  case Intrinsic::amdgcn_cvt_pknorm_i16:
  case Intrinsic::amdgcn_cvt_pknorm_u16:
  case Intrinsic::amdgcn_cvt_pk_i16:
  case Intrinsic::amdgcn_cvt_pk_u16: {
    Value *Src0 = II.getArgOperand(0);
    Value *Src1 = II.getArgOperand(1);

    if (isa<UndefValue>(Src0) && isa<UndefValue>(Src1)) {
      return IC.replaceInstUsesWith(II, UndefValue::get(II.getType()));
    }

    break;
  }
  case Intrinsic::amdgcn_ubfe:
  case Intrinsic::amdgcn_sbfe: {
    // Decompose simple cases into standard shifts.
    Value *Src = II.getArgOperand(0);
    if (isa<UndefValue>(Src)) {
      return IC.replaceInstUsesWith(II, Src);
    }

    unsigned Width;
    Type *Ty = II.getType();
    unsigned IntSize = Ty->getIntegerBitWidth();

    ConstantInt *CWidth = dyn_cast<ConstantInt>(II.getArgOperand(2));
    if (CWidth) {
      Width = CWidth->getZExtValue();
      if ((Width & (IntSize - 1)) == 0) {
        return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(Ty));
      }

      // Hardware ignores high bits, so remove those.
      if (Width >= IntSize) {
        return IC.replaceOperand(
            II, 2, ConstantInt::get(CWidth->getType(), Width & (IntSize - 1)));
      }
    }

    unsigned Offset;
    ConstantInt *COffset = dyn_cast<ConstantInt>(II.getArgOperand(1));
    if (COffset) {
      Offset = COffset->getZExtValue();
      if (Offset >= IntSize) {
        return IC.replaceOperand(
            II, 1,
            ConstantInt::get(COffset->getType(), Offset & (IntSize - 1)));
      }
    }

    bool Signed = IID == Intrinsic::amdgcn_sbfe;

    if (!CWidth || !COffset)
      break;

    // The case of Width == 0 is handled above, which makes this transformation
    // safe. If Width == 0, the ashr and lshr instructions would produce
    // poison, since the shift amount would equal the bit width.
    assert(Width != 0);

    // TODO: This allows folding to undef when the hardware has specific
    // behavior?
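    //
    // Illustrative example (not from a specific test): with i32 operands,
    // ubfe(%x, 4, 8) becomes (%x << 20) lshr 24, extracting bits [11:4].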
    if (Offset + Width < IntSize) {
      Value *Shl = IC.Builder.CreateShl(Src, IntSize - Offset - Width);
      Value *RightShift = Signed ? IC.Builder.CreateAShr(Shl, IntSize - Width)
                                 : IC.Builder.CreateLShr(Shl, IntSize - Width);
      RightShift->takeName(&II);
      return IC.replaceInstUsesWith(II, RightShift);
    }

    Value *RightShift = Signed ? IC.Builder.CreateAShr(Src, Offset)
                               : IC.Builder.CreateLShr(Src, Offset);

    RightShift->takeName(&II);
    return IC.replaceInstUsesWith(II, RightShift);
  }
  case Intrinsic::amdgcn_exp:
  case Intrinsic::amdgcn_exp_compr: {
    ConstantInt *En = cast<ConstantInt>(II.getArgOperand(1));
    unsigned EnBits = En->getZExtValue();
    if (EnBits == 0xf)
      break; // All inputs enabled.

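    // Sources whose enable bits are clear are never read by the export, so
    // they can be replaced with undef. Illustrative example (not from a
    // specific test): for a non-compressed export with en = 0b0011, src2 and
    // src3 are unused; for the compressed form each source covers two enable
    // bits.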
    bool IsCompr = IID == Intrinsic::amdgcn_exp_compr;
    bool Changed = false;
    for (int I = 0; I < (IsCompr ? 2 : 4); ++I) {
      if ((!IsCompr && (EnBits & (1 << I)) == 0) ||
          (IsCompr && ((EnBits & (0x3 << (2 * I))) == 0))) {
        Value *Src = II.getArgOperand(I + 2);
        if (!isa<UndefValue>(Src)) {
          IC.replaceOperand(II, I + 2, UndefValue::get(Src->getType()));
          Changed = true;
        }
      }
    }

    if (Changed) {
      return &II;
    }

    break;
  }
  case Intrinsic::amdgcn_fmed3: {
    // Note this does not preserve proper sNaN behavior if IEEE-mode is enabled
    // for the shader.

    Value *Src0 = II.getArgOperand(0);
    Value *Src1 = II.getArgOperand(1);
    Value *Src2 = II.getArgOperand(2);

    // Checking for NaN before canonicalization provides better fidelity when
    // mapping other operations onto fmed3 since the order of operands is
    // unchanged.
    CallInst *NewCall = nullptr;
    if (match(Src0, PatternMatch::m_NaN()) || isa<UndefValue>(Src0)) {
      NewCall = IC.Builder.CreateMinNum(Src1, Src2);
    } else if (match(Src1, PatternMatch::m_NaN()) || isa<UndefValue>(Src1)) {
      NewCall = IC.Builder.CreateMinNum(Src0, Src2);
    } else if (match(Src2, PatternMatch::m_NaN()) || isa<UndefValue>(Src2)) {
      NewCall = IC.Builder.CreateMaxNum(Src0, Src1);
    }

    if (NewCall) {
      NewCall->copyFastMathFlags(&II);
      NewCall->takeName(&II);
      return IC.replaceInstUsesWith(II, NewCall);
    }

    bool Swap = false;
    // Canonicalize constants to RHS operands.
    //
    // fmed3(c0, x, c1) -> fmed3(x, c0, c1)
    if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
      std::swap(Src0, Src1);
      Swap = true;
    }

    if (isa<Constant>(Src1) && !isa<Constant>(Src2)) {
      std::swap(Src1, Src2);
      Swap = true;
    }

    if (isa<Constant>(Src0) && !isa<Constant>(Src1)) {
      std::swap(Src0, Src1);
      Swap = true;
    }

    if (Swap) {
      II.setArgOperand(0, Src0);
      II.setArgOperand(1, Src1);
      II.setArgOperand(2, Src2);
      return &II;
    }

    if (const ConstantFP *C0 = dyn_cast<ConstantFP>(Src0)) {
      if (const ConstantFP *C1 = dyn_cast<ConstantFP>(Src1)) {
        if (const ConstantFP *C2 = dyn_cast<ConstantFP>(Src2)) {
          APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
                                       C2->getValueAPF());
          return IC.replaceInstUsesWith(
              II, ConstantFP::get(IC.Builder.getContext(), Result));
        }
      }
    }

    break;
  }
  case Intrinsic::amdgcn_icmp:
  case Intrinsic::amdgcn_fcmp: {
    const ConstantInt *CC = cast<ConstantInt>(II.getArgOperand(2));
    // Guard against invalid arguments.
    int64_t CCVal = CC->getZExtValue();
    bool IsInteger = IID == Intrinsic::amdgcn_icmp;
    if ((IsInteger && (CCVal < CmpInst::FIRST_ICMP_PREDICATE ||
                       CCVal > CmpInst::LAST_ICMP_PREDICATE)) ||
        (!IsInteger && (CCVal < CmpInst::FIRST_FCMP_PREDICATE ||
                        CCVal > CmpInst::LAST_FCMP_PREDICATE)))
      break;

    Value *Src0 = II.getArgOperand(0);
    Value *Src1 = II.getArgOperand(1);

    if (auto *CSrc0 = dyn_cast<Constant>(Src0)) {
      if (auto *CSrc1 = dyn_cast<Constant>(Src1)) {
        Constant *CCmp = ConstantExpr::getCompare(CCVal, CSrc0, CSrc1);
        if (CCmp->isNullValue()) {
          return IC.replaceInstUsesWith(
              II, ConstantExpr::getSExt(CCmp, II.getType()));
        }

        // The result of V_ICMP/V_FCMP assembly instructions (which this
        // intrinsic exposes) is one bit per thread, masked with the EXEC
        // register (which contains the bitmask of live threads). So a
        // comparison that always returns true is the same as a read of the
        // EXEC register.
        Function *NewF = Intrinsic::getDeclaration(
            II.getModule(), Intrinsic::read_register, II.getType());
        Metadata *MDArgs[] = {MDString::get(II.getContext(), "exec")};
        MDNode *MD = MDNode::get(II.getContext(), MDArgs);
        Value *Args[] = {MetadataAsValue::get(II.getContext(), MD)};
        CallInst *NewCall = IC.Builder.CreateCall(NewF, Args);
        NewCall->addFnAttr(Attribute::Convergent);
        NewCall->takeName(&II);
        return IC.replaceInstUsesWith(II, NewCall);
      }

      // Canonicalize constants to RHS.
      CmpInst::Predicate SwapPred =
          CmpInst::getSwappedPredicate(static_cast<CmpInst::Predicate>(CCVal));
      II.setArgOperand(0, Src1);
      II.setArgOperand(1, Src0);
      II.setArgOperand(
          2, ConstantInt::get(CC->getType(), static_cast<int>(SwapPred)));
      return &II;
    }

    if (CCVal != CmpInst::ICMP_EQ && CCVal != CmpInst::ICMP_NE)
      break;

    // Canonicalize compare eq with true value to compare != 0
    // llvm.amdgcn.icmp(zext (i1 x), 1, eq)
    //   -> llvm.amdgcn.icmp(zext (i1 x), 0, ne)
    // llvm.amdgcn.icmp(sext (i1 x), -1, eq)
    //   -> llvm.amdgcn.icmp(sext (i1 x), 0, ne)
    Value *ExtSrc;
    if (CCVal == CmpInst::ICMP_EQ &&
        ((match(Src1, PatternMatch::m_One()) &&
          match(Src0, m_ZExt(PatternMatch::m_Value(ExtSrc)))) ||
         (match(Src1, PatternMatch::m_AllOnes()) &&
          match(Src0, m_SExt(PatternMatch::m_Value(ExtSrc))))) &&
        ExtSrc->getType()->isIntegerTy(1)) {
      IC.replaceOperand(II, 1, ConstantInt::getNullValue(Src1->getType()));
      IC.replaceOperand(II, 2,
                        ConstantInt::get(CC->getType(), CmpInst::ICMP_NE));
      return &II;
    }

    CmpInst::Predicate SrcPred;
    Value *SrcLHS;
    Value *SrcRHS;

    // Fold compare eq/ne with 0 from a compare result as the predicate to the
    // intrinsic. The typical use is a wave vote function in the library, which
    // will be fed from a user code condition compared with 0. Fold in the
    // redundant compare.

    // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, ne)
    //   -> llvm.amdgcn.[if]cmp(a, b, pred)
    //
    // llvm.amdgcn.icmp([sz]ext ([if]cmp pred a, b), 0, eq)
    //   -> llvm.amdgcn.[if]cmp(a, b, inv pred)
    if (match(Src1, PatternMatch::m_Zero()) &&
        match(Src0, PatternMatch::m_ZExtOrSExt(
                        m_Cmp(SrcPred, PatternMatch::m_Value(SrcLHS),
                              PatternMatch::m_Value(SrcRHS))))) {
      if (CCVal == CmpInst::ICMP_EQ)
        SrcPred = CmpInst::getInversePredicate(SrcPred);

      Intrinsic::ID NewIID = CmpInst::isFPPredicate(SrcPred)
                                 ? Intrinsic::amdgcn_fcmp
                                 : Intrinsic::amdgcn_icmp;

      Type *Ty = SrcLHS->getType();
      if (auto *CmpType = dyn_cast<IntegerType>(Ty)) {
        // Promote to next legal integer type.
        unsigned Width = CmpType->getBitWidth();
        unsigned NewWidth = Width;

        // Don't do anything for i1 comparisons.
        if (Width == 1)
          break;

        if (Width <= 16)
          NewWidth = 16;
        else if (Width <= 32)
          NewWidth = 32;
        else if (Width <= 64)
          NewWidth = 64;
        else if (Width > 64)
          break; // Can't handle this.

        if (Width != NewWidth) {
          IntegerType *CmpTy = IC.Builder.getIntNTy(NewWidth);
          if (CmpInst::isSigned(SrcPred)) {
            SrcLHS = IC.Builder.CreateSExt(SrcLHS, CmpTy);
            SrcRHS = IC.Builder.CreateSExt(SrcRHS, CmpTy);
          } else {
            SrcLHS = IC.Builder.CreateZExt(SrcLHS, CmpTy);
            SrcRHS = IC.Builder.CreateZExt(SrcRHS, CmpTy);
          }
        }
      } else if (!Ty->isFloatTy() && !Ty->isDoubleTy() && !Ty->isHalfTy())
        break;

      Function *NewF = Intrinsic::getDeclaration(
          II.getModule(), NewIID, {II.getType(), SrcLHS->getType()});
      Value *Args[] = {SrcLHS, SrcRHS,
                       ConstantInt::get(CC->getType(), SrcPred)};
      CallInst *NewCall = IC.Builder.CreateCall(NewF, Args);
      NewCall->takeName(&II);
      return IC.replaceInstUsesWith(II, NewCall);
    }

    break;
  }
  case Intrinsic::amdgcn_ballot: {
    if (auto *Src = dyn_cast<ConstantInt>(II.getArgOperand(0))) {
      if (Src->isZero()) {
        // amdgcn.ballot(i1 0) is zero.
        return IC.replaceInstUsesWith(II, Constant::getNullValue(II.getType()));
      }

      if (Src->isOne()) {
        // amdgcn.ballot(i1 1) is exec.
        const char *RegName = "exec";
        if (II.getType()->isIntegerTy(32))
          RegName = "exec_lo";
        else if (!II.getType()->isIntegerTy(64))
          break;

        Function *NewF = Intrinsic::getDeclaration(
            II.getModule(), Intrinsic::read_register, II.getType());
        Metadata *MDArgs[] = {MDString::get(II.getContext(), RegName)};
        MDNode *MD = MDNode::get(II.getContext(), MDArgs);
        Value *Args[] = {MetadataAsValue::get(II.getContext(), MD)};
        CallInst *NewCall = IC.Builder.CreateCall(NewF, Args);
        NewCall->addFnAttr(Attribute::Convergent);
        NewCall->takeName(&II);
        return IC.replaceInstUsesWith(II, NewCall);
      }
    }
    break;
  }
  case Intrinsic::amdgcn_wqm_vote: {
    // wqm_vote is identity when the argument is constant.
    if (!isa<Constant>(II.getArgOperand(0)))
      break;

    return IC.replaceInstUsesWith(II, II.getArgOperand(0));
  }
  case Intrinsic::amdgcn_kill: {
    const ConstantInt *C = dyn_cast<ConstantInt>(II.getArgOperand(0));
    if (!C || !C->getZExtValue())
      break;

    // amdgcn.kill(i1 1) is a no-op
    return IC.eraseInstFromFunction(II);
  }
  case Intrinsic::amdgcn_update_dpp: {
    Value *Old = II.getArgOperand(0);

    auto *BC = cast<ConstantInt>(II.getArgOperand(5));
    auto *RM = cast<ConstantInt>(II.getArgOperand(3));
    auto *BM = cast<ConstantInt>(II.getArgOperand(4));
    if (BC->isZeroValue() || RM->getZExtValue() != 0xF ||
        BM->getZExtValue() != 0xF || isa<UndefValue>(Old))
      break;

    // If bound_ctrl = 1 and row mask = bank mask = 0xf, we can omit the old
    // value.
    return IC.replaceOperand(II, 0, UndefValue::get(Old->getType()));
  }
  case Intrinsic::amdgcn_permlane16:
  case Intrinsic::amdgcn_permlanex16: {
    // Discard vdst_in if it's not going to be read.
    Value *VDstIn = II.getArgOperand(0);
    if (isa<UndefValue>(VDstIn))
      break;

    ConstantInt *FetchInvalid = cast<ConstantInt>(II.getArgOperand(4));
    ConstantInt *BoundCtrl = cast<ConstantInt>(II.getArgOperand(5));
    if (!FetchInvalid->getZExtValue() && !BoundCtrl->getZExtValue())
      break;

    return IC.replaceOperand(II, 0, UndefValue::get(VDstIn->getType()));
  }
  case Intrinsic::amdgcn_readfirstlane:
  case Intrinsic::amdgcn_readlane: {
    // A constant value is trivially uniform.
    if (Constant *C = dyn_cast<Constant>(II.getArgOperand(0))) {
      return IC.replaceInstUsesWith(II, C);
    }

    // The rest of these may not be safe if the exec may not be the same between
    // the def and use.
    Value *Src = II.getArgOperand(0);
    Instruction *SrcInst = dyn_cast<Instruction>(Src);
    if (SrcInst && SrcInst->getParent() != II.getParent())
      break;

    // readfirstlane (readfirstlane x) -> readfirstlane x
    // readlane (readfirstlane x), y -> readfirstlane x
    if (match(Src,
              PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readfirstlane>())) {
      return IC.replaceInstUsesWith(II, Src);
    }

    if (IID == Intrinsic::amdgcn_readfirstlane) {
      // readfirstlane (readlane x, y) -> readlane x, y
      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>())) {
        return IC.replaceInstUsesWith(II, Src);
      }
    } else {
      // readlane (readlane x, y), y -> readlane x, y
      if (match(Src, PatternMatch::m_Intrinsic<Intrinsic::amdgcn_readlane>(
                         PatternMatch::m_Value(),
                         PatternMatch::m_Specific(II.getArgOperand(1))))) {
        return IC.replaceInstUsesWith(II, Src);
      }
    }

    break;
  }
  case Intrinsic::amdgcn_ldexp: {
    // FIXME: This doesn't introduce new instructions and belongs in
    // InstructionSimplify.
    Type *Ty = II.getType();
    Value *Op0 = II.getArgOperand(0);
    Value *Op1 = II.getArgOperand(1);

    // Folding undef to qnan is safe regardless of the FP mode.
    if (isa<UndefValue>(Op0)) {
      auto *QNaN = ConstantFP::get(Ty, APFloat::getQNaN(Ty->getFltSemantics()));
      return IC.replaceInstUsesWith(II, QNaN);
    }

    const APFloat *C = nullptr;
    match(Op0, PatternMatch::m_APFloat(C));

    // FIXME: Should flush denorms depending on FP mode, but that's ignored
    // everywhere else.
    //
    // These cases should be safe, even with strictfp.
    // ldexp(0.0, x) -> 0.0
    // ldexp(-0.0, x) -> -0.0
    // ldexp(inf, x) -> inf
    // ldexp(-inf, x) -> -inf
    if (C && (C->isZero() || C->isInfinity())) {
      return IC.replaceInstUsesWith(II, Op0);
    }

    // With strictfp, be more careful about possibly needing to flush denormals
    // or not, and snan behavior depends on ieee_mode.
    if (II.isStrictFP())
      break;

    if (C && C->isNaN()) {
      // FIXME: We just need to make the nan quiet here, but that's unavailable
      // on APFloat, only IEEEfloat
      auto *Quieted =
          ConstantFP::get(Ty, scalbn(*C, 0, APFloat::rmNearestTiesToEven));
      return IC.replaceInstUsesWith(II, Quieted);
    }

    // ldexp(x, 0) -> x
    // ldexp(x, undef) -> x
    if (isa<UndefValue>(Op1) || match(Op1, PatternMatch::m_ZeroInt())) {
      return IC.replaceInstUsesWith(II, Op0);
    }

    break;
  }
  case Intrinsic::amdgcn_fmul_legacy: {
    Value *Op0 = II.getArgOperand(0);
    Value *Op1 = II.getArgOperand(1);

    // The legacy behaviour is that multiplying +/-0.0 by anything, even NaN or
    // infinity, gives +0.0.
    // TODO: Move to InstSimplify?
    if (match(Op0, PatternMatch::m_AnyZeroFP()) ||
        match(Op1, PatternMatch::m_AnyZeroFP()))
      return IC.replaceInstUsesWith(II, ConstantFP::getNullValue(II.getType()));

    // If we can prove we don't have one of the special cases then we can use a
    // normal fmul instruction instead.
    if (canSimplifyLegacyMulToMul(Op0, Op1, IC)) {
      auto *FMul = IC.Builder.CreateFMulFMF(Op0, Op1, &II);
      FMul->takeName(&II);
      return IC.replaceInstUsesWith(II, FMul);
    }
    break;
  }
  case Intrinsic::amdgcn_fma_legacy: {
    Value *Op0 = II.getArgOperand(0);
    Value *Op1 = II.getArgOperand(1);
    Value *Op2 = II.getArgOperand(2);

    // The legacy behaviour is that multiplying +/-0.0 by anything, even NaN or
    // infinity, gives +0.0.
    // TODO: Move to InstSimplify?
    if (match(Op0, PatternMatch::m_AnyZeroFP()) ||
        match(Op1, PatternMatch::m_AnyZeroFP())) {
      // It's tempting to just return Op2 here, but that would give the wrong
      // result if Op2 was -0.0.
      auto *Zero = ConstantFP::getNullValue(II.getType());
      auto *FAdd = IC.Builder.CreateFAddFMF(Zero, Op2, &II);
      FAdd->takeName(&II);
      return IC.replaceInstUsesWith(II, FAdd);
    }

    // If we can prove we don't have one of the special cases then we can use a
    // normal fma instead.
    if (canSimplifyLegacyMulToMul(Op0, Op1, IC)) {
      II.setCalledOperand(Intrinsic::getDeclaration(
          II.getModule(), Intrinsic::fma, II.getType()));
      return &II;
    }
    break;
  }
  default: {
    if (const AMDGPU::ImageDimIntrinsicInfo *ImageDimIntr =
            AMDGPU::getImageDimIntrinsicInfo(II.getIntrinsicID())) {
      return simplifyAMDGCNImageIntrinsic(ST, ImageDimIntr, II, IC);
    }
  }
  }
  return None;
}

/// Implement SimplifyDemandedVectorElts for amdgcn buffer and image intrinsics.
///
/// Note: This only supports non-TFE/LWE image intrinsic calls; those have
///       struct returns.
static Value *simplifyAMDGCNMemoryIntrinsicDemanded(InstCombiner &IC,
                                                    IntrinsicInst &II,
                                                    APInt DemandedElts,
                                                    int DMaskIdx = -1) {

  auto *IIVTy = cast<FixedVectorType>(II.getType());
  unsigned VWidth = IIVTy->getNumElements();
  if (VWidth == 1)
    return nullptr;

  IRBuilderBase::InsertPointGuard Guard(IC.Builder);
  IC.Builder.SetInsertPoint(&II);

  // Assume the arguments are unchanged and later override them, if needed.
  SmallVector<Value *, 16> Args(II.args());

  if (DMaskIdx < 0) {
    // Buffer case.

    const unsigned ActiveBits = DemandedElts.getActiveBits();
    const unsigned UnusedComponentsAtFront = DemandedElts.countTrailingZeros();

    // Start by assuming the whole prefix of elements up to the last demanded
    // one is needed; if the leading components are unused (trailing zero bits
    // in DemandedElts), clear them and bump the buffer offset instead.
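    //
    // Illustrative example (not from a specific test): if only elements 2 and
    // 3 of a <4 x float> raw.buffer.load are used, the load can be shrunk to
    // <2 x float> with the byte offset increased by 8.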
    DemandedElts = (1 << ActiveBits) - 1;

    if (UnusedComponentsAtFront > 0) {
      static const unsigned InvalidOffsetIdx = 0xf;

      unsigned OffsetIdx;
      switch (II.getIntrinsicID()) {
      case Intrinsic::amdgcn_raw_buffer_load:
        OffsetIdx = 1;
        break;
      case Intrinsic::amdgcn_s_buffer_load:
        // If resulting type is vec3, there is no point in trimming the
        // load with updated offset, as the vec3 would most likely be widened to
        // vec4 anyway during lowering.
        if (ActiveBits == 4 && UnusedComponentsAtFront == 1)
          OffsetIdx = InvalidOffsetIdx;
        else
          OffsetIdx = 1;
        break;
      case Intrinsic::amdgcn_struct_buffer_load:
        OffsetIdx = 2;
        break;
      default:
        // TODO: handle tbuffer* intrinsics.
        OffsetIdx = InvalidOffsetIdx;
        break;
      }

      if (OffsetIdx != InvalidOffsetIdx) {
        // Clear demanded bits and update the offset.
        DemandedElts &= ~((1 << UnusedComponentsAtFront) - 1);
        auto *Offset = II.getArgOperand(OffsetIdx);
        unsigned SingleComponentSizeInBits =
            IC.getDataLayout().getTypeSizeInBits(II.getType()->getScalarType());
        unsigned OffsetAdd =
            UnusedComponentsAtFront * SingleComponentSizeInBits / 8;
        auto *OffsetAddVal = ConstantInt::get(Offset->getType(), OffsetAdd);
        Args[OffsetIdx] = IC.Builder.CreateAdd(Offset, OffsetAddVal);
      }
    }
  } else {
    // Image case.

    ConstantInt *DMask = cast<ConstantInt>(II.getArgOperand(DMaskIdx));
    unsigned DMaskVal = DMask->getZExtValue() & 0xf;

    // Mask off values that are undefined because the dmask doesn't cover them
    DemandedElts &= (1 << countPopulation(DMaskVal)) - 1;

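    // Rebuild the dmask so it only covers the demanded components.
    // Illustrative example (not from a specific test): a dmask of 0b1011 loads
    // three components; if only the first two loaded elements are demanded,
    // the new dmask becomes 0b0011.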
    unsigned NewDMaskVal = 0;
    unsigned OrigLoadIdx = 0;
    for (unsigned SrcIdx = 0; SrcIdx < 4; ++SrcIdx) {
      const unsigned Bit = 1 << SrcIdx;
      if (!!(DMaskVal & Bit)) {
        if (!!DemandedElts[OrigLoadIdx])
          NewDMaskVal |= Bit;
        OrigLoadIdx++;
      }
    }

    if (DMaskVal != NewDMaskVal)
      Args[DMaskIdx] = ConstantInt::get(DMask->getType(), NewDMaskVal);
  }

  unsigned NewNumElts = DemandedElts.countPopulation();
  if (!NewNumElts)
    return UndefValue::get(II.getType());

  if (NewNumElts >= VWidth && DemandedElts.isMask()) {
    if (DMaskIdx >= 0)
      II.setArgOperand(DMaskIdx, Args[DMaskIdx]);
    return nullptr;
  }

  // Validate function argument and return types, extracting overloaded types
  // along the way.
  SmallVector<Type *, 6> OverloadTys;
  if (!Intrinsic::getIntrinsicSignature(II.getCalledFunction(), OverloadTys))
    return nullptr;

  Module *M = II.getParent()->getParent()->getParent();
  Type *EltTy = IIVTy->getElementType();
  Type *NewTy =
      (NewNumElts == 1) ? EltTy : FixedVectorType::get(EltTy, NewNumElts);

  OverloadTys[0] = NewTy;
  Function *NewIntrin =
      Intrinsic::getDeclaration(M, II.getIntrinsicID(), OverloadTys);

  CallInst *NewCall = IC.Builder.CreateCall(NewIntrin, Args);
  NewCall->takeName(&II);
  NewCall->copyMetadata(II);

  if (NewNumElts == 1) {
    return IC.Builder.CreateInsertElement(UndefValue::get(II.getType()),
                                          NewCall,
                                          DemandedElts.countTrailingZeros());
  }

  SmallVector<int, 8> EltMask;
  unsigned NewLoadIdx = 0;
  for (unsigned OrigLoadIdx = 0; OrigLoadIdx < VWidth; ++OrigLoadIdx) {
    if (!!DemandedElts[OrigLoadIdx])
      EltMask.push_back(NewLoadIdx++);
    else
      EltMask.push_back(NewNumElts);
  }

  Value *Shuffle = IC.Builder.CreateShuffleVector(NewCall, EltMask);

  return Shuffle;
}

Optional<Value *> GCNTTIImpl::simplifyDemandedVectorEltsIntrinsic(
    InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts,
    APInt &UndefElts2, APInt &UndefElts3,
    std::function<void(Instruction *, unsigned, APInt, APInt &)>
        SimplifyAndSetOp) const {
  switch (II.getIntrinsicID()) {
  case Intrinsic::amdgcn_buffer_load:
  case Intrinsic::amdgcn_buffer_load_format:
  case Intrinsic::amdgcn_raw_buffer_load:
  case Intrinsic::amdgcn_raw_buffer_load_format:
  case Intrinsic::amdgcn_raw_tbuffer_load:
  case Intrinsic::amdgcn_s_buffer_load:
  case Intrinsic::amdgcn_struct_buffer_load:
  case Intrinsic::amdgcn_struct_buffer_load_format:
  case Intrinsic::amdgcn_struct_tbuffer_load:
  case Intrinsic::amdgcn_tbuffer_load:
    return simplifyAMDGCNMemoryIntrinsicDemanded(IC, II, DemandedElts);
  default: {
    if (getAMDGPUImageDMaskIntrinsic(II.getIntrinsicID())) {
      return simplifyAMDGCNMemoryIntrinsicDemanded(IC, II, DemandedElts, 0);
    }
    break;
  }
  }
  return None;
}