1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// \brief This is the parent TargetLowering class for hardware code gen
12 /// targets.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "AMDGPUISelLowering.h"
17 #include "AMDGPU.h"
18 #include "AMDGPUFrameLowering.h"
19 #include "AMDGPUIntrinsicInfo.h"
20 #include "AMDGPURegisterInfo.h"
21 #include "AMDGPUSubtarget.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "SIMachineFunctionInfo.h"
24 #include "llvm/CodeGen/CallingConvLower.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/SelectionDAG.h"
28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/DiagnosticInfo.h"
31 #include "SIInstrInfo.h"
32 using namespace llvm;
33 
34 static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
35                       CCValAssign::LocInfo LocInfo,
36                       ISD::ArgFlagsTy ArgFlags, CCState &State) {
37   unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
38                                         ArgFlags.getOrigAlign());
39   State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
40 
41   return true;
42 }
43 
44 #include "AMDGPUGenCallingConv.inc"
45 
46 // Find a larger type to do a load / store of a vector with.
47 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
48   unsigned StoreSize = VT.getStoreSizeInBits();
49   if (StoreSize <= 32)
50     return EVT::getIntegerVT(Ctx, StoreSize);
51 
52   assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
53   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
54 }
55 
56 // Type for a vector that will be loaded to.
57 EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
58   unsigned StoreSize = VT.getStoreSizeInBits();
59   if (StoreSize <= 32)
60     return EVT::getIntegerVT(Ctx, 32);
61 
62   return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
63 }
64 
65 AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
66                                            const AMDGPUSubtarget &STI)
67     : TargetLowering(TM), Subtarget(&STI) {
68   setOperationAction(ISD::Constant, MVT::i32, Legal);
69   setOperationAction(ISD::Constant, MVT::i64, Legal);
70   setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
71   setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
72 
73   setOperationAction(ISD::BR_JT, MVT::Other, Expand);
74   setOperationAction(ISD::BRIND, MVT::Other, Expand);
75 
76   // This is totally unsupported, just custom lower to produce an error.
77   setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
78 
79   // We need to custom lower some of the intrinsics
80   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
81 
82   // Library functions.  These default to Expand, but we have instructions
83   // for them.
84   setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
85   setOperationAction(ISD::FEXP2,  MVT::f32, Legal);
86   setOperationAction(ISD::FPOW,   MVT::f32, Legal);
87   setOperationAction(ISD::FLOG2,  MVT::f32, Legal);
88   setOperationAction(ISD::FABS,   MVT::f32, Legal);
89   setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
90   setOperationAction(ISD::FRINT,  MVT::f32, Legal);
91   setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
92   setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
93   setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
94 
95   setOperationAction(ISD::FROUND, MVT::f32, Custom);
96   setOperationAction(ISD::FROUND, MVT::f64, Custom);
97 
98   setOperationAction(ISD::FREM, MVT::f32, Custom);
99   setOperationAction(ISD::FREM, MVT::f64, Custom);
100 
101   // v_mad_f32 does not support denormals according to some sources.
102   if (!Subtarget->hasFP32Denormals())
103     setOperationAction(ISD::FMAD, MVT::f32, Legal);
104 
105   // Expand to fneg + fadd.
106   setOperationAction(ISD::FSUB, MVT::f64, Expand);
107 
108   // Lower floating point store/load to integer store/load to reduce the number
109   // of patterns in tablegen.
110   setOperationAction(ISD::STORE, MVT::f32, Promote);
111   AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);
112 
113   setOperationAction(ISD::STORE, MVT::v2f32, Promote);
114   AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
115 
116   setOperationAction(ISD::STORE, MVT::v4f32, Promote);
117   AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
118 
119   setOperationAction(ISD::STORE, MVT::v8f32, Promote);
120   AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);
121 
122   setOperationAction(ISD::STORE, MVT::v16f32, Promote);
123   AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);
124 
125   setOperationAction(ISD::STORE, MVT::f64, Promote);
126   AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);
127 
128   setOperationAction(ISD::STORE, MVT::v2f64, Promote);
129   AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);
130 
131   // Custom lowering of vector stores is required for local address space
132   // stores.
133   setOperationAction(ISD::STORE, MVT::v4i32, Custom);
134 
135   setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
136   setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
137   setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
138 
139   // XXX: This can be change to Custom, once ExpandVectorStores can
140   // handle 64-bit stores.
141   setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);
142 
143   setTruncStoreAction(MVT::i64, MVT::i16, Expand);
144   setTruncStoreAction(MVT::i64, MVT::i8, Expand);
145   setTruncStoreAction(MVT::i64, MVT::i1, Expand);
146   setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
147   setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);
148 
149 
150   setOperationAction(ISD::LOAD, MVT::f32, Promote);
151   AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);
152 
153   setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
154   AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);
155 
156   setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
157   AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);
158 
159   setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
160   AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);
161 
162   setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
163   AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);
164 
165   setOperationAction(ISD::LOAD, MVT::f64, Promote);
166   AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);
167 
168   setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
169   AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);
170 
171   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
172   setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
173   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
174   setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
175   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
176   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
177   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
178   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
179   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
180   setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
181 
182   // There are no 64-bit extloads. These should be done as a 32-bit extload and
183   // an extension to 64-bit.
184   for (MVT VT : MVT::integer_valuetypes()) {
185     setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
186     setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
187     setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
188   }
189 
190   for (MVT VT : MVT::integer_vector_valuetypes()) {
191     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
192     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
193     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
194     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
195     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
196     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
197     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
198     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
199     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
200     setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
201     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
202     setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
203   }
204 
205   setOperationAction(ISD::BR_CC, MVT::i1, Expand);
206 
207   if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
208     setOperationAction(ISD::FCEIL, MVT::f64, Custom);
209     setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
210     setOperationAction(ISD::FRINT, MVT::f64, Custom);
211     setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
212   }
213 
214   if (!Subtarget->hasBFI()) {
215     // fcopysign can be done in a single instruction with BFI.
216     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
217     setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
218   }
219 
220   setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
221 
222   setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
223   setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
224   setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
225   setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
226 
227   setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
228   setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
229   setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
230   setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);
231 
232   setTruncStoreAction(MVT::f32, MVT::f16, Expand);
233   setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
234   setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
235   setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
236 
237   setTruncStoreAction(MVT::f64, MVT::f16, Expand);
238   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
239 
240   const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
241   for (MVT VT : ScalarIntVTs) {
242     setOperationAction(ISD::SREM, VT, Expand);
243     setOperationAction(ISD::SDIV, VT, Expand);
244 
245     // GPU does not have divrem function for signed or unsigned.
246     setOperationAction(ISD::SDIVREM, VT, Custom);
247     setOperationAction(ISD::UDIVREM, VT, Custom);
248 
249     // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
250     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
251     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
252 
253     setOperationAction(ISD::BSWAP, VT, Expand);
254     setOperationAction(ISD::CTTZ, VT, Expand);
255     setOperationAction(ISD::CTLZ, VT, Expand);
256   }
257 
258   if (!Subtarget->hasBCNT(32))
259     setOperationAction(ISD::CTPOP, MVT::i32, Expand);
260 
261   if (!Subtarget->hasBCNT(64))
262     setOperationAction(ISD::CTPOP, MVT::i64, Expand);
263 
264   // The hardware supports 32-bit ROTR, but not ROTL.
265   setOperationAction(ISD::ROTL, MVT::i32, Expand);
266   setOperationAction(ISD::ROTL, MVT::i64, Expand);
267   setOperationAction(ISD::ROTR, MVT::i64, Expand);
268 
269   setOperationAction(ISD::MUL, MVT::i64, Expand);
270   setOperationAction(ISD::MULHU, MVT::i64, Expand);
271   setOperationAction(ISD::MULHS, MVT::i64, Expand);
272   setOperationAction(ISD::UDIV, MVT::i32, Expand);
273   setOperationAction(ISD::UREM, MVT::i32, Expand);
274   setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
275   setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
276   setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
277   setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
278   setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
279 
280   setOperationAction(ISD::SMIN, MVT::i32, Legal);
281   setOperationAction(ISD::UMIN, MVT::i32, Legal);
282   setOperationAction(ISD::SMAX, MVT::i32, Legal);
283   setOperationAction(ISD::UMAX, MVT::i32, Legal);
284 
285   if (Subtarget->hasFFBH())
286     setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
287 
288   if (Subtarget->hasFFBL())
289     setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
290 
291   setOperationAction(ISD::CTLZ, MVT::i64, Custom);
292   setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
293 
294   // We only really have 32-bit BFE instructions (and 16-bit on VI).
295   //
296   // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
297   // effort to match them now. We want this to be false for i64 cases when the
298   // extraction isn't restricted to the upper or lower half. Ideally we would
299   // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
300   // span the midpoint are probably relatively rare, so don't worry about them
301   // for now.
302   if (Subtarget->hasBFE())
303     setHasExtractBitsInsn(true);
304 
305   static const MVT::SimpleValueType VectorIntTypes[] = {
306     MVT::v2i32, MVT::v4i32
307   };
308 
309   for (MVT VT : VectorIntTypes) {
310     // Expand the following operations for the current type by default.
311     setOperationAction(ISD::ADD,  VT, Expand);
312     setOperationAction(ISD::AND,  VT, Expand);
313     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
314     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
315     setOperationAction(ISD::MUL,  VT, Expand);
316     setOperationAction(ISD::OR,   VT, Expand);
317     setOperationAction(ISD::SHL,  VT, Expand);
318     setOperationAction(ISD::SRA,  VT, Expand);
319     setOperationAction(ISD::SRL,  VT, Expand);
320     setOperationAction(ISD::ROTL, VT, Expand);
321     setOperationAction(ISD::ROTR, VT, Expand);
322     setOperationAction(ISD::SUB,  VT, Expand);
323     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
324     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
325     setOperationAction(ISD::SDIV, VT, Expand);
326     setOperationAction(ISD::UDIV, VT, Expand);
327     setOperationAction(ISD::SREM, VT, Expand);
328     setOperationAction(ISD::UREM, VT, Expand);
329     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
330     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
331     setOperationAction(ISD::SDIVREM, VT, Custom);
332     setOperationAction(ISD::UDIVREM, VT, Expand);
333     setOperationAction(ISD::ADDC, VT, Expand);
334     setOperationAction(ISD::SUBC, VT, Expand);
335     setOperationAction(ISD::ADDE, VT, Expand);
336     setOperationAction(ISD::SUBE, VT, Expand);
337     setOperationAction(ISD::SELECT, VT, Expand);
338     setOperationAction(ISD::VSELECT, VT, Expand);
339     setOperationAction(ISD::SELECT_CC, VT, Expand);
340     setOperationAction(ISD::XOR,  VT, Expand);
341     setOperationAction(ISD::BSWAP, VT, Expand);
342     setOperationAction(ISD::CTPOP, VT, Expand);
343     setOperationAction(ISD::CTTZ, VT, Expand);
344     setOperationAction(ISD::CTLZ, VT, Expand);
345     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
346   }
347 
348   static const MVT::SimpleValueType FloatVectorTypes[] = {
349     MVT::v2f32, MVT::v4f32
350   };
351 
352   for (MVT VT : FloatVectorTypes) {
353     setOperationAction(ISD::FABS, VT, Expand);
354     setOperationAction(ISD::FMINNUM, VT, Expand);
355     setOperationAction(ISD::FMAXNUM, VT, Expand);
356     setOperationAction(ISD::FADD, VT, Expand);
357     setOperationAction(ISD::FCEIL, VT, Expand);
358     setOperationAction(ISD::FCOS, VT, Expand);
359     setOperationAction(ISD::FDIV, VT, Expand);
360     setOperationAction(ISD::FEXP2, VT, Expand);
361     setOperationAction(ISD::FLOG2, VT, Expand);
362     setOperationAction(ISD::FREM, VT, Expand);
363     setOperationAction(ISD::FPOW, VT, Expand);
364     setOperationAction(ISD::FFLOOR, VT, Expand);
365     setOperationAction(ISD::FTRUNC, VT, Expand);
366     setOperationAction(ISD::FMUL, VT, Expand);
367     setOperationAction(ISD::FMA, VT, Expand);
368     setOperationAction(ISD::FRINT, VT, Expand);
369     setOperationAction(ISD::FNEARBYINT, VT, Expand);
370     setOperationAction(ISD::FSQRT, VT, Expand);
371     setOperationAction(ISD::FSIN, VT, Expand);
372     setOperationAction(ISD::FSUB, VT, Expand);
373     setOperationAction(ISD::FNEG, VT, Expand);
374     setOperationAction(ISD::SELECT, VT, Expand);
375     setOperationAction(ISD::VSELECT, VT, Expand);
376     setOperationAction(ISD::SELECT_CC, VT, Expand);
377     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
378     setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
379   }
380 
381   setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
382   setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);
383 
384   setTargetDAGCombine(ISD::AND);
385   setTargetDAGCombine(ISD::SHL);
386   setTargetDAGCombine(ISD::SRA);
387   setTargetDAGCombine(ISD::SRL);
388   setTargetDAGCombine(ISD::MUL);
389   setTargetDAGCombine(ISD::SELECT);
390   setTargetDAGCombine(ISD::SELECT_CC);
391   setTargetDAGCombine(ISD::STORE);
392 
393   setTargetDAGCombine(ISD::FADD);
394   setTargetDAGCombine(ISD::FSUB);
395 
396   setTargetDAGCombine(ISD::BITCAST);
397 
398   setBooleanContents(ZeroOrNegativeOneBooleanContent);
399   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
400 
401   setSchedulingPreference(Sched::RegPressure);
402   setJumpIsExpensive(true);
403 
404   // SI at least has hardware support for floating point exceptions, but no way
405   // of using or handling them is implemented. They are also optional in OpenCL
406   // (Section 7.3)
407   setHasFloatingPointExceptions(Subtarget->hasFPExceptions());
408 
409   setSelectIsExpensive(false);
410   PredictableSelectIsExpensive = false;
411 
412   setFsqrtIsCheap(true);
413 
414   // We want to find all load dependencies for long chains of stores to enable
415   // merging into very wide vectors. The problem is with vectors with > 4
416   // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
417   // vectors are a legal type, even though we have to split the loads
418   // usually. When we can more precisely specify load legality per address
419   // space, we should be able to make FindBetterChain/MergeConsecutiveStores
420   // smarter so that they can figure out what to do in 2 iterations without all
421   // N > 4 stores on the same chain.
422   GatherAllAliasesMaxDepth = 16;
423 
424   // FIXME: Need to really handle these.
425   MaxStoresPerMemcpy  = 4096;
426   MaxStoresPerMemmove = 4096;
427   MaxStoresPerMemset  = 4096;
428 }
429 
430 //===----------------------------------------------------------------------===//
431 // Target Information
432 //===----------------------------------------------------------------------===//
433 
434 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
435   return MVT::i32;
436 }
437 
438 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
439   return true;
440 }
441 
442 // The backend supports 32 and 64 bit floating point immediates.
443 // FIXME: Why are we reporting vectors of FP immediates as legal?
444 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
445   EVT ScalarVT = VT.getScalarType();
446   return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
447 }
448 
449 // We don't want to shrink f64 / f32 constants.
450 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
451   EVT ScalarVT = VT.getScalarType();
452   return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
453 }
454 
455 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
456                                                  ISD::LoadExtType,
457                                                  EVT NewVT) const {
458 
459   unsigned NewSize = NewVT.getStoreSizeInBits();
460 
461   // If we are reducing to a 32-bit load, this is always better.
462   if (NewSize == 32)
463     return true;
464 
465   EVT OldVT = N->getValueType(0);
466   unsigned OldSize = OldVT.getStoreSizeInBits();
467 
468   // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
469   // extloads, so doing one requires using a buffer_load. In cases where we
470   // still couldn't use a scalar load, using the wider load shouldn't really
471   // hurt anything.
472 
473   // If the old size already had to be an extload, there's no harm in continuing
474   // to reduce the width.
475   return (OldSize < 32);
476 }
477 
478 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
479                                                    EVT CastTy) const {
480   if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
481     return true;
482 
483   unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
484   unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();
485 
486   return ((LScalarSize <= CastScalarSize) ||
487           (CastScalarSize >= 32) ||
488           (LScalarSize < 32));
489 }
490 
491 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
492 // profitable with the expansion for 64-bit since it's generally good to
493 // speculate things.
494 // FIXME: These should really have the size as a parameter.
495 bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
496   return true;
497 }
498 
499 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
500   return true;
501 }
502 
503 //===---------------------------------------------------------------------===//
504 // Target Properties
505 //===---------------------------------------------------------------------===//
506 
507 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
508   assert(VT.isFloatingPoint());
509   return VT == MVT::f32 || VT == MVT::f64;
510 }
511 
512 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
513   assert(VT.isFloatingPoint());
514   return VT == MVT::f32 || VT == MVT::f64;
515 }
516 
517 bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(EVT MemVT,
518                                                          unsigned NumElem,
519                                                          unsigned AS) const {
520   return true;
521 }
522 
523 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
524   // There are few operations which truly have vector input operands. Any vector
525   // operation is going to involve operations on each component, and a
526   // build_vector will be a copy per element, so it always makes sense to use a
527   // build_vector input in place of the extracted element to avoid a copy into a
528   // super register.
529   //
530   // We should probably only do this if all users are extracts only, but this
531   // should be the common case.
532   return true;
533 }
534 
535 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
536   // Truncate is just accessing a subregister.
537   return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
538 }
539 
540 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
541   // Truncate is just accessing a subregister.
542   return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
543          (Dest->getPrimitiveSizeInBits() % 32 == 0);
544 }
545 
546 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
547   unsigned SrcSize = Src->getScalarSizeInBits();
548   unsigned DestSize = Dest->getScalarSizeInBits();
549 
550   return SrcSize == 32 && DestSize == 64;
551 }
552 
553 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
554   // Any register load of a 64-bit value really requires 2 32-bit moves. For all
555   // practical purposes, the extra mov 0 to load a 64-bit is free.  As used,
556   // this will enable reducing 64-bit operations the 32-bit, which is always
557   // good.
558   return Src == MVT::i32 && Dest == MVT::i64;
559 }
560 
561 bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
562   return isZExtFree(Val.getValueType(), VT2);
563 }
564 
565 bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
566   // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
567   // limited number of native 64-bit operations. Shrinking an operation to fit
568   // in a single 32-bit register should always be helpful. As currently used,
569   // this is much less general than the name suggests, and is only used in
570   // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
571   // not profitable, and may actually be harmful.
572   return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
573 }
574 
575 //===---------------------------------------------------------------------===//
576 // TargetLowering Callbacks
577 //===---------------------------------------------------------------------===//
578 
579 void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
580                              const SmallVectorImpl<ISD::InputArg> &Ins) const {
581 
582   State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
583 }
584 
585 void AMDGPUTargetLowering::AnalyzeReturn(CCState &State,
586                            const SmallVectorImpl<ISD::OutputArg> &Outs) const {
587 
588   State.AnalyzeReturn(Outs, RetCC_SI);
589 }
590 
591 SDValue AMDGPUTargetLowering::LowerReturn(
592                                      SDValue Chain,
593                                      CallingConv::ID CallConv,
594                                      bool isVarArg,
595                                      const SmallVectorImpl<ISD::OutputArg> &Outs,
596                                      const SmallVectorImpl<SDValue> &OutVals,
597                                      SDLoc DL, SelectionDAG &DAG) const {
598   return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
599 }
600 
601 //===---------------------------------------------------------------------===//
602 // Target specific lowering
603 //===---------------------------------------------------------------------===//
604 
605 SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
606                                         SmallVectorImpl<SDValue> &InVals) const {
607   SDValue Callee = CLI.Callee;
608   SelectionDAG &DAG = CLI.DAG;
609 
610   const Function &Fn = *DAG.getMachineFunction().getFunction();
611 
612   StringRef FuncName("<unknown>");
613 
614   if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
615     FuncName = G->getSymbol();
616   else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
617     FuncName = G->getGlobal()->getName();
618 
619   DiagnosticInfoUnsupported NoCalls(
620       Fn, "unsupported call to function " + FuncName, CLI.DL.getDebugLoc());
621   DAG.getContext()->diagnose(NoCalls);
622   return SDValue();
623 }
624 
625 SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
626                                                       SelectionDAG &DAG) const {
627   const Function &Fn = *DAG.getMachineFunction().getFunction();
628 
629   DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
630                                             SDLoc(Op).getDebugLoc());
631   DAG.getContext()->diagnose(NoDynamicAlloca);
632   return SDValue();
633 }
634 
635 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
636                                              SelectionDAG &DAG) const {
637   switch (Op.getOpcode()) {
638   default:
639     Op->dump(&DAG);
640     llvm_unreachable("Custom lowering code for this"
641                      "instruction is not implemented yet!");
642     break;
643   case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
644   case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
645   case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
646   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
647   case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
648   case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
649   case ISD::FREM: return LowerFREM(Op, DAG);
650   case ISD::FCEIL: return LowerFCEIL(Op, DAG);
651   case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
652   case ISD::FRINT: return LowerFRINT(Op, DAG);
653   case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
654   case ISD::FROUND: return LowerFROUND(Op, DAG);
655   case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
656   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
657   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
658   case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
659   case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
660   case ISD::CTLZ:
661   case ISD::CTLZ_ZERO_UNDEF:
662     return LowerCTLZ(Op, DAG);
663   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
664   }
665   return Op;
666 }
667 
668 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
669                                               SmallVectorImpl<SDValue> &Results,
670                                               SelectionDAG &DAG) const {
671   switch (N->getOpcode()) {
672   case ISD::SIGN_EXTEND_INREG:
673     // Different parts of legalization seem to interpret which type of
674     // sign_extend_inreg is the one to check for custom lowering. The extended
675     // from type is what really matters, but some places check for custom
676     // lowering of the result type. This results in trying to use
677     // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
678     // nothing here and let the illegal result integer be handled normally.
679     return;
680   default:
681     return;
682   }
683 }
684 
685 // FIXME: This implements accesses to initialized globals in the constant
686 // address space by copying them to private and accessing that. It does not
687 // properly handle illegal types or vectors. The private vector loads are not
688 // scalarized, and the illegal scalars hit an assertion. This technique will not
689 // work well with large initializers, and this should eventually be
690 // removed. Initialized globals should be placed into a data section that the
691 // runtime will load into a buffer before the kernel is executed. Uses of the
692 // global need to be replaced with a pointer loaded from an implicit kernel
693 // argument into this buffer holding the copy of the data, which will remove the
694 // need for any of this.
695 SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
696                                                        const GlobalValue *GV,
697                                                        const SDValue &InitPtr,
698                                                        SDValue Chain,
699                                                        SelectionDAG &DAG) const {
700   const DataLayout &TD = DAG.getDataLayout();
701   SDLoc DL(InitPtr);
702   Type *InitTy = Init->getType();
703 
704   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) {
705     EVT VT = EVT::getEVT(InitTy);
706     PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
707     return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
708                         MachinePointerInfo(UndefValue::get(PtrTy)), false,
709                         false, TD.getPrefTypeAlignment(InitTy));
710   }
711 
712   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
713     EVT VT = EVT::getEVT(CFP->getType());
714     PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
715     return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
716                         MachinePointerInfo(UndefValue::get(PtrTy)), false,
717                         false, TD.getPrefTypeAlignment(CFP->getType()));
718   }
719 
720   if (StructType *ST = dyn_cast<StructType>(InitTy)) {
721     const StructLayout *SL = TD.getStructLayout(ST);
722 
723     EVT PtrVT = InitPtr.getValueType();
724     SmallVector<SDValue, 8> Chains;
725 
726     for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) {
727       SDValue Offset = DAG.getConstant(SL->getElementOffset(I), DL, PtrVT);
728       SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
729 
730       Constant *Elt = Init->getAggregateElement(I);
731       Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
732     }
733 
734     return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
735   }
736 
737   if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) {
738     EVT PtrVT = InitPtr.getValueType();
739 
740     unsigned NumElements;
741     if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy))
742       NumElements = AT->getNumElements();
743     else if (VectorType *VT = dyn_cast<VectorType>(SeqTy))
744       NumElements = VT->getNumElements();
745     else
746       llvm_unreachable("Unexpected type");
747 
748     unsigned EltSize = TD.getTypeAllocSize(SeqTy->getElementType());
749     SmallVector<SDValue, 8> Chains;
750     for (unsigned i = 0; i < NumElements; ++i) {
751       SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT);
752       SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset);
753 
754       Constant *Elt = Init->getAggregateElement(i);
755       Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG));
756     }
757 
758     return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
759   }
760 
761   if (isa<UndefValue>(Init)) {
762     EVT VT = EVT::getEVT(InitTy);
763     PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
764     return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
765                         MachinePointerInfo(UndefValue::get(PtrTy)), false,
766                         false, TD.getPrefTypeAlignment(InitTy));
767   }
768 
769   Init->dump();
770   llvm_unreachable("Unhandled constant initializer");
771 }
772 
773 static bool hasDefinedInitializer(const GlobalValue *GV) {
774   const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
775   if (!GVar || !GVar->hasInitializer())
776     return false;
777 
778   return !isa<UndefValue>(GVar->getInitializer());
779 }
780 
781 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
782                                                  SDValue Op,
783                                                  SelectionDAG &DAG) const {
784 
785   const DataLayout &DL = DAG.getDataLayout();
786   GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
787   const GlobalValue *GV = G->getGlobal();
788 
789   switch (G->getAddressSpace()) {
790   case AMDGPUAS::LOCAL_ADDRESS: {
791     // XXX: What does the value of G->getOffset() mean?
792     assert(G->getOffset() == 0 &&
793          "Do not know what to do with an non-zero offset");
794 
795     // TODO: We could emit code to handle the initialization somewhere.
796     if (hasDefinedInitializer(GV))
797       break;
798 
799     unsigned Offset;
800     if (MFI->LocalMemoryObjects.count(GV) == 0) {
801       unsigned Align = GV->getAlignment();
802       if (Align == 0)
803         Align = DL.getABITypeAlignment(GV->getValueType());
804 
805       /// TODO: We should sort these to minimize wasted space due to alignment
806       /// padding. Currently the padding is decided by the first encountered use
807       /// during lowering.
808       Offset = MFI->LDSSize = alignTo(MFI->LDSSize, Align);
809       MFI->LocalMemoryObjects[GV] = Offset;
810       MFI->LDSSize += DL.getTypeAllocSize(GV->getValueType());
811     } else {
812       Offset = MFI->LocalMemoryObjects[GV];
813     }
814 
815     return DAG.getConstant(Offset, SDLoc(Op),
816                            getPointerTy(DL, AMDGPUAS::LOCAL_ADDRESS));
817   }
818   case AMDGPUAS::CONSTANT_ADDRESS: {
819     MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
820     Type *EltType = GV->getValueType();
821     unsigned Size = DL.getTypeAllocSize(EltType);
822     unsigned Alignment = DL.getPrefTypeAlignment(EltType);
823 
824     MVT PrivPtrVT = getPointerTy(DL, AMDGPUAS::PRIVATE_ADDRESS);
825     MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
826 
827     int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
828     SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);
829 
830     const GlobalVariable *Var = cast<GlobalVariable>(GV);
831     if (!Var->hasInitializer()) {
832       // This has no use, but bugpoint will hit it.
833       return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
834     }
835 
836     const Constant *Init = Var->getInitializer();
837     SmallVector<SDNode*, 8> WorkList;
838 
839     for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
840                               E = DAG.getEntryNode()->use_end(); I != E; ++I) {
841       if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
842         continue;
843       WorkList.push_back(*I);
844     }
845     SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
846     for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
847                                            E = WorkList.end(); I != E; ++I) {
848       SmallVector<SDValue, 8> Ops;
849       Ops.push_back(Chain);
850       for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
851         Ops.push_back((*I)->getOperand(i));
852       }
853       DAG.UpdateNodeOperands(*I, Ops);
854     }
855     return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
856   }
857   }
858 
859   const Function &Fn = *DAG.getMachineFunction().getFunction();
860   DiagnosticInfoUnsupported BadInit(
861       Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
862   DAG.getContext()->diagnose(BadInit);
863   return SDValue();
864 }
865 
866 SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
867                                                   SelectionDAG &DAG) const {
868   SmallVector<SDValue, 8> Args;
869 
870   for (const SDUse &U : Op->ops())
871     DAG.ExtractVectorElements(U.get(), Args);
872 
873   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
874 }
875 
876 SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
877                                                      SelectionDAG &DAG) const {
878 
879   SmallVector<SDValue, 8> Args;
880   unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
881   EVT VT = Op.getValueType();
882   DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
883                             VT.getVectorNumElements());
884 
885   return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
886 }
887 
888 SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
889     SelectionDAG &DAG) const {
890   unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
891   SDLoc DL(Op);
892   EVT VT = Op.getValueType();
893 
894   switch (IntrinsicID) {
895     default: return Op;
896     case AMDGPUIntrinsic::AMDGPU_clamp:
897     case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
898       return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
899                          Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
900 
901     case Intrinsic::AMDGPU_ldexp: // Legacy name
902       return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
903                                                    Op.getOperand(2));
904 
905     case AMDGPUIntrinsic::AMDGPU_bfe_i32:
906       return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
907                          Op.getOperand(1),
908                          Op.getOperand(2),
909                          Op.getOperand(3));
910 
911     case AMDGPUIntrinsic::AMDGPU_bfe_u32:
912       return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
913                          Op.getOperand(1),
914                          Op.getOperand(2),
915                          Op.getOperand(3));
916 
917     case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
918       return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));
919 
920     case AMDGPUIntrinsic::AMDGPU_brev: // Legacy name
921       return DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(1));
922   }
923 }
924 
925 /// \brief Generate Min/Max node
926 SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
927                                                    EVT VT,
928                                                    SDValue LHS,
929                                                    SDValue RHS,
930                                                    SDValue True,
931                                                    SDValue False,
932                                                    SDValue CC,
933                                                    DAGCombinerInfo &DCI) const {
934   if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
935     return SDValue();
936 
937   if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
938     return SDValue();
939 
940   SelectionDAG &DAG = DCI.DAG;
941   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
942   switch (CCOpcode) {
943   case ISD::SETOEQ:
944   case ISD::SETONE:
945   case ISD::SETUNE:
946   case ISD::SETNE:
947   case ISD::SETUEQ:
948   case ISD::SETEQ:
949   case ISD::SETFALSE:
950   case ISD::SETFALSE2:
951   case ISD::SETTRUE:
952   case ISD::SETTRUE2:
953   case ISD::SETUO:
954   case ISD::SETO:
955     break;
956   case ISD::SETULE:
957   case ISD::SETULT: {
958     if (LHS == True)
959       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
960     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
961   }
962   case ISD::SETOLE:
963   case ISD::SETOLT:
964   case ISD::SETLE:
965   case ISD::SETLT: {
966     // Ordered. Assume ordered for undefined.
967 
968     // Only do this after legalization to avoid interfering with other combines
969     // which might occur.
970     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
971         !DCI.isCalledByLegalizer())
972       return SDValue();
973 
974     // We need to permute the operands to get the correct NaN behavior. The
975     // selected operand is the second one based on the failing compare with NaN,
976     // so permute it based on the compare type the hardware uses.
977     if (LHS == True)
978       return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
979     return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
980   }
981   case ISD::SETUGE:
982   case ISD::SETUGT: {
983     if (LHS == True)
984       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
985     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
986   }
987   case ISD::SETGT:
988   case ISD::SETGE:
989   case ISD::SETOGE:
990   case ISD::SETOGT: {
991     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
992         !DCI.isCalledByLegalizer())
993       return SDValue();
994 
995     if (LHS == True)
996       return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
997     return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
998   }
999   case ISD::SETCC_INVALID:
1000     llvm_unreachable("Invalid setcc condcode!");
1001   }
1002   return SDValue();
1003 }
1004 
1005 std::pair<SDValue, SDValue>
1006 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
1007   SDLoc SL(Op);
1008 
1009   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1010 
1011   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1012   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1013 
1014   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1015   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1016 
1017   return std::make_pair(Lo, Hi);
1018 }
1019 
1020 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
1021   SDLoc SL(Op);
1022 
1023   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1024   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1025   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1026 }
1027 
1028 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
1029   SDLoc SL(Op);
1030 
1031   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
1032   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1033   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1034 }
1035 
1036 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
1037                                               SelectionDAG &DAG) const {
1038   LoadSDNode *Load = cast<LoadSDNode>(Op);
1039   EVT VT = Op.getValueType();
1040 
1041 
1042   // If this is a 2 element vector, we really want to scalarize and not create
1043   // weird 1 element vectors.
1044   if (VT.getVectorNumElements() == 2)
1045     return scalarizeVectorLoad(Load, DAG);
1046 
1047   SDValue BasePtr = Load->getBasePtr();
1048   EVT PtrVT = BasePtr.getValueType();
1049   EVT MemVT = Load->getMemoryVT();
1050   SDLoc SL(Op);
1051 
1052   const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
1053 
1054   EVT LoVT, HiVT;
1055   EVT LoMemVT, HiMemVT;
1056   SDValue Lo, Hi;
1057 
1058   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
1059   std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
1060   std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);
1061 
1062   unsigned Size = LoMemVT.getStoreSize();
1063   unsigned BaseAlign = Load->getAlignment();
1064   unsigned HiAlign = MinAlign(BaseAlign, Size);
1065 
1066   SDValue LoLoad
1067     = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
1068                      Load->getChain(), BasePtr,
1069                      SrcValue,
1070                      LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
1071                      Load->isInvariant(), BaseAlign);
1072 
1073   SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
1074                               DAG.getConstant(Size, SL, PtrVT));
1075 
1076   SDValue HiLoad
1077     = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
1078                      Load->getChain(), HiPtr,
1079                      SrcValue.getWithOffset(LoMemVT.getStoreSize()),
1080                      HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
1081                      Load->isInvariant(), HiAlign);
1082 
1083   SDValue Ops[] = {
1084     DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
1085     DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
1086                 LoLoad.getValue(1), HiLoad.getValue(1))
1087   };
1088 
1089   return DAG.getMergeValues(Ops, SL);
1090 }
1091 
1092 // FIXME: This isn't doing anything for SI. This should be used in a target
1093 // combine during type legalization.
1094 SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
1095                                                SelectionDAG &DAG) const {
1096   StoreSDNode *Store = cast<StoreSDNode>(Op);
1097   EVT MemVT = Store->getMemoryVT();
1098   unsigned MemBits = MemVT.getSizeInBits();
1099 
1100   // Byte stores are really expensive, so if possible, try to pack 32-bit vector
1101   // truncating store into an i32 store.
1102   // XXX: We could also handle optimize other vector bitwidths.
1103   if (!MemVT.isVector() || MemBits > 32) {
1104     return SDValue();
1105   }
1106 
1107   SDLoc DL(Op);
1108   SDValue Value = Store->getValue();
1109   EVT VT = Value.getValueType();
1110   EVT ElemVT = VT.getVectorElementType();
1111   SDValue Ptr = Store->getBasePtr();
1112   EVT MemEltVT = MemVT.getVectorElementType();
1113   unsigned MemEltBits = MemEltVT.getSizeInBits();
1114   unsigned MemNumElements = MemVT.getVectorNumElements();
1115   unsigned PackedSize = MemVT.getStoreSizeInBits();
1116   SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32);
1117 
1118   assert(Value.getValueType().getScalarSizeInBits() >= 32);
1119 
1120   SDValue PackedValue;
1121   for (unsigned i = 0; i < MemNumElements; ++i) {
1122     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
1123                               DAG.getConstant(i, DL, MVT::i32));
1124     Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
1125     Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg
1126 
1127     SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32);
1128     Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);
1129 
1130     if (i == 0) {
1131       PackedValue = Elt;
1132     } else {
1133       PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
1134     }
1135   }
1136 
1137   if (PackedSize < 32) {
1138     EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
1139     return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
1140                              Store->getMemOperand()->getPointerInfo(),
1141                              PackedVT,
1142                              Store->isNonTemporal(), Store->isVolatile(),
1143                              Store->getAlignment());
1144   }
1145 
1146   return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
1147                       Store->getMemOperand()->getPointerInfo(),
1148                       Store->isVolatile(),  Store->isNonTemporal(),
1149                       Store->getAlignment());
1150 }
1151 
1152 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
1153                                                SelectionDAG &DAG) const {
1154   StoreSDNode *Store = cast<StoreSDNode>(Op);
1155   SDValue Val = Store->getValue();
1156   EVT VT = Val.getValueType();
1157 
1158   // If this is a 2 element vector, we really want to scalarize and not create
1159   // weird 1 element vectors.
1160   if (VT.getVectorNumElements() == 2)
1161     return scalarizeVectorStore(Store, DAG);
1162 
1163   EVT MemVT = Store->getMemoryVT();
1164   SDValue Chain = Store->getChain();
1165   SDValue BasePtr = Store->getBasePtr();
1166   SDLoc SL(Op);
1167 
1168   EVT LoVT, HiVT;
1169   EVT LoMemVT, HiMemVT;
1170   SDValue Lo, Hi;
1171 
1172   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
1173   std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
1174   std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);
1175 
1176   EVT PtrVT = BasePtr.getValueType();
1177   SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
1178                               DAG.getConstant(LoMemVT.getStoreSize(), SL,
1179                                               PtrVT));
1180 
1181   const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
1182   unsigned BaseAlign = Store->getAlignment();
1183   unsigned Size = LoMemVT.getStoreSize();
1184   unsigned HiAlign = MinAlign(BaseAlign, Size);
1185 
1186   SDValue LoStore
1187     = DAG.getTruncStore(Chain, SL, Lo,
1188                         BasePtr,
1189                         SrcValue,
1190                         LoMemVT,
1191                         Store->isNonTemporal(),
1192                         Store->isVolatile(),
1193                         BaseAlign);
1194   SDValue HiStore
1195     = DAG.getTruncStore(Chain, SL, Hi,
1196                         HiPtr,
1197                         SrcValue.getWithOffset(Size),
1198                         HiMemVT,
1199                         Store->isNonTemporal(),
1200                         Store->isVolatile(),
1201                         HiAlign);
1202 
1203   return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
1204 }
1205 
1206 // This is a shortcut for integer division because we have fast i32<->f32
1207 // conversions, and fast f32 reciprocal instructions. The fractional part of a
1208 // float is enough to accurately represent up to a 24-bit integer.
1209 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const {
1210   SDLoc DL(Op);
1211   EVT VT = Op.getValueType();
1212   SDValue LHS = Op.getOperand(0);
1213   SDValue RHS = Op.getOperand(1);
1214   MVT IntVT = MVT::i32;
1215   MVT FltVT = MVT::f32;
1216 
1217   ISD::NodeType ToFp  = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
1218   ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
1219 
1220   if (VT.isVector()) {
1221     unsigned NElts = VT.getVectorNumElements();
1222     IntVT = MVT::getVectorVT(MVT::i32, NElts);
1223     FltVT = MVT::getVectorVT(MVT::f32, NElts);
1224   }
1225 
1226   unsigned BitSize = VT.getScalarType().getSizeInBits();
1227 
1228   SDValue jq = DAG.getConstant(1, DL, IntVT);
1229 
1230   if (sign) {
1231     // char|short jq = ia ^ ib;
1232     jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
1233 
1234     // jq = jq >> (bitsize - 2)
1235     jq = DAG.getNode(ISD::SRA, DL, VT, jq,
1236                      DAG.getConstant(BitSize - 2, DL, VT));
1237 
1238     // jq = jq | 0x1
1239     jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
1240 
1241     // jq = (int)jq
1242     jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
1243   }
1244 
1245   // int ia = (int)LHS;
1246   SDValue ia = sign ?
1247     DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);
1248 
1249   // int ib, (int)RHS;
1250   SDValue ib = sign ?
1251     DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);
1252 
1253   // float fa = (float)ia;
1254   SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);
1255 
1256   // float fb = (float)ib;
1257   SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);
1258 
1259   // TODO: Should this propagate fast-math-flags?
1260   // float fq = native_divide(fa, fb);
1261   SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
1262                            fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));
1263 
1264   // fq = trunc(fq);
1265   fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);
1266 
1267   // float fqneg = -fq;
1268   SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);
1269 
1270   // float fr = mad(fqneg, fb, fa);
1271   SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
1272                            DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);
1273 
1274   // int iq = (int)fq;
1275   SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);
1276 
1277   // fr = fabs(fr);
1278   fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);
1279 
1280   // fb = fabs(fb);
1281   fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);
1282 
1283   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
1284 
1285   // int cv = fr >= fb;
1286   SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);
1287 
1288   // jq = (cv ? jq : 0);
1289   jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));
1290 
1291   // dst = trunc/extend to legal type
1292   iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT);
1293 
1294   // dst = iq + jq;
1295   SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);
1296 
1297   // Rem needs compensation, it's easier to recompute it
1298   SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
1299   Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);
1300 
1301   SDValue Res[2] = {
1302     Div,
1303     Rem
1304   };
1305   return DAG.getMergeValues(Res, DL);
1306 }
1307 
1308 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
1309                                       SelectionDAG &DAG,
1310                                       SmallVectorImpl<SDValue> &Results) const {
1311   assert(Op.getValueType() == MVT::i64);
1312 
1313   SDLoc DL(Op);
1314   EVT VT = Op.getValueType();
1315   EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1316 
1317   SDValue one = DAG.getConstant(1, DL, HalfVT);
1318   SDValue zero = DAG.getConstant(0, DL, HalfVT);
1319 
1320   //HiLo split
1321   SDValue LHS = Op.getOperand(0);
1322   SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero);
1323   SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one);
1324 
1325   SDValue RHS = Op.getOperand(1);
1326   SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero);
1327   SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one);
1328 
1329   if (VT == MVT::i64 &&
1330     DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
1331     DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {
1332 
1333     SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1334                               LHS_Lo, RHS_Lo);
1335 
1336     SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), zero});
1337     SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), zero});
1338 
1339     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
1340     Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
1341     return;
1342   }
1343 
1344   // Get Speculative values
1345   SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
1346   SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);
1347 
1348   SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ);
1349   SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, zero});
1350   REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);
1351 
1352   SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ);
1353   SDValue DIV_Lo = zero;
1354 
1355   const unsigned halfBitWidth = HalfVT.getSizeInBits();
1356 
1357   for (unsigned i = 0; i < halfBitWidth; ++i) {
1358     const unsigned bitPos = halfBitWidth - i - 1;
1359     SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
1360     // Get value of high bit
1361     SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
1362     HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
1363     HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);
1364 
1365     // Shift
1366     REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
1367     // Add LHS high bit
1368     REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);
1369 
1370     SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
1371     SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);
1372 
1373     DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);
1374 
1375     // Update REM
1376     SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
1377     REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
1378   }
1379 
1380   SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
1381   DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
1382   Results.push_back(DIV);
1383   Results.push_back(REM);
1384 }
1385 
1386 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
1387                                            SelectionDAG &DAG) const {
1388   SDLoc DL(Op);
1389   EVT VT = Op.getValueType();
1390 
1391   if (VT == MVT::i64) {
1392     SmallVector<SDValue, 2> Results;
1393     LowerUDIVREM64(Op, DAG, Results);
1394     return DAG.getMergeValues(Results, DL);
1395   }
1396 
1397   SDValue Num = Op.getOperand(0);
1398   SDValue Den = Op.getOperand(1);
1399 
1400   if (VT == MVT::i32) {
1401     if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) &&
1402         DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) {
1403       // TODO: We technically could do this for i64, but shouldn't that just be
1404       // handled by something generally reducing 64-bit division on 32-bit
1405       // values to 32-bit?
1406       return LowerDIVREM24(Op, DAG, false);
1407     }
1408   }
1409 
1410   // RCP =  URECIP(Den) = 2^32 / Den + e
1411   // e is rounding error.
1412   SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);
1413 
1414   // RCP_LO = mul(RCP, Den) */
1415   SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);
1416 
1417   // RCP_HI = mulhu (RCP, Den) */
1418   SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);
1419 
1420   // NEG_RCP_LO = -RCP_LO
1421   SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
1422                                                      RCP_LO);
1423 
1424   // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
1425   SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1426                                            NEG_RCP_LO, RCP_LO,
1427                                            ISD::SETEQ);
1428   // Calculate the rounding error from the URECIP instruction
1429   // E = mulhu(ABS_RCP_LO, RCP)
1430   SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);
1431 
1432   // RCP_A_E = RCP + E
1433   SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);
1434 
1435   // RCP_S_E = RCP - E
1436   SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);
1437 
1438   // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E)
1439   SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
1440                                      RCP_A_E, RCP_S_E,
1441                                      ISD::SETEQ);
1442   // Quotient = mulhu(Tmp0, Num)
1443   SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);
1444 
1445   // Num_S_Remainder = Quotient * Den
1446   SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);
1447 
1448   // Remainder = Num - Num_S_Remainder
1449   SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);
1450 
1451   // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
1452   SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
1453                                                  DAG.getConstant(-1, DL, VT),
1454                                                  DAG.getConstant(0, DL, VT),
1455                                                  ISD::SETUGE);
1456   // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
1457   SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
1458                                                   Num_S_Remainder,
1459                                                   DAG.getConstant(-1, DL, VT),
1460                                                   DAG.getConstant(0, DL, VT),
1461                                                   ISD::SETUGE);
1462   // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
1463   SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
1464                                                Remainder_GE_Zero);
1465 
1466   // Calculate Division result:
1467 
1468   // Quotient_A_One = Quotient + 1
1469   SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
1470                                        DAG.getConstant(1, DL, VT));
1471 
1472   // Quotient_S_One = Quotient - 1
1473   SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
1474                                        DAG.getConstant(1, DL, VT));
1475 
1476   // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
1477   SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1478                                      Quotient, Quotient_A_One, ISD::SETEQ);
1479 
1480   // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
1481   Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1482                             Quotient_S_One, Div, ISD::SETEQ);
1483 
1484   // Calculate Rem result:
1485 
1486   // Remainder_S_Den = Remainder - Den
1487   SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);
1488 
1489   // Remainder_A_Den = Remainder + Den
1490   SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);
1491 
1492   // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
1493   SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
1494                                     Remainder, Remainder_S_Den, ISD::SETEQ);
1495 
1496   // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
1497   Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
1498                             Remainder_A_Den, Rem, ISD::SETEQ);
1499   SDValue Ops[2] = {
1500     Div,
1501     Rem
1502   };
1503   return DAG.getMergeValues(Ops, DL);
1504 }
1505 
1506 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
1507                                            SelectionDAG &DAG) const {
1508   SDLoc DL(Op);
1509   EVT VT = Op.getValueType();
1510 
1511   SDValue LHS = Op.getOperand(0);
1512   SDValue RHS = Op.getOperand(1);
1513 
1514   SDValue Zero = DAG.getConstant(0, DL, VT);
1515   SDValue NegOne = DAG.getConstant(-1, DL, VT);
1516 
1517   if (VT == MVT::i32 &&
1518       DAG.ComputeNumSignBits(LHS) > 8 &&
1519       DAG.ComputeNumSignBits(RHS) > 8) {
1520     return LowerDIVREM24(Op, DAG, true);
1521   }
1522   if (VT == MVT::i64 &&
1523       DAG.ComputeNumSignBits(LHS) > 32 &&
1524       DAG.ComputeNumSignBits(RHS) > 32) {
1525     EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());
1526 
1527     //HiLo split
1528     SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
1529     SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
1530     SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
1531                                  LHS_Lo, RHS_Lo);
1532     SDValue Res[2] = {
1533       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
1534       DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
1535     };
1536     return DAG.getMergeValues(Res, DL);
1537   }
1538 
1539   SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
1540   SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
1541   SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
1542   SDValue RSign = LHSign; // Remainder sign is the same as LHS
1543 
1544   LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
1545   RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);
1546 
1547   LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
1548   RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);
1549 
1550   SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
1551   SDValue Rem = Div.getValue(1);
1552 
1553   Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
1554   Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);
1555 
1556   Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
1557   Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);
1558 
1559   SDValue Res[2] = {
1560     Div,
1561     Rem
1562   };
1563   return DAG.getMergeValues(Res, DL);
1564 }
1565 
1566 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
1567 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
1568   SDLoc SL(Op);
1569   EVT VT = Op.getValueType();
1570   SDValue X = Op.getOperand(0);
1571   SDValue Y = Op.getOperand(1);
1572 
1573   // TODO: Should this propagate fast-math-flags?
1574 
1575   SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
1576   SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
1577   SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y);
1578 
1579   return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
1580 }
1581 
1582 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
1583   SDLoc SL(Op);
1584   SDValue Src = Op.getOperand(0);
1585 
1586   // result = trunc(src)
1587   // if (src > 0.0 && src != result)
1588   //   result += 1.0
1589 
1590   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1591 
1592   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1593   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);
1594 
1595   EVT SetCCVT =
1596       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1597 
1598   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
1599   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1600   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
1601 
1602   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
1603   // TODO: Should this propagate fast-math-flags?
1604   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1605 }
1606 
1607 static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
1608   const unsigned FractBits = 52;
1609   const unsigned ExpBits = 11;
1610 
1611   SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
1612                                 Hi,
1613                                 DAG.getConstant(FractBits - 32, SL, MVT::i32),
1614                                 DAG.getConstant(ExpBits, SL, MVT::i32));
1615   SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
1616                             DAG.getConstant(1023, SL, MVT::i32));
1617 
1618   return Exp;
1619 }
1620 
1621 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
1622   SDLoc SL(Op);
1623   SDValue Src = Op.getOperand(0);
1624 
1625   assert(Op.getValueType() == MVT::f64);
1626 
1627   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1628   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1629 
1630   SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1631 
1632   // Extract the upper half, since this is where we will find the sign and
1633   // exponent.
1634   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);
1635 
1636   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
1637 
1638   const unsigned FractBits = 52;
1639 
1640   // Extract the sign bit.
1641   const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
1642   SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);
1643 
1644   // Extend back to to 64-bits.
1645   SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit});
1646   SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);
1647 
1648   SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
1649   const SDValue FractMask
1650     = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);
1651 
1652   SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
1653   SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
1654   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);
1655 
1656   EVT SetCCVT =
1657       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
1658 
1659   const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);
1660 
1661   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
1662   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
1663 
1664   SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
1665   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);
1666 
1667   return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
1668 }
1669 
1670 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
1671   SDLoc SL(Op);
1672   SDValue Src = Op.getOperand(0);
1673 
1674   assert(Op.getValueType() == MVT::f64);
1675 
1676   APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
1677   SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
1678   SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);
1679 
1680   // TODO: Should this propagate fast-math-flags?
1681 
1682   SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
1683   SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);
1684 
1685   SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);
1686 
1687   APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
1688   SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);
1689 
1690   EVT SetCCVT =
1691       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1692   SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);
1693 
1694   return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
1695 }
1696 
1697 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
1698   // FNEARBYINT and FRINT are the same, except in their handling of FP
1699   // exceptions. Those aren't really meaningful for us, and OpenCL only has
1700   // rint, so just treat them as equivalent.
1701   return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
1702 }
1703 
1704 // XXX - May require not supporting f32 denormals?
1705 SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const {
1706   SDLoc SL(Op);
1707   SDValue X = Op.getOperand(0);
1708 
1709   SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X);
1710 
1711   // TODO: Should this propagate fast-math-flags?
1712 
1713   SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T);
1714 
1715   SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);
1716 
1717   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32);
1718   const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
1719   const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32);
1720 
1721   SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);
1722 
1723   EVT SetCCVT =
1724       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);
1725 
1726   SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);
1727 
1728   SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero);
1729 
1730   return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel);
1731 }
1732 
1733 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
1734   SDLoc SL(Op);
1735   SDValue X = Op.getOperand(0);
1736 
1737   SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);
1738 
1739   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1740   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1741   const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
1742   const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
1743   EVT SetCCVT =
1744       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);
1745 
1746   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
1747 
1748   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);
1749 
1750   SDValue Exp = extractF64Exponent(Hi, SL, DAG);
1751 
1752   const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
1753                                        MVT::i64);
1754 
1755   SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
1756   SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
1757                           DAG.getConstant(INT64_C(0x0008000000000000), SL,
1758                                           MVT::i64),
1759                           Exp);
1760 
1761   SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
1762   SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
1763                               DAG.getConstant(0, SL, MVT::i64), Tmp0,
1764                               ISD::SETNE);
1765 
1766   SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
1767                              D, DAG.getConstant(0, SL, MVT::i64));
1768   SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);
1769 
1770   K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
1771   K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);
1772 
1773   SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
1774   SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
1775   SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);
1776 
1777   SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
1778                             ExpEqNegOne,
1779                             DAG.getConstantFP(1.0, SL, MVT::f64),
1780                             DAG.getConstantFP(0.0, SL, MVT::f64));
1781 
1782   SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);
1783 
1784   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
1785   K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);
1786 
1787   return K;
1788 }
1789 
1790 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
1791   EVT VT = Op.getValueType();
1792 
1793   if (VT == MVT::f32)
1794     return LowerFROUND32(Op, DAG);
1795 
1796   if (VT == MVT::f64)
1797     return LowerFROUND64(Op, DAG);
1798 
1799   llvm_unreachable("unhandled type");
1800 }
1801 
1802 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
1803   SDLoc SL(Op);
1804   SDValue Src = Op.getOperand(0);
1805 
1806   // result = trunc(src);
1807   // if (src < 0.0 && src != result)
1808   //   result += -1.0.
1809 
1810   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
1811 
1812   const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
1813   const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);
1814 
1815   EVT SetCCVT =
1816       getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
1817 
1818   SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
1819   SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
1820   SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);
1821 
1822   SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
1823   // TODO: Should this propagate fast-math-flags?
1824   return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
1825 }
1826 
1827 SDValue AMDGPUTargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const {
1828   SDLoc SL(Op);
1829   SDValue Src = Op.getOperand(0);
1830   bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;
1831 
1832   if (ZeroUndef && Src.getValueType() == MVT::i32)
1833     return DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Src);
1834 
1835   SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1836 
1837   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
1838   const SDValue One = DAG.getConstant(1, SL, MVT::i32);
1839 
1840   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
1841   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
1842 
1843   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
1844                                    *DAG.getContext(), MVT::i32);
1845 
1846   SDValue Hi0 = DAG.getSetCC(SL, SetCCVT, Hi, Zero, ISD::SETEQ);
1847 
1848   SDValue CtlzLo = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Lo);
1849   SDValue CtlzHi = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Hi);
1850 
1851   const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
1852   SDValue Add = DAG.getNode(ISD::ADD, SL, MVT::i32, CtlzLo, Bits32);
1853 
1854   // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
1855   SDValue NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0, Add, CtlzHi);
1856 
1857   if (!ZeroUndef) {
1858     // Test if the full 64-bit input is zero.
1859 
1860     // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
1861     // which we probably don't want.
1862     SDValue Lo0 = DAG.getSetCC(SL, SetCCVT, Lo, Zero, ISD::SETEQ);
1863     SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0, Hi0);
1864 
1865     // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
1866     // with the same cycles, otherwise it is slower.
1867     // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
1868     // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);
1869 
1870     const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32);
1871 
1872     // The instruction returns -1 for 0 input, but the defined intrinsic
1873     // behavior is to return the number of bits.
1874     NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32,
1875                           SrcIsZero, Bits32, NewCtlz);
1876   }
1877 
1878   return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewCtlz);
1879 }
1880 
1881 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
1882                                                bool Signed) const {
1883   // Unsigned
1884   // cul2f(ulong u)
1885   //{
1886   //  uint lz = clz(u);
1887   //  uint e = (u != 0) ? 127U + 63U - lz : 0;
1888   //  u = (u << lz) & 0x7fffffffffffffffUL;
1889   //  ulong t = u & 0xffffffffffUL;
1890   //  uint v = (e << 23) | (uint)(u >> 40);
1891   //  uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
1892   //  return as_float(v + r);
1893   //}
1894   // Signed
1895   // cl2f(long l)
1896   //{
1897   //  long s = l >> 63;
1898   //  float r = cul2f((l + s) ^ s);
1899   //  return s ? -r : r;
1900   //}
1901 
1902   SDLoc SL(Op);
1903   SDValue Src = Op.getOperand(0);
1904   SDValue L = Src;
1905 
1906   SDValue S;
1907   if (Signed) {
1908     const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
1909     S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);
1910 
1911     SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
1912     L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
1913   }
1914 
1915   EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
1916                                    *DAG.getContext(), MVT::f32);
1917 
1918 
1919   SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
1920   SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
1921   SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
1922   LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);
1923 
1924   SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
1925   SDValue E = DAG.getSelect(SL, MVT::i32,
1926     DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
1927     DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
1928     ZeroI32);
1929 
1930   SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
1931     DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
1932     DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));
1933 
1934   SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
1935                           DAG.getConstant(0xffffffffffULL, SL, MVT::i64));
1936 
1937   SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
1938                              U, DAG.getConstant(40, SL, MVT::i64));
1939 
1940   SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
1941     DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
1942     DAG.getNode(ISD::TRUNCATE, SL, MVT::i32,  UShl));
1943 
1944   SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
1945   SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
1946   SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);
1947 
1948   SDValue One = DAG.getConstant(1, SL, MVT::i32);
1949 
1950   SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);
1951 
1952   SDValue R = DAG.getSelect(SL, MVT::i32,
1953     RCmp,
1954     One,
1955     DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32));
1956   R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
1957   R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);
1958 
1959   if (!Signed)
1960     return R;
1961 
1962   SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
1963   return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
1964 }
1965 
1966 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
1967                                                bool Signed) const {
1968   SDLoc SL(Op);
1969   SDValue Src = Op.getOperand(0);
1970 
1971   SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);
1972 
1973   SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
1974                            DAG.getConstant(0, SL, MVT::i32));
1975   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
1976                            DAG.getConstant(1, SL, MVT::i32));
1977 
1978   SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
1979                               SL, MVT::f64, Hi);
1980 
1981   SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);
1982 
1983   SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
1984                               DAG.getConstant(32, SL, MVT::i32));
1985   // TODO: Should this propagate fast-math-flags?
1986   return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
1987 }
1988 
1989 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
1990                                                SelectionDAG &DAG) const {
1991   assert(Op.getOperand(0).getValueType() == MVT::i64 &&
1992          "operation should be legal");
1993 
1994   EVT DestVT = Op.getValueType();
1995   if (DestVT == MVT::f64)
1996     return LowerINT_TO_FP64(Op, DAG, false);
1997 
1998   if (DestVT == MVT::f32)
1999     return LowerINT_TO_FP32(Op, DAG, false);
2000 
2001   return SDValue();
2002 }
2003 
2004 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
2005                                               SelectionDAG &DAG) const {
2006   assert(Op.getOperand(0).getValueType() == MVT::i64 &&
2007          "operation should be legal");
2008 
2009   EVT DestVT = Op.getValueType();
2010   if (DestVT == MVT::f32)
2011     return LowerINT_TO_FP32(Op, DAG, true);
2012 
2013   if (DestVT == MVT::f64)
2014     return LowerINT_TO_FP64(Op, DAG, true);
2015 
2016   return SDValue();
2017 }
2018 
2019 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
2020                                                bool Signed) const {
2021   SDLoc SL(Op);
2022 
2023   SDValue Src = Op.getOperand(0);
2024 
2025   SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);
2026 
2027   SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
2028                                  MVT::f64);
2029   SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
2030                                  MVT::f64);
2031   // TODO: Should this propagate fast-math-flags?
2032   SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);
2033 
2034   SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);
2035 
2036 
2037   SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);
2038 
2039   SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
2040                            MVT::i32, FloorMul);
2041   SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);
2042 
2043   SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
2044 
2045   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
2046 }
2047 
2048 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
2049                                               SelectionDAG &DAG) const {
2050   SDValue Src = Op.getOperand(0);
2051 
2052   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2053     return LowerFP64_TO_INT(Op, DAG, true);
2054 
2055   return SDValue();
2056 }
2057 
2058 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
2059                                               SelectionDAG &DAG) const {
2060   SDValue Src = Op.getOperand(0);
2061 
2062   if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
2063     return LowerFP64_TO_INT(Op, DAG, false);
2064 
2065   return SDValue();
2066 }
2067 
2068 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
2069                                                      SelectionDAG &DAG) const {
2070   EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
2071   MVT VT = Op.getSimpleValueType();
2072   MVT ScalarVT = VT.getScalarType();
2073 
2074   if (!VT.isVector())
2075     return SDValue();
2076 
2077   SDValue Src = Op.getOperand(0);
2078   SDLoc DL(Op);
2079 
2080   // TODO: Don't scalarize on Evergreen?
2081   unsigned NElts = VT.getVectorNumElements();
2082   SmallVector<SDValue, 8> Args;
2083   DAG.ExtractVectorElements(Src, Args, 0, NElts);
2084 
2085   SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
2086   for (unsigned I = 0; I < NElts; ++I)
2087     Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);
2088 
2089   return DAG.getBuildVector(VT, DL, Args);
2090 }
2091 
2092 //===----------------------------------------------------------------------===//
2093 // Custom DAG optimizations
2094 //===----------------------------------------------------------------------===//
2095 
2096 static bool isU24(SDValue Op, SelectionDAG &DAG) {
2097   APInt KnownZero, KnownOne;
2098   EVT VT = Op.getValueType();
2099   DAG.computeKnownBits(Op, KnownZero, KnownOne);
2100 
2101   return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
2102 }
2103 
2104 static bool isI24(SDValue Op, SelectionDAG &DAG) {
2105   EVT VT = Op.getValueType();
2106 
2107   // In order for this to be a signed 24-bit value, bit 23, must
2108   // be a sign bit.
2109   return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
2110                                      // as unsigned 24-bit values.
2111          (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
2112 }
2113 
2114 static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {
2115 
2116   SelectionDAG &DAG = DCI.DAG;
2117   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2118   EVT VT = Op.getValueType();
2119 
2120   APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
2121   APInt KnownZero, KnownOne;
2122   TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
2123   if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
2124     DCI.CommitTargetLoweringOpt(TLO);
2125 }
2126 
2127 template <typename IntTy>
2128 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
2129                                uint32_t Offset, uint32_t Width, SDLoc DL) {
2130   if (Width + Offset < 32) {
2131     uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
2132     IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
2133     return DAG.getConstant(Result, DL, MVT::i32);
2134   }
2135 
2136   return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
2137 }
2138 
2139 static bool usesAllNormalStores(SDNode *LoadVal) {
2140   for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
2141     if (!ISD::isNormalStore(*I))
2142       return false;
2143   }
2144 
2145   return true;
2146 }
2147 
2148 // If we have a copy of an illegal type, replace it with a load / store of an
2149 // equivalently sized legal type. This avoids intermediate bit pack / unpack
2150 // instructions emitted when handling extloads and truncstores. Ideally we could
2151 // recognize the pack / unpack pattern to eliminate it.
2152 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
2153                                                   DAGCombinerInfo &DCI) const {
2154   if (!DCI.isBeforeLegalize())
2155     return SDValue();
2156 
2157   StoreSDNode *SN = cast<StoreSDNode>(N);
2158   SDValue Value = SN->getValue();
2159   EVT VT = Value.getValueType();
2160 
2161   if (isTypeLegal(VT) || SN->isVolatile() ||
2162       !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8)
2163     return SDValue();
2164 
2165   LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
2166   if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
2167     return SDValue();
2168 
2169   EVT MemVT = LoadVal->getMemoryVT();
2170 
2171   SDLoc SL(N);
2172   SelectionDAG &DAG = DCI.DAG;
2173   EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);
2174 
2175   SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
2176                                 LoadVT, SL,
2177                                 LoadVal->getChain(),
2178                                 LoadVal->getBasePtr(),
2179                                 LoadVal->getOffset(),
2180                                 LoadVT,
2181                                 LoadVal->getMemOperand());
2182 
2183   SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
2184   DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);
2185 
2186   return DAG.getStore(SN->getChain(), SL, NewLoad,
2187                       SN->getBasePtr(), SN->getMemOperand());
2188 }
2189 
2190 // TODO: Should repeat for other bit ops.
2191 SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
2192                                                 DAGCombinerInfo &DCI) const {
2193   if (N->getValueType(0) != MVT::i64)
2194     return SDValue();
2195 
2196   // Break up 64-bit and of a constant into two 32-bit ands. This will typically
2197   // happen anyway for a VALU 64-bit and. This exposes other 32-bit integer
2198   // combine opportunities since most 64-bit operations are decomposed this way.
2199   // TODO: We won't want this for SALU especially if it is an inline immediate.
2200   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2201   if (!RHS)
2202     return SDValue();
2203 
2204   uint64_t Val = RHS->getZExtValue();
2205   if (Lo_32(Val) != 0 && Hi_32(Val) != 0 && !RHS->hasOneUse()) {
2206     // If either half of the constant is 0, this is really a 32-bit and, so
2207     // split it. If we can re-use the full materialized constant, keep it.
2208     return SDValue();
2209   }
2210 
2211   SDLoc SL(N);
2212   SelectionDAG &DAG = DCI.DAG;
2213 
2214   SDValue Lo, Hi;
2215   std::tie(Lo, Hi) = split64BitValue(N->getOperand(0), DAG);
2216 
2217   SDValue LoRHS = DAG.getConstant(Lo_32(Val), SL, MVT::i32);
2218   SDValue HiRHS = DAG.getConstant(Hi_32(Val), SL, MVT::i32);
2219 
2220   SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
2221   SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);
2222 
2223   // Re-visit the ands. It's possible we eliminated one of them and it could
2224   // simplify the vector.
2225   DCI.AddToWorklist(Lo.getNode());
2226   DCI.AddToWorklist(Hi.getNode());
2227 
2228   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
2229   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
2230 }
2231 
2232 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
2233                                                 DAGCombinerInfo &DCI) const {
2234   if (N->getValueType(0) != MVT::i64)
2235     return SDValue();
2236 
2237   // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))
2238 
2239   // On some subtargets, 64-bit shift is a quarter rate instruction. In the
2240   // common case, splitting this into a move and a 32-bit shift is faster and
2241   // the same code size.
2242   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2243   if (!RHS)
2244     return SDValue();
2245 
2246   unsigned RHSVal = RHS->getZExtValue();
2247   if (RHSVal < 32)
2248     return SDValue();
2249 
2250   SDValue LHS = N->getOperand(0);
2251 
2252   SDLoc SL(N);
2253   SelectionDAG &DAG = DCI.DAG;
2254 
2255   SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);
2256 
2257   SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
2258   SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);
2259 
2260   const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2261 
2262   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
2263   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
2264 }
2265 
2266 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
2267                                                 DAGCombinerInfo &DCI) const {
2268   if (N->getValueType(0) != MVT::i64)
2269     return SDValue();
2270 
2271   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2272   if (!RHS)
2273     return SDValue();
2274 
2275   SelectionDAG &DAG = DCI.DAG;
2276   SDLoc SL(N);
2277   unsigned RHSVal = RHS->getZExtValue();
2278 
2279   // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
2280   if (RHSVal == 32) {
2281     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2282     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2283                                    DAG.getConstant(31, SL, MVT::i32));
2284 
2285     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
2286     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2287   }
2288 
2289   // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
2290   if (RHSVal == 63) {
2291     SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
2292     SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
2293                                    DAG.getConstant(31, SL, MVT::i32));
2294     SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
2295     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
2296   }
2297 
2298   return SDValue();
2299 }
2300 
2301 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
2302                                                 DAGCombinerInfo &DCI) const {
2303   if (N->getValueType(0) != MVT::i64)
2304     return SDValue();
2305 
2306   const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
2307   if (!RHS)
2308     return SDValue();
2309 
2310   unsigned ShiftAmt = RHS->getZExtValue();
2311   if (ShiftAmt < 32)
2312     return SDValue();
2313 
2314   // srl i64:x, C for C >= 32
2315   // =>
2316   //   build_pair (srl hi_32(x), C - 32), 0
2317 
2318   SelectionDAG &DAG = DCI.DAG;
2319   SDLoc SL(N);
2320 
2321   SDValue One = DAG.getConstant(1, SL, MVT::i32);
2322   SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
2323 
2324   SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0));
2325   SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32,
2326                            VecOp, One);
2327 
2328   SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
2329   SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);
2330 
2331   SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});
2332 
2333   return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
2334 }
2335 
2336 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
2337                                                 DAGCombinerInfo &DCI) const {
2338   EVT VT = N->getValueType(0);
2339 
2340   if (VT.isVector() || VT.getSizeInBits() > 32)
2341     return SDValue();
2342 
2343   SelectionDAG &DAG = DCI.DAG;
2344   SDLoc DL(N);
2345 
2346   SDValue N0 = N->getOperand(0);
2347   SDValue N1 = N->getOperand(1);
2348   SDValue Mul;
2349 
2350   if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
2351     N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
2352     N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
2353     Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1);
2354   } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
2355     N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
2356     N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
2357     Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1);
2358   } else {
2359     return SDValue();
2360   }
2361 
2362   // We need to use sext even for MUL_U24, because MUL_U24 is used
2363   // for signed multiply of 8 and 16-bit types.
2364   return DAG.getSExtOrTrunc(Mul, DL, VT);
2365 }
2366 
2367 static bool isNegativeOne(SDValue Val) {
2368   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
2369     return C->isAllOnesValue();
2370   return false;
2371 }
2372 
2373 static bool isCtlzOpc(unsigned Opc) {
2374   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
2375 }
2376 
2377 // Get FFBH node if the incoming op may have been type legalized from a smaller
2378 // type VT.
2379 // Need to match pre-legalized type because the generic legalization inserts the
2380 // add/sub between the select and compare.
2381 static SDValue getFFBH_U32(const TargetLowering &TLI,
2382                            SelectionDAG &DAG, SDLoc SL, SDValue Op) {
2383   EVT VT = Op.getValueType();
2384   EVT LegalVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
2385   if (LegalVT != MVT::i32)
2386     return SDValue();
2387 
2388   if (VT != MVT::i32)
2389     Op = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Op);
2390 
2391   SDValue FFBH = DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Op);
2392   if (VT != MVT::i32)
2393     FFBH = DAG.getNode(ISD::TRUNCATE, SL, VT, FFBH);
2394 
2395   return FFBH;
2396 }
2397 
2398 // The native instructions return -1 on 0 input. Optimize out a select that
2399 // produces -1 on 0.
2400 //
2401 // TODO: If zero is not undef, we could also do this if the output is compared
2402 // against the bitwidth.
2403 //
2404 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
2405 SDValue AMDGPUTargetLowering::performCtlzCombine(SDLoc SL,
2406                                                  SDValue Cond,
2407                                                  SDValue LHS,
2408                                                  SDValue RHS,
2409                                                  DAGCombinerInfo &DCI) const {
2410   ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
2411   if (!CmpRhs || !CmpRhs->isNullValue())
2412     return SDValue();
2413 
2414   SelectionDAG &DAG = DCI.DAG;
2415   ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2416   SDValue CmpLHS = Cond.getOperand(0);
2417 
2418   // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
2419   if (CCOpcode == ISD::SETEQ &&
2420       isCtlzOpc(RHS.getOpcode()) &&
2421       RHS.getOperand(0) == CmpLHS &&
2422       isNegativeOne(LHS)) {
2423     return getFFBH_U32(*this, DAG, SL, CmpLHS);
2424   }
2425 
2426   // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
2427   if (CCOpcode == ISD::SETNE &&
2428       isCtlzOpc(LHS.getOpcode()) &&
2429       LHS.getOperand(0) == CmpLHS &&
2430       isNegativeOne(RHS)) {
2431     return getFFBH_U32(*this, DAG, SL, CmpLHS);
2432   }
2433 
2434   return SDValue();
2435 }
2436 
2437 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
2438                                                    DAGCombinerInfo &DCI) const {
2439   SDValue Cond = N->getOperand(0);
2440   if (Cond.getOpcode() != ISD::SETCC)
2441     return SDValue();
2442 
2443   EVT VT = N->getValueType(0);
2444   SDValue LHS = Cond.getOperand(0);
2445   SDValue RHS = Cond.getOperand(1);
2446   SDValue CC = Cond.getOperand(2);
2447 
2448   SDValue True = N->getOperand(1);
2449   SDValue False = N->getOperand(2);
2450 
2451   if (VT == MVT::f32 && Cond.hasOneUse()) {
2452     SDValue MinMax
2453       = CombineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
2454     // Revisit this node so we can catch min3/max3/med3 patterns.
2455     //DCI.AddToWorklist(MinMax.getNode());
2456     return MinMax;
2457   }
2458 
2459   // There's no reason to not do this if the condition has other uses.
2460   return performCtlzCombine(SDLoc(N), Cond, True, False, DCI);
2461 }
2462 
2463 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
2464                                                 DAGCombinerInfo &DCI) const {
2465   SelectionDAG &DAG = DCI.DAG;
2466   SDLoc DL(N);
2467 
2468   switch(N->getOpcode()) {
2469   default:
2470     break;
2471   case ISD::BITCAST: {
2472     EVT DestVT = N->getValueType(0);
2473     if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
2474       break;
2475 
2476     // Fold bitcasts of constants.
2477     //
2478     // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
2479     // TODO: Generalize and move to DAGCombiner
2480     SDValue Src = N->getOperand(0);
2481     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
2482       assert(Src.getValueType() == MVT::i64);
2483       SDLoc SL(N);
2484       uint64_t CVal = C->getZExtValue();
2485       return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT,
2486                          DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
2487                          DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
2488     }
2489 
2490     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
2491       const APInt &Val = C->getValueAPF().bitcastToAPInt();
2492       SDLoc SL(N);
2493       uint64_t CVal = Val.getZExtValue();
2494       SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
2495                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
2496                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
2497 
2498       return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
2499     }
2500 
2501     break;
2502   }
2503   case ISD::SHL: {
2504     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2505       break;
2506 
2507     return performShlCombine(N, DCI);
2508   }
2509   case ISD::SRL: {
2510     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2511       break;
2512 
2513     return performSrlCombine(N, DCI);
2514   }
2515   case ISD::SRA: {
2516     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2517       break;
2518 
2519     return performSraCombine(N, DCI);
2520   }
2521   case ISD::AND: {
2522     if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
2523       break;
2524 
2525     return performAndCombine(N, DCI);
2526   }
2527   case ISD::MUL:
2528     return performMulCombine(N, DCI);
2529   case AMDGPUISD::MUL_I24:
2530   case AMDGPUISD::MUL_U24: {
2531     SDValue N0 = N->getOperand(0);
2532     SDValue N1 = N->getOperand(1);
2533     simplifyI24(N0, DCI);
2534     simplifyI24(N1, DCI);
2535     return SDValue();
2536   }
2537   case ISD::SELECT:
2538     return performSelectCombine(N, DCI);
2539   case AMDGPUISD::BFE_I32:
2540   case AMDGPUISD::BFE_U32: {
2541     assert(!N->getValueType(0).isVector() &&
2542            "Vector handling of BFE not implemented");
2543     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
2544     if (!Width)
2545       break;
2546 
2547     uint32_t WidthVal = Width->getZExtValue() & 0x1f;
2548     if (WidthVal == 0)
2549       return DAG.getConstant(0, DL, MVT::i32);
2550 
2551     ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
2552     if (!Offset)
2553       break;
2554 
2555     SDValue BitsFrom = N->getOperand(0);
2556     uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;
2557 
2558     bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;
2559 
2560     if (OffsetVal == 0) {
2561       // This is already sign / zero extended, so try to fold away extra BFEs.
2562       unsigned SignBits =  Signed ? (32 - WidthVal + 1) : (32 - WidthVal);
2563 
2564       unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
2565       if (OpSignBits >= SignBits)
2566         return BitsFrom;
2567 
2568       EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
2569       if (Signed) {
2570         // This is a sign_extend_inreg. Replace it to take advantage of existing
2571         // DAG Combines. If not eliminated, we will match back to BFE during
2572         // selection.
2573 
2574         // TODO: The sext_inreg of extended types ends, although we can could
2575         // handle them in a single BFE.
2576         return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
2577                            DAG.getValueType(SmallVT));
2578       }
2579 
2580       return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
2581     }
2582 
2583     if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
2584       if (Signed) {
2585         return constantFoldBFE<int32_t>(DAG,
2586                                         CVal->getSExtValue(),
2587                                         OffsetVal,
2588                                         WidthVal,
2589                                         DL);
2590       }
2591 
2592       return constantFoldBFE<uint32_t>(DAG,
2593                                        CVal->getZExtValue(),
2594                                        OffsetVal,
2595                                        WidthVal,
2596                                        DL);
2597     }
2598 
2599     if ((OffsetVal + WidthVal) >= 32) {
2600       SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
2601       return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
2602                          BitsFrom, ShiftVal);
2603     }
2604 
2605     if (BitsFrom.hasOneUse()) {
2606       APInt Demanded = APInt::getBitsSet(32,
2607                                          OffsetVal,
2608                                          OffsetVal + WidthVal);
2609 
2610       APInt KnownZero, KnownOne;
2611       TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
2612                                             !DCI.isBeforeLegalizeOps());
2613       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2614       if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
2615           TLI.SimplifyDemandedBits(BitsFrom, Demanded,
2616                                    KnownZero, KnownOne, TLO)) {
2617         DCI.CommitTargetLoweringOpt(TLO);
2618       }
2619     }
2620 
2621     break;
2622   }
2623 
2624   case ISD::STORE:
2625     return performStoreCombine(N, DCI);
2626   }
2627   return SDValue();
2628 }
2629 
2630 //===----------------------------------------------------------------------===//
2631 // Helper functions
2632 //===----------------------------------------------------------------------===//
2633 
2634 void AMDGPUTargetLowering::getOriginalFunctionArgs(
2635                                SelectionDAG &DAG,
2636                                const Function *F,
2637                                const SmallVectorImpl<ISD::InputArg> &Ins,
2638                                SmallVectorImpl<ISD::InputArg> &OrigIns) const {
2639 
2640   for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
2641     if (Ins[i].ArgVT == Ins[i].VT) {
2642       OrigIns.push_back(Ins[i]);
2643       continue;
2644     }
2645 
2646     EVT VT;
2647     if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
2648       // Vector has been split into scalars.
2649       VT = Ins[i].ArgVT.getVectorElementType();
2650     } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
2651                Ins[i].ArgVT.getVectorElementType() !=
2652                Ins[i].VT.getVectorElementType()) {
2653       // Vector elements have been promoted
2654       VT = Ins[i].ArgVT;
2655     } else {
2656       // Vector has been spilt into smaller vectors.
2657       VT = Ins[i].VT;
2658     }
2659 
2660     ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
2661                       Ins[i].OrigArgIndex, Ins[i].PartOffset);
2662     OrigIns.push_back(Arg);
2663   }
2664 }
2665 
2666 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
2667                                                   const TargetRegisterClass *RC,
2668                                                    unsigned Reg, EVT VT) const {
2669   MachineFunction &MF = DAG.getMachineFunction();
2670   MachineRegisterInfo &MRI = MF.getRegInfo();
2671   unsigned VirtualRegister;
2672   if (!MRI.isLiveIn(Reg)) {
2673     VirtualRegister = MRI.createVirtualRegister(RC);
2674     MRI.addLiveIn(Reg, VirtualRegister);
2675   } else {
2676     VirtualRegister = MRI.getLiveInVirtReg(Reg);
2677   }
2678   return DAG.getRegister(VirtualRegister, VT);
2679 }
2680 
2681 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
2682     const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const {
2683   uint64_t ArgOffset = MFI->ABIArgOffset;
2684   switch (Param) {
2685   case GRID_DIM:
2686     return ArgOffset;
2687   case GRID_OFFSET:
2688     return ArgOffset + 4;
2689   }
2690   llvm_unreachable("unexpected implicit parameter type");
2691 }
2692 
2693 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;
2694 
2695 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
2696   switch ((AMDGPUISD::NodeType)Opcode) {
2697   case AMDGPUISD::FIRST_NUMBER: break;
2698   // AMDIL DAG nodes
2699   NODE_NAME_CASE(CALL);
2700   NODE_NAME_CASE(UMUL);
2701   NODE_NAME_CASE(RET_FLAG);
2702   NODE_NAME_CASE(BRANCH_COND);
2703 
2704   // AMDGPU DAG nodes
2705   NODE_NAME_CASE(DWORDADDR)
2706   NODE_NAME_CASE(FRACT)
2707   NODE_NAME_CASE(CLAMP)
2708   NODE_NAME_CASE(COS_HW)
2709   NODE_NAME_CASE(SIN_HW)
2710   NODE_NAME_CASE(FMAX_LEGACY)
2711   NODE_NAME_CASE(FMIN_LEGACY)
2712   NODE_NAME_CASE(FMAX3)
2713   NODE_NAME_CASE(SMAX3)
2714   NODE_NAME_CASE(UMAX3)
2715   NODE_NAME_CASE(FMIN3)
2716   NODE_NAME_CASE(SMIN3)
2717   NODE_NAME_CASE(UMIN3)
2718   NODE_NAME_CASE(FMED3)
2719   NODE_NAME_CASE(SMED3)
2720   NODE_NAME_CASE(UMED3)
2721   NODE_NAME_CASE(URECIP)
2722   NODE_NAME_CASE(DIV_SCALE)
2723   NODE_NAME_CASE(DIV_FMAS)
2724   NODE_NAME_CASE(DIV_FIXUP)
2725   NODE_NAME_CASE(TRIG_PREOP)
2726   NODE_NAME_CASE(RCP)
2727   NODE_NAME_CASE(RSQ)
2728   NODE_NAME_CASE(RSQ_LEGACY)
2729   NODE_NAME_CASE(RSQ_CLAMP)
2730   NODE_NAME_CASE(LDEXP)
2731   NODE_NAME_CASE(FP_CLASS)
2732   NODE_NAME_CASE(DOT4)
2733   NODE_NAME_CASE(CARRY)
2734   NODE_NAME_CASE(BORROW)
2735   NODE_NAME_CASE(BFE_U32)
2736   NODE_NAME_CASE(BFE_I32)
2737   NODE_NAME_CASE(BFI)
2738   NODE_NAME_CASE(BFM)
2739   NODE_NAME_CASE(FFBH_U32)
2740   NODE_NAME_CASE(MUL_U24)
2741   NODE_NAME_CASE(MUL_I24)
2742   NODE_NAME_CASE(MAD_U24)
2743   NODE_NAME_CASE(MAD_I24)
2744   NODE_NAME_CASE(TEXTURE_FETCH)
2745   NODE_NAME_CASE(EXPORT)
2746   NODE_NAME_CASE(CONST_ADDRESS)
2747   NODE_NAME_CASE(REGISTER_LOAD)
2748   NODE_NAME_CASE(REGISTER_STORE)
2749   NODE_NAME_CASE(LOAD_INPUT)
2750   NODE_NAME_CASE(SAMPLE)
2751   NODE_NAME_CASE(SAMPLEB)
2752   NODE_NAME_CASE(SAMPLED)
2753   NODE_NAME_CASE(SAMPLEL)
2754   NODE_NAME_CASE(CVT_F32_UBYTE0)
2755   NODE_NAME_CASE(CVT_F32_UBYTE1)
2756   NODE_NAME_CASE(CVT_F32_UBYTE2)
2757   NODE_NAME_CASE(CVT_F32_UBYTE3)
2758   NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
2759   NODE_NAME_CASE(CONST_DATA_PTR)
2760   case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
2761   NODE_NAME_CASE(SENDMSG)
2762   NODE_NAME_CASE(INTERP_MOV)
2763   NODE_NAME_CASE(INTERP_P1)
2764   NODE_NAME_CASE(INTERP_P2)
2765   NODE_NAME_CASE(STORE_MSKOR)
2766   NODE_NAME_CASE(LOAD_CONSTANT)
2767   NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
2768   NODE_NAME_CASE(ATOMIC_CMP_SWAP)
2769   NODE_NAME_CASE(ATOMIC_INC)
2770   NODE_NAME_CASE(ATOMIC_DEC)
2771   case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
2772   }
2773   return nullptr;
2774 }
2775 
2776 SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand,
2777                                                DAGCombinerInfo &DCI,
2778                                                unsigned &RefinementSteps,
2779                                                bool &UseOneConstNR) const {
2780   SelectionDAG &DAG = DCI.DAG;
2781   EVT VT = Operand.getValueType();
2782 
2783   if (VT == MVT::f32) {
2784     RefinementSteps = 0;
2785     return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
2786   }
2787 
2788   // TODO: There is also f64 rsq instruction, but the documentation is less
2789   // clear on its precision.
2790 
2791   return SDValue();
2792 }
2793 
2794 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
2795                                                DAGCombinerInfo &DCI,
2796                                                unsigned &RefinementSteps) const {
2797   SelectionDAG &DAG = DCI.DAG;
2798   EVT VT = Operand.getValueType();
2799 
2800   if (VT == MVT::f32) {
2801     // Reciprocal, < 1 ulp error.
2802     //
2803     // This reciprocal approximation converges to < 0.5 ulp error with one
2804     // newton rhapson performed with two fused multiple adds (FMAs).
2805 
2806     RefinementSteps = 0;
2807     return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
2808   }
2809 
2810   // TODO: There is also f64 rcp instruction, but the documentation is less
2811   // clear on its precision.
2812 
2813   return SDValue();
2814 }
2815 
2816 void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
2817   const SDValue Op,
2818   APInt &KnownZero,
2819   APInt &KnownOne,
2820   const SelectionDAG &DAG,
2821   unsigned Depth) const {
2822 
2823   KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.
2824 
2825   APInt KnownZero2;
2826   APInt KnownOne2;
2827   unsigned Opc = Op.getOpcode();
2828 
2829   switch (Opc) {
2830   default:
2831     break;
2832   case AMDGPUISD::CARRY:
2833   case AMDGPUISD::BORROW: {
2834     KnownZero = APInt::getHighBitsSet(32, 31);
2835     break;
2836   }
2837 
2838   case AMDGPUISD::BFE_I32:
2839   case AMDGPUISD::BFE_U32: {
2840     ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2841     if (!CWidth)
2842       return;
2843 
2844     unsigned BitWidth = 32;
2845     uint32_t Width = CWidth->getZExtValue() & 0x1f;
2846 
2847     if (Opc == AMDGPUISD::BFE_U32)
2848       KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);
2849 
2850     break;
2851   }
2852   }
2853 }
2854 
2855 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
2856   SDValue Op,
2857   const SelectionDAG &DAG,
2858   unsigned Depth) const {
2859   switch (Op.getOpcode()) {
2860   case AMDGPUISD::BFE_I32: {
2861     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2862     if (!Width)
2863       return 1;
2864 
2865     unsigned SignBits = 32 - Width->getZExtValue() + 1;
2866     if (!isNullConstant(Op.getOperand(1)))
2867       return SignBits;
2868 
2869     // TODO: Could probably figure something out with non-0 offsets.
2870     unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
2871     return std::max(SignBits, Op0SignBits);
2872   }
2873 
2874   case AMDGPUISD::BFE_U32: {
2875     ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
2876     return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
2877   }
2878 
2879   case AMDGPUISD::CARRY:
2880   case AMDGPUISD::BORROW:
2881     return 31;
2882 
2883   default:
2884     return 1;
2885   }
2886 }
2887